Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Commit ce2b8e8061: Merge remote-tracking branch 'origin/master' into fin
@@ -27,6 +27,8 @@ Checks: [
     '-bugprone-not-null-terminated-result',
     '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
     '-bugprone-unchecked-optional-access',
+    '-bugprone-crtp-constructor-accessibility',
+    '-bugprone-suspicious-stringview-data-usage',

     '-cert-dcl16-c',
     '-cert-dcl37-c',
@@ -36,6 +38,7 @@ Checks: [
     '-cert-msc51-cpp',
     '-cert-oop54-cpp',
     '-cert-oop57-cpp',
+    '-cert-err33-c', # Misreports on clang-19: it warns about all functions containing 'remove' in the name, not only about the standard library.

     '-clang-analyzer-optin.performance.Padding',

@@ -99,6 +102,7 @@ Checks: [
     '-modernize-use-emplace',
     '-modernize-use-nodiscard',
     '-modernize-use-trailing-return-type',
+    '-modernize-use-designated-initializers',

     '-performance-enum-size',
     '-performance-inefficient-string-concatenation',
@@ -13,3 +13,6 @@
 # dbms/ → src/
 # (though it is unlikely that you will see it in blame)
 06446b4f08a142d6f1bc30664c47ded88ab51782
+
+# Applied Black formatter for Python code
+e6f5a3f98b21ba99cf274a9833797889e020a2b3
.github/actionlint.yml (vendored, 1 line changed)
@@ -7,3 +7,4 @@ self-hosted-runner:
     - stress-tester
     - style-checker
     - style-checker-aarch64
+    - release-maker
.github/actions/release/action.yml (vendored, new file, 168 lines)
@@ -0,0 +1,168 @@
name: Release

description: Makes patch releases and creates new release branch

inputs:
  ref:
    description: 'Git reference (branch or commit sha) from which to create the release'
    required: true
    type: string
  type:
    description: 'The type of release: "new" for a new release or "patch" for a patch release'
    required: true
    type: choice
    options:
      - patch
      - new
  dry-run:
    description: 'Dry run'
    required: false
    default: true
    type: boolean
  token:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Prepare Release Info
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --prepare-release-info \
          --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
          ${{ inputs.dry-run && '--dry-run' || '' }}
        echo "::group::Release Info"
        python3 -m json.tool /tmp/release_info.json
        echo "::endgroup::"
        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
        echo "Release Tag: $release_tag"
        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
    - name: Download All Release Artifacts
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push Git Tag for the Release
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push New Release Branch
      if: ${{ inputs.type == 'new' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump CH Version and Update Contributors' List
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump Docker versions, Changelog, Security
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        git checkout master
        python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
        echo "List versions"
        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
        echo "Update docker version"
        ./utils/list-versions/update-docker-version.sh
        echo "Generate ChangeLog"
        export CI=1
        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
          --volume=".:/ClickHouse" clickhouse/style-test \
          /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
          --gh-user-or-token=${{ inputs.token }} --jobs=5 \
          --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
        echo "Generate Security"
        python3 ./utils/security-generator/generate_security.py > SECURITY.md
        git diff HEAD
    - name: Create ChangeLog PR
      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
      uses: peter-evans/create-pull-request@v6
      with:
        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        token: ${{ inputs.token }}
        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
        branch: auto/${{ env.RELEASE_TAG }}
        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
        delete-branch: true
        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
        labels: do not test
        body: |
          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          ### Changelog category (leave one):
          - Not for changelog (changelog entry is not required)
    - name: Complete previous steps and Restore git state
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-completed
        git reset --hard HEAD
        git checkout "$GITHUB_REF_NAME"
    - name: Create GH Release
      shell: bash
      if: ${{ inputs.type == 'patch' }}
      run: |
        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Docker clickhouse/clickhouse-server building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker server release"
        export CHECK_NAME="Docker server image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Docker clickhouse/clickhouse-keeper building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker keeper release"
        export CHECK_NAME="Docker keeper image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Set current Release progress to Completed with OK
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Post Slack Message
      if: ${{ !cancelled() }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}
.github/workflows/auto_release.yml (vendored, 98 lines changed)
@@ -1,44 +1,110 @@
 name: AutoRelease

 env:
-  # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  DRY_RUN: true

 concurrency:
-  group: auto-release
+  group: release
 on:  # yamllint disable-line rule:truthy
-  # schedule:
-  #   - cron: '0 10-16 * * 1-5'
+  # Workflow uses a test bucket for packages and dry run mode (no real releases)
+  schedule:
+    - cron: '0 9 * * *'
+    - cron: '0 15 * * *'
   workflow_dispatch:
+    inputs:
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean

 jobs:
-  CherryPick:
-    runs-on: [self-hosted, style-checker-aarch64]
+  AutoRelease:
+    runs-on: [self-hosted, release-maker]
     steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Set envs
-        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/cherry_pick
           ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
           ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
           RCSK
-          REPO_OWNER=ClickHouse
-          REPO_NAME=ClickHouse
-          REPO_TEAM=core
           EOF
+      - name: Set DRY_RUN for schedule
+        if: ${{ github.event_name == 'schedule' }}
+        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
+      - name: Set DRY_RUN for dispatch
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
-          clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
-      - name: Auto-release
+      - name: Auto Release Prepare
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --release-after-days=3
-      - name: Cleanup
-        if: always()
+          python3 auto_release.py --prepare
+          echo "::group::Auto Release Info"
+          python3 -m json.tool /tmp/autorelease_info.json
+          echo "::endgroup::"
+          {
+            echo 'AUTO_RELEASE_PARAMS<<EOF'
+            cat /tmp/autorelease_info.json
+            echo 'EOF'
+          } >> "$GITHUB_ENV"
+      - name: Post Release Branch statuses
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-status
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Post Slack Message
+        if: ${{ !cancelled() }}
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
+      - name: Clean up
         run: |
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
.github/workflows/backport_branches.yml (vendored, 20 lines changed)
@@ -36,10 +36,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -62,7 +58,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckX86:
@@ -245,8 +241,9 @@ jobs:
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs:
+      - RunConfig
       - Builds_Report
       - FunctionalStatelessTestAsan
       - FunctionalStatefulTestDebug
@@ -261,6 +258,7 @@ jobs:
         with:
           clear-repository: true
       - name: Finish label
+        if: ${{ !failure() }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           # update mergeable check
@@ -268,3 +266,13 @@ jobs:
           # update overall ci report
           python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
           python3 merge_pr.py
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/create_release.yml (vendored, 33 lines changed)
@@ -2,12 +2,11 @@ name: CreateRelease

 concurrency:
   group: release

 'on':
   workflow_dispatch:
     inputs:
-      sha:
-        description: 'The SHA hash of the commit from which to create the release'
+      ref:
+        description: 'Git reference (branch or commit sha) from which to create the release'
         required: true
         type: string
       type:
@@ -15,15 +14,31 @@ concurrency:
         required: true
         type: choice
         options:
-        - new
         - patch
+        - new
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean

 jobs:
-  Release:
-    runs-on: [self-hosted, style-checker-aarch64]
+  CreateRelease:
+    env:
+      GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+    runs-on: [self-hosted, release-maker]
     steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
-      - name: Print greeting
-        run: |
-          python3 ./tests/ci/release.py --commit ${{ inputs.sha }} --type ${{ inputs.type }} --dry-run
+        with:
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+          fetch-depth: 0
+      - name: Call Release Action
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ inputs.ref }}
+          type: ${{ inputs.type }}
+          dry-run: ${{ inputs.dry-run }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
.github/workflows/master.yml (vendored, 44 lines changed)
@@ -33,10 +33,6 @@ jobs:
      #     cd "$GITHUB_WORKSPACE/tests/ci"
      #     echo "Testing the main ci directory"
      #     python3 -m unittest discover -s . -p 'test_*.py'
-      #     for dir in *_lambda/; do
-      #       echo "Testing $dir"
-      #       python3 -m unittest discover -s "$dir" -p 'test_*.py'
-      #     done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -58,7 +54,7 @@ jobs:
   # BuildDockers:
   #   needs: [RunConfig]
   #   if: ${{ !failure() && !cancelled() }}
-  #   uses: ./.github/workflows/reusable_docker.yml
+  #   uses: ./.github/workflows/docker_test_images.yml
   #   with:
   #     data: ${{ needs.RunConfig.outputs.data }}
   # StyleCheck:
@@ -125,34 +121,6 @@ jobs:
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-
-  MarkReleaseReady:
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2]
-    runs-on: [self-hosted, style-checker-aarch64]
-    steps:
-      - name: Debug
-        run: |
-          echo need with different filters
-          cat << 'EOF'
-          ${{ toJSON(needs) }}
-          ${{ toJSON(needs.*.result) }}
-          no failures ${{ !contains(needs.*.result, 'failure') }}
-          no skips ${{ !contains(needs.*.result, 'skipped') }}
-          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-          EOF
-      - name: Not ready
-        # fail the job to be able to restart it
-        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
-        run: exit 1
-      - name: Check out repository code
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        uses: ClickHouse/checkout@v1
-      - name: Mark Commit Release Ready
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 mark_release_ready.py

   FinishCheck:
     if: ${{ !cancelled() }}
     needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
@@ -164,3 +132,13 @@ jobs:
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/merge_queue.yml (vendored, 19 lines changed)
@@ -30,10 +30,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -51,7 +47,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   StyleCheck:
@@ -97,7 +93,7 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}

   CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
     # Test_2 or Test_3 must not have jobs required for Mergeable check
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
     runs-on: [self-hosted, style-checker-aarch64]
@@ -105,6 +101,17 @@ jobs:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
       - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/nightly.yml (vendored, 19 lines changed)
@@ -40,7 +40,24 @@ jobs:
           } >> "$GITHUB_OUTPUT"
   BuildDockers:
     needs: [RunConfig]
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: "${{ needs.RunConfig.outputs.data }}"
       set_latest: true
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, BuildDockers]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/pull_request.yml (vendored, 21 lines changed)
@@ -48,10 +48,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -72,7 +68,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   StyleCheck:
@@ -155,7 +151,7 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}

   CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
     # Test_2 or Test_3 must not have jobs required for Mergeable check
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
     runs-on: [self-hosted, style-checker-aarch64]
@@ -165,14 +161,25 @@ jobs:
         with:
           filter: tree:0
       - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

   ################################# Stage Final #################################
   #
   FinishCheck:
-    if: ${{ !cancelled() }}
+    if: ${{ !failure() && !cancelled() }}
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
.github/workflows/release_branches.yml (vendored, 20 lines changed)
@@ -33,10 +33,6 @@ jobs:
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
           python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
       - name: PrepareRunConfig
         id: runconfig
         run: |
@@ -57,7 +53,7 @@ jobs:
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
     with:
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckX86:
@@ -445,8 +441,9 @@ jobs:
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
     needs:
+      - RunConfig
      - DockerServerImage
      - DockerKeeperImage
      - Builds_Report
@@ -482,9 +479,20 @@ jobs:
         with:
           clear-repository: true
       - name: Finish label
+        if: ${{ !failure() }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           # update mergeable check
           python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
           # update overall ci report
           python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          echo "::group::Workflow results"
+          python3 -m json.tool "$WORKFLOW_RESULT_FILE"
+          echo "::endgroup::"
+          python3 ./tests/ci/ci_buddy.py --check-wf-status
.github/workflows/reusable_test.yml (vendored, 2 lines changed)
@@ -102,6 +102,8 @@ jobs:
             --job-name '${{inputs.test_name}}' \
             --run \
             --run-command '''${{inputs.run_command}}'''
+          # shellcheck disable=SC2319
+          echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
       - name: Post run
         if: ${{ !cancelled() }}
         run: |
@@ -14,3 +14,9 @@ rules:
   comments:
     min-spaces-from-content: 1
   document-start: disable
+  colons: disable
+  indentation: disable
+  line-length: disable
+  trailing-spaces: disable
+  truthy: disable
+  new-line-at-end-of-file: disable
@@ -3,8 +3,9 @@
 #include <base/defines.h>

 #include <fstream>
-#include <sstream>
+#include <string>

+namespace fs = std::filesystem;

 bool cgroupsV2Enabled()
 {
@@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
     {
         /// This file exists iff the host has cgroups v2 enabled.
         auto controllers_file = default_cgroups_mount / "cgroup.controllers";
-        if (!std::filesystem::exists(controllers_file))
+        if (!fs::exists(controllers_file))
             return false;
         return true;
     }
-    catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
+    catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
     {
         return false; /// not logging the exception as most callers fall back to cgroups v1
     }
@@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
     /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
     /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
     /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
-    std::string cgroup = cgroupV2OfProcess();
-    auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+    fs::path cgroup_dir = cgroupV2PathOfProcess();
+    if (cgroup_dir.empty())
+        return false;
     std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
     if (!controllers_file.is_open())
         return false;
@@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
 #endif
 }

-std::string cgroupV2OfProcess()
+fs::path cgroupV2PathOfProcess()
 {
 #if defined(OS_LINUX)
     chassert(cgroupsV2Enabled());
@@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
     /// A simpler way to get the membership is:
     std::ifstream cgroup_name_file("/proc/self/cgroup");
     if (!cgroup_name_file.is_open())
-        return "";
+        return {};
     /// With cgroups v2, there will be a *single* line with prefix "0::/"
     /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
     std::string cgroup;
     std::getline(cgroup_name_file, cgroup);
     static const std::string v2_prefix = "0::/";
     if (!cgroup.starts_with(v2_prefix))
-        return "";
+        return {};
     cgroup = cgroup.substr(v2_prefix.length());
-    return cgroup;
+    /// Note: The 'root' cgroup can have an empty cgroup name, this is valid
+    return default_cgroups_mount / cgroup;
 #else
-    return "";
+    return {};
 #endif
 }
@ -1,7 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <string>
|
|
||||||
|
|
||||||
#if defined(OS_LINUX)
|
#if defined(OS_LINUX)
|
||||||
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
|
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
|
||||||
@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
|
|||||||
/// Assumes that cgroupsV2Enabled() is enabled.
|
/// Assumes that cgroupsV2Enabled() is enabled.
|
||||||
bool cgroupsV2MemoryControllerEnabled();
|
bool cgroupsV2MemoryControllerEnabled();
|
||||||
|
|
||||||
/// Which cgroup does the process belong to?
|
/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
|
||||||
/// Returns an empty string if the cgroup cannot be determined.
|
/// Returns an empty path the cgroup cannot be determined.
|
||||||
/// Assumes that cgroupsV2Enabled() is enabled.
|
/// Assumes that cgroupsV2Enabled() is enabled.
|
||||||
std::string cgroupV2OfProcess();
|
std::filesystem::path cgroupV2PathOfProcess();
|
||||||
|
@@ -87,10 +87,13 @@
 # define ASAN_POISON_MEMORY_REGION(a, b)
 #endif

-#if !defined(ABORT_ON_LOGICAL_ERROR)
-#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
-#define ABORT_ON_LOGICAL_ERROR
-#endif
+/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors
+/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places.
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
+#    if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \
+        || defined(UNDEFINED_BEHAVIOR_SANITIZER)
+#        define DEBUG_OR_SANITIZER_BUILD
+#    endif
 #endif

 /// chassert(x) is similar to assert(x), but:
@@ -101,7 +104,7 @@
 /// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
 /// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
 #if !defined(chassert)
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#    if defined(DEBUG_OR_SANITIZER_BUILD)
     // clang-format off
     #include <base/types.h>
     namespace DB
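Note: DEBUG_OR_SANITIZER_BUILD is defined exactly when the build is a debug build (NDEBUG undefined) or any sanitizer is enabled, and chassert() aborts only under that macro. A minimal sketch of the gating pattern follows; it mirrors the idea, not the literal defines.h body (the real chassert calls DB::abortOnFailedAssertion rather than plain abort(), and MY_CHASSERT is a made-up name for illustration):

    #include <cstdio>
    #include <cstdlib>

    // Same condition as in the hunk above: debug build or any sanitizer.
    #if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) \
        || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
    #    define DEBUG_OR_SANITIZER_BUILD
    #endif

    #if defined(DEBUG_OR_SANITIZER_BUILD)
    // Debug/sanitizer builds: fail loudly (SIGABRT is easier to debug than SIGTRAP).
    #    define MY_CHASSERT(x) \
            do { if (!(x)) { std::fprintf(stderr, "Assertion failed: %s\n", #x); std::abort(); } } while (false)
    #else
    // Release builds: the check compiles away entirely.
    #    define MY_CHASSERT(x) ((void)0)
    #endif

    int main()
    {
        MY_CHASSERT(1 + 1 == 2); // no-op in release, hard abort on failure in debug/sanitizer builds
        return 0;
    }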
@@ -1,5 +1,6 @@
 #pragma once

+#include <cstdlib>
 #include <memory>
 #include <string>

@@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
     using type = std::make_unsigned_t<T>;
 };

+template <> struct make_unsigned<Int8> { using type = UInt8; };
+template <> struct make_unsigned<UInt8> { using type = UInt8; };
+template <> struct make_unsigned<Int16> { using type = UInt16; };
+template <> struct make_unsigned<UInt16> { using type = UInt16; };
+template <> struct make_unsigned<Int32> { using type = UInt32; };
+template <> struct make_unsigned<UInt32> { using type = UInt32; };
+template <> struct make_unsigned<Int64> { using type = UInt64; };
+template <> struct make_unsigned<UInt64> { using type = UInt64; };
 template <> struct make_unsigned<Int128> { using type = UInt128; };
 template <> struct make_unsigned<UInt128> { using type = UInt128; };
 template <> struct make_unsigned<Int256> { using type = UInt256; };
@@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
     using type = std::make_signed_t<T>;
 };

+template <> struct make_signed<Int8> { using type = Int8; };
+template <> struct make_signed<UInt8> { using type = Int8; };
+template <> struct make_signed<Int16> { using type = Int16; };
+template <> struct make_signed<UInt16> { using type = Int16; };
+template <> struct make_signed<Int32> { using type = Int32; };
+template <> struct make_signed<UInt32> { using type = Int32; };
+template <> struct make_signed<Int64> { using type = Int64; };
+template <> struct make_signed<UInt64> { using type = Int64; };
 template <> struct make_signed<Int128> { using type = Int128; };
 template <> struct make_signed<UInt128> { using type = Int128; };
 template <> struct make_signed<Int256> { using type = Int256; };
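Note: these specializations make the project-local make_unsigned/make_signed traits explicit for every integer alias, in the same style as the pre-existing Int128/Int256 specializations that std::make_unsigned cannot handle. A small usage sketch, with std fixed-width typedefs standing in for the aliases defined elsewhere in base/:

    #include <cstdint>
    #include <type_traits>

    using Int8 = int8_t;   // stand-ins for the project's integer aliases
    using UInt8 = uint8_t;

    template <typename T>
    struct make_unsigned { using type = std::make_unsigned_t<T>; };

    // Explicit specialization in the style of the hunk above.
    template <> struct make_unsigned<Int8> { using type = UInt8; };

    template <typename T>
    using make_unsigned_t = typename make_unsigned<T>::type;

    // Generic code can now flip signedness without caring which alias it received.
    template <typename T>
    make_unsigned_t<T> absValue(T x)
    {
        return x < 0 ? make_unsigned_t<T>(-x) : make_unsigned_t<T>(x);
    }

    static_assert(std::is_same_v<make_unsigned_t<Int8>, UInt8>);

    int main() { return absValue(Int8{-5}) == UInt8{5} ? 0 : 1; }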
@@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
     if (!cgroupsV2MemoryControllerEnabled())
         return {};

-    std::string cgroup = cgroupV2OfProcess();
-    auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+    std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
+    if (current_cgroup.empty())
+        return {};

     /// Open the bottom-most nested memory limit setting file. If there is no such file at the current
     /// level, try again at the parent level as memory settings are inherited.
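Note: after this refactoring, cgroupV2PathOfProcess() folds "cannot determine the cgroup" into a single empty-path check, replacing the old empty-string-plus-mount-point fallback at every call site. A sketch of the caller pattern for the memory limit (assumes Linux with cgroups v2 mounted at /sys/fs/cgroup; memory.max semantics per the kernel docs; function names here are illustrative):

    #include <cstdint>
    #include <filesystem>
    #include <fstream>
    #include <optional>
    #include <string>

    namespace fs = std::filesystem;

    // Walk from the process's cgroup towards the root, reading the bottom-most
    // "memory.max" (limits are inherited). A value of "max" means no limit here.
    std::optional<uint64_t> readCgroupV2MemoryMax(fs::path cgroup_dir)
    {
        const fs::path mount_point = "/sys/fs/cgroup"; // default_cgroups_mount in the code above
        while (true)
        {
            std::string line;
            std::ifstream file(cgroup_dir / "memory.max");
            if (file.is_open() && std::getline(file, line) && line != "max")
                return std::stoull(line);
            if (cgroup_dir == mount_point) // reached the root cgroup: unlimited
                return std::nullopt;
            cgroup_dir = cgroup_dir.parent_path();
        }
    }

    int main()
    {
        // In the real code the starting point comes from cgroupV2PathOfProcess().
        auto limit = readCgroupV2MemoryMax("/sys/fs/cgroup/user.slice");
        return limit.has_value() ? 0 : 1;
    }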
base/base/isSharedPtrUnique.h (new file, 9 lines)
@@ -0,0 +1,9 @@
#pragma once

#include <memory>

template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
    return ptr.use_count() == 1;
}
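Note: this helper is a drop-in replacement for std::shared_ptr::unique(), which was deprecated in C++17 and removed in C++20. Usage sketch; as with unique(), the answer is only reliable when no other thread can change the ownership count concurrently:

    #include <memory>
    #include "base/isSharedPtrUnique.h"

    int main()
    {
        auto p = std::make_shared<int>(42);
        bool sole_owner = isSharedPtrUnique(p);  // true: only p owns the int
        auto q = p;
        bool still_sole = isSharedPtrUnique(p);  // false: q shares ownership
        return (sole_owner && !still_sole) ? 0 : 1;
    }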
@@ -232,7 +232,7 @@ void Foundation_API format(
     const Any & value10);


-void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
+void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
     /// Supports a variable number of arguments and is used by
     /// all other variants of format().

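Note: the rename appears to sidestep an overload hazard: Poco::Any is constructible from almost any copyable type, so a std::vector<Any> argument passed to plain format() could bind to a single-Any overload and be formatted as one value instead of being expanded. A usage sketch of the renamed batched call, assuming this vendored Poco's headers:

    #include <string>
    #include <vector>
    #include "Poco/Any.h"
    #include "Poco/Format.h"

    int main()
    {
        std::string result;
        std::vector<Poco::Any> args;
        args.push_back(std::string("ClickHouse"));
        args.push_back(42);
        // Unambiguously expands the vector as the argument list: "ClickHouse: 42"
        Poco::formatVector(result, "%s: %d", args);
        return result.empty() ? 1 : 0;
    }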
@@ -21,6 +21,8 @@
 #include "Poco/AtomicCounter.h"
 #include "Poco/Foundation.h"

+#include <atomic>
+

 namespace Poco
 {
@@ -51,8 +51,8 @@ namespace
         }
         if (width != 0) str.width(width);
     }


     void parsePrec(std::ostream& str, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
     {
         if (itFmt != endFmt && *itFmt == '.')
@@ -67,7 +67,7 @@ namespace
             if (prec >= 0) str.precision(prec);
         }
     }

     char parseMod(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
     {
         char mod = 0;
@@ -77,13 +77,13 @@ namespace
             {
             case 'l':
             case 'h':
             case 'L':
             case '?': mod = *itFmt++; break;
             }
         }
         return mod;
     }

     std::size_t parseIndex(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
     {
         int index = 0;
@@ -110,8 +110,8 @@ namespace
             case 'f': str << std::fixed; break;
             }
         }


     void writeAnyInt(std::ostream& str, const Any& any)
     {
         if (any.type() == typeid(char))
@@ -201,7 +201,7 @@ namespace
                 str << RefAnyCast<std::string>(*itVal++);
                 break;
             case 'z':
                 str << AnyCast<std::size_t>(*itVal++);
                 break;
             case 'I':
             case 'D':
@@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
 {
     std::vector<Any> args;
     args.push_back(value);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     std::vector<Any> args;
     args.push_back(value1);
     args.push_back(value2);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value1);
     args.push_back(value2);
     args.push_back(value3);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value2);
     args.push_back(value3);
     args.push_back(value4);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value3);
     args.push_back(value4);
     args.push_back(value5);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value4);
     args.push_back(value5);
     args.push_back(value6);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value5);
     args.push_back(value6);
     args.push_back(value7);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value6);
     args.push_back(value7);
     args.push_back(value8);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value7);
     args.push_back(value8);
     args.push_back(value9);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -420,16 +420,16 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
     args.push_back(value8);
     args.push_back(value9);
     args.push_back(value10);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }


-void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
+void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
 {
     std::string::const_iterator itFmt = fmt.begin();
     std::string::const_iterator endFmt = fmt.end();
     std::vector<Any>::const_iterator itVal = values.begin();
     std::vector<Any>::const_iterator endVal = values.end();
     while (itFmt != endFmt)
     {
         switch (*itFmt)
@@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const

     for (int i = 0; i < 12; ++i)
     {
-        s += format(fmt, (unsigned int) _id[i]);
+        s += Poco::format(fmt, (unsigned int) _id[i]);
     }
     return s;
 }
@@ -43,9 +43,9 @@ namespace Poco {
 namespace MongoDB {
 
 
-static const std::string keyCursor {"cursor"};
-static const std::string keyFirstBatch {"firstBatch"};
-static const std::string keyNextBatch {"nextBatch"};
+[[ maybe_unused ]] static const std::string keyCursor {"cursor"};
+[[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
+[[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};
 
 static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);
 
@@ -131,7 +131,7 @@ OpMsgMessage& OpMsgCursor::next(Connection& connection)
         connection.readResponse(_response);
     }
     else
 #endif
     {
         _response.clear();
         _query.setCursor(_cursorID, _batchSize);
@@ -79,7 +79,7 @@ namespace Net
         /// Returns the value of the first name-value pair with the given name.
         /// If no value with the given name has been found, the defaultValue is returned.
 
-    const std::vector<std::reference_wrapper<const std::string>> getAll(const std::string & name) const;
+    std::vector<std::string> getAll(const std::string & name) const;
         /// Returns all values of all name-value pairs with the given name.
         ///
         /// Returns an empty vector if there are no name-value pairs with the given name.
@@ -17,9 +17,10 @@
 #include "Poco/NumberFormatter.h"
 #include "Poco/NumberParser.h"
 #include "Poco/String.h"
+#include <charconv>
 #include <format>
 
 
 using Poco::NumberFormatter;
 using Poco::NumberParser;
 using Poco::icompare;
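The new <charconv> include suggests the length parsing below moves to std::from_chars; the changed body lines are not visible in this extract, so the following is only a sketch of that pattern under that assumption (parseLength is a hypothetical helper, not from this commit):

    #include <charconv>
    #include <ios>
    #include <string>

    std::streamsize parseLength(const std::string & s)
    {
        std::streamsize value = 0;
        auto res = std::from_chars(s.data(), s.data() + s.size(), value);
        return res.ec == std::errc() ? value : 0; // sketch: returns 0 on parse failure
    }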
@@ -75,7 +75,7 @@ void HTTPMessage::setContentLength(std::streamsize length)
         erase(CONTENT_LENGTH);
 }
 
 
 std::streamsize HTTPMessage::getContentLength() const
 {
     const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@@ -98,7 +98,7 @@ void HTTPMessage::setContentLength64(Poco::Int64 length)
         erase(CONTENT_LENGTH);
 }
 
 
 Poco::Int64 HTTPMessage::getContentLength64() const
 {
     const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@@ -133,13 +133,13 @@ void HTTPMessage::setChunkedTransferEncoding(bool flag)
         setTransferEncoding(IDENTITY_TRANSFER_ENCODING);
 }
 
 
 bool HTTPMessage::getChunkedTransferEncoding() const
 {
     return icompare(getTransferEncoding(), CHUNKED_TRANSFER_ENCODING) == 0;
 }
 
 
 void HTTPMessage::setContentType(const std::string& mediaType)
 {
     if (mediaType.empty())
@@ -154,7 +154,7 @@ void HTTPMessage::setContentType(const MediaType& mediaType)
     setContentType(mediaType.toString());
 }
 
 
 const std::string& HTTPMessage::getContentType() const
 {
     return get(CONTENT_TYPE, UNKNOWN_CONTENT_TYPE);
@@ -102,9 +102,9 @@ const std::string& NameValueCollection::get(const std::string& name, const std::
     return defaultValue;
 }
 
-const std::vector<std::reference_wrapper<const std::string>> NameValueCollection::getAll(const std::string& name) const
+std::vector<std::string> NameValueCollection::getAll(const std::string& name) const
 {
-    std::vector<std::reference_wrapper<const std::string>> values;
+    std::vector<std::string> values;
     for (ConstIterator it = _map.find(name); it != _map.end(); it++)
         if (it->first == name)
             values.push_back(it->second);
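The switch from reference_wrapper to owned strings removes a lifetime hazard: the wrappers pointed into the collection's internal map and would dangle once the collection was mutated or destroyed. A small sketch of the now-safe usage (hypothetical caller, not from this commit):

    #include <string>
    #include <vector>
    #include "Poco/Net/NameValueCollection.h"

    void example()
    {
        Poco::Net::NameValueCollection headers;
        headers.add("X-Token", "a");
        headers.add("X-Token", "b");
        std::vector<std::string> all = headers.getAll("X-Token"); // copies the values out
        headers.clear(); // 'all' stays valid; reference_wrappers would now dangle
    }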
@@ -42,9 +42,19 @@ endif ()
 # But use 2 parallel jobs, since:
 # - this is what llvm does
 # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
-    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
-    set (PARALLEL_LINK_JOBS 2)
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
+    if (ARCH_AARCH64)
+        # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
+        set (PARALLEL_LINK_JOBS 1)
+        if (LINKER_NAME MATCHES "lld")
+            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
+            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
+        endif()
+    elseif (PARALLEL_LINK_JOBS GREATER 2)
+        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+        set (PARALLEL_LINK_JOBS 2)
+    endif ()
 endif()
 
 message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
contrib/avro vendored
@@ -1 +1 @@
-Subproject commit d43acc84d3d455b016f847d6666fbc3cd27f16a9
+Subproject commit 545e7002683cbc2198164d93088ac8e4955b4628

contrib/azure vendored
@@ -1 +1 @@
-Subproject commit 92c94d7f37a43cc8fc4d466884a95f610c0593bf
+Subproject commit ea3e19a7be08519134c643177d56c7484dfec884

contrib/grpc vendored
@@ -1 +1 @@
-Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c
+Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
@@ -34,11 +34,7 @@ if (OS_LINUX)
     # avoid spurious latencies and additional work associated with
     # MADV_DONTNEED. See
    # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
-    if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
-        set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
-    else()
-        set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
-    endif()
+    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
 else()
     set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
 endif()
@@ -179,12 +175,19 @@ endif ()
 
 target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
 
-# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
-# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
-# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
+# jemalloc provides support for two unwind flavors:
+# - JEMALLOC_PROF_LIBUNWIND - unw_backtrace() - gnu libunwind (compatible with llvm libunwind)
+# - JEMALLOC_PROF_LIBGCC - _Unwind_Backtrace() - the original HP libunwind and the one coming with gcc / g++ / libstdc++.
 #
-# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
-target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
+# But for JEMALLOC_PROF_LIBGCC it also calls _Unwind_Backtrace() during
+# bootstrapping of jemalloc, which may lead to a deadlock if dlsym does
+# allocations somewhere (like glibc does prior to 2.34, see [1]).
+#
+# [1]: https://sourceware.org/git/?p=glibc.git;a=commit;h=fada9018199c21c469ff0e731ef75c6020074ac9
+#
+# And since ClickHouse's unwind already supports unw_backtrace() we can safely
+# switch to it to avoid this deadlock.
+target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
 target_link_libraries (_jemalloc PRIVATE unwind)
 
 # for RTLD_NEXT
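For reference, a minimal sketch of the unw_backtrace() entry point that jemalloc switches to (this assumes the llvm-libunwind extension referenced in the comment above; illustrative only):

    #include <libunwind.h>

    void dump_return_addresses()
    {
        void * frames[64];
        int n = unw_backtrace(frames, 64); // fills frames[0..n) without going through _Unwind_Backtrace
        (void) n;
    }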
@@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project.
 - Added JEMALLOC_CONFIG_MALLOC_CONF substitution
 - Add musl support (USE_MUSL)
 - Also note, that darwin build requires JEMALLOC_PREFIX, while others do not
+- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled
+
+  CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY)
+  Let's disable it for now, and this means that CLOCK_MONOTONIC will be used,
+  and this should not be a problem, since:
+  - jemalloc does not call clock_gettime() that frequently
+  - the difference is CLOCK_MONOTONIC 20ns and CLOCK_MONOTONIC_COARSE 4ns
+
+  This can be done with the following command:
+
+    gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'
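The 20ns vs 4ns figures cited above can be checked with a rough probe like the following (Linux-only sketch; absolute numbers vary by machine):

    #include <time.h>
    #include <cstdio>

    // Measure the average cost of one clock_gettime() call for a given clock.
    static long nsPerCall(clockid_t id)
    {
        timespec ts, start, end;
        const long iters = 1000000;
        clock_gettime(CLOCK_MONOTONIC, &start);
        for (long i = 0; i < iters; ++i)
            clock_gettime(id, &ts);
        clock_gettime(CLOCK_MONOTONIC, &end);
        return ((end.tv_sec - start.tv_sec) * 1000000000L + (end.tv_nsec - start.tv_nsec)) / iters;
    }

    int main()
    {
        std::printf("CLOCK_MONOTONIC: ~%ld ns/call\n", nsPerCall(CLOCK_MONOTONIC));
        std::printf("CLOCK_MONOTONIC_COARSE: ~%ld ns/call\n", nsPerCall(CLOCK_MONOTONIC_COARSE));
        return 0;
    }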
@@ -96,7 +96,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

@@ -96,7 +96,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

@@ -99,7 +99,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -54,7 +54,6 @@ set(SRCS
     "${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
     "${LIBPQ_SOURCE_DIR}/port/thread.c"
     "${LIBPQ_SOURCE_DIR}/port/path.c"
-    "${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c"
 )
 
 add_library(_libpq ${SRCS})

contrib/libunwind vendored
@@ -1 +1 @@
-Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
+Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2

@@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
-if (APPLE)
-    set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
-endif ()
 
 set(LIBUNWIND_C_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"

@@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")
 
 target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
 target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
+target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
 # NOTE: from this macros sizeof(unw_context_t)/sizeof(unw_cursor_t) is depends, so it should be set always
 target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)
 
contrib/openssl vendored
@@ -1 +1 @@
-Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08
+Subproject commit 66deddc1e53cda8706604a019777259372d1bd62

@@ -1298,7 +1298,6 @@ elseif(ARCH_PPC64LE)
         ${OPENSSL_SOURCE_DIR}/crypto/camellia/camellia.c
         ${OPENSSL_SOURCE_DIR}/crypto/camellia/cmll_cbc.c
         ${OPENSSL_SOURCE_DIR}/crypto/chacha/chacha_enc.c
-        ${OPENSSL_SOURCE_DIR}/crypto/mem_clr.c
         ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_enc.c
         ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_skey.c
         ${OPENSSL_SOURCE_DIR}/crypto/sha/keccak1600.c

contrib/pocketfft vendored
@@ -1 +1 @@
-Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
+Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3

contrib/rocksdb vendored
@@ -1 +1 @@
-Subproject commit 078fa5638690004e1f744076d1bdcc4e93767304
+Subproject commit be366233921293bd07a84dc4ea6991858665f202
@@ -5,20 +5,13 @@ if (NOT ENABLE_ROCKSDB)
     return()
 endif()
 
-## this file is extracted from `contrib/rocksdb/CMakeLists.txt`
-set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
-list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")
+# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
 
-set(PORTABLE ON)
-## always disable jemalloc for rocksdb by default
-## because it introduces non-standard jemalloc APIs
 option(WITH_JEMALLOC "build with JeMalloc" OFF)
-set(USE_SNAPPY OFF)
-if (TARGET ch_contrib::snappy)
-    set(USE_SNAPPY ON)
-endif()
-option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
-## lz4, zlib, zstd is enabled in ClickHouse by default
+
+option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
+
+# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
+option(WITH_SNAPPY "build with SNAPPY" ON)
 option(WITH_LZ4 "build with lz4" ON)
 option(WITH_ZLIB "build with zlib" ON)
 option(WITH_ZSTD "build with zstd" ON)

@@ -26,78 +19,46 @@ option(WITH_ZSTD "build with zstd" ON)
 # third-party/folly is only validated to work on Linux and Windows for now.
 # So only turn it on there by default.
 if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
-    if(MSVC AND MSVC_VERSION LESS 1910)
-        # Folly does not compile with MSVC older than VS2017
-        option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
-    else()
-        option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
-    endif()
+    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
 else()
     option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
 endif()
 
-if( NOT DEFINED CMAKE_CXX_STANDARD )
-    set(CMAKE_CXX_STANDARD 11)
+if(WITH_SNAPPY)
+    add_definitions(-DSNAPPY)
+    list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
 endif()
 
-if(MSVC)
-    option(WITH_XPRESS "build with windows built in compression" OFF)
-    include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc")
-else()
-    if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
-        # FreeBSD has jemalloc as default malloc
-        # but it does not have all the jemalloc files in include/...
-        set(WITH_JEMALLOC ON)
-    else()
-        if(WITH_JEMALLOC AND TARGET ch_contrib::jemalloc)
-            add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
-            list(APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
-        endif()
-    endif()
-
-    if(WITH_SNAPPY)
-        add_definitions(-DSNAPPY)
-        list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
-    endif()
-
-    if(WITH_ZLIB)
-        add_definitions(-DZLIB)
-        list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
-    endif()
-
-    if(WITH_LZ4)
-        add_definitions(-DLZ4)
-        list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
-    endif()
-
-    if(WITH_ZSTD)
-        add_definitions(-DZSTD)
-        list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
-    endif()
+if(WITH_ZLIB)
+    add_definitions(-DZLIB)
+    list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
 endif()
 
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
-    if(POWER9)
-        set(HAS_POWER9 1)
-        set(HAS_ALTIVEC 1)
-    else()
-        set(HAS_POWER8 1)
-        set(HAS_ALTIVEC 1)
-    endif(POWER9)
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+if(WITH_LZ4)
+    add_definitions(-DLZ4)
+    list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
+endif()
 
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
-    set(HAS_ARMV8_CRC 1)
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
+if(WITH_ZSTD)
+    add_definitions(-DZSTD)
+    list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
+endif()
 
+option(PORTABLE "build a portable binary" ON)
 
-if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
+if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
     add_definitions(-DHAVE_SSE42)
     add_definitions(-DHAVE_PCLMUL)
 endif()
 
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
+    set (HAS_ARMV8_CRC 1)
+    # the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
+    # ARM flags, see cmake/cpu_features.cmake
+    # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
+    # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
+endif()
+
 set (HAVE_THREAD_LOCAL 1)
 if(HAVE_THREAD_LOCAL)
     add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
|
|||||||
add_definitions(-DOS_MACOSX)
|
add_definitions(-DOS_MACOSX)
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||||
add_definitions(-DOS_LINUX)
|
add_definitions(-DOS_LINUX)
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
|
|
||||||
add_definitions(-DOS_SOLARIS)
|
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
|
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
|
||||||
add_definitions(-DOS_FREEBSD)
|
add_definitions(-DOS_FREEBSD)
|
||||||
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
|
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
|
||||||
@ -123,12 +82,10 @@ endif()
|
|||||||
|
|
||||||
if (OS_LINUX)
|
if (OS_LINUX)
|
||||||
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
|
add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
|
||||||
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
|
|
||||||
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
|
add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
|
||||||
elseif (OS_FREEBSD)
|
|
||||||
add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
|
||||||
|
|
||||||
include_directories(${ROCKSDB_SOURCE_DIR})
|
include_directories(${ROCKSDB_SOURCE_DIR})
|
||||||
include_directories("${ROCKSDB_SOURCE_DIR}/include")
|
include_directories("${ROCKSDB_SOURCE_DIR}/include")
|
||||||
@ -136,11 +93,11 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
|||||||
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
|
include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Main library source code
|
|
||||||
|
|
||||||
set(SOURCES
|
set(SOURCES
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
||||||
@ -156,6 +113,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
|
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
|
||||||
@ -229,6 +187,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
|
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
|
${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
|
${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
|
${ROCKSDB_SOURCE_DIR}/file/file_util.cc
|
||||||
@ -247,6 +206,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
|
${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
|
${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
|
${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
|
${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
|
${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
|
${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
|
||||||
@ -322,6 +282,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
|
${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
|
${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
|
${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
|
${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
|
${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
|
${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
|
||||||
@ -333,9 +294,12 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
|
${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
|
${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
|
${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
|
||||||
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
|
${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
|
${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
||||||
@ -347,6 +311,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/util/regex.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
||||||
@ -362,18 +327,23 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
|
||||||
@ -393,6 +363,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
|
||||||
@ -411,6 +382,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
|
||||||
|
${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
|
||||||
@ -425,7 +397,7 @@ set(SOURCES
|
|||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
|
||||||
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
|
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
|
||||||
rocksdb_build_version.cc)
|
build_version.cc) # generated by hand
|
||||||
|
|
||||||
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
|
if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
|
||||||
set_source_files_properties(
|
set_source_files_properties(
|
||||||
@ -462,5 +434,6 @@ endif()
|
|||||||
add_library(_rocksdb ${SOURCES})
|
add_library(_rocksdb ${SOURCES})
|
||||||
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
|
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
|
||||||
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
|
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
|
||||||
|
|
||||||
# SYSTEM is required to overcome some issues
|
# SYSTEM is required to overcome some issues
|
||||||
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
|
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
|
||||||
|
@@ -26,7 +26,10 @@ RUN apt-get update \
         zstd \
         --yes --no-install-recommends \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
+    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
+    && groupadd --system --gid 1000 clickhouse \
+    && useradd --system --gid 1000 --uid 1000 -m clickhouse
+# ^ For some reason, groupadd and useradd are needed for tests with 'expect', but I don't know why.
 
 COPY requirements.txt /
 RUN pip3 install --no-cache-dir -r /requirements.txt
@@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT
 stage=${stage:-}
 
 # Compiler version, normally set by Dockerfile
-export LLVM_VERSION=${LLVM_VERSION:-17}
+export LLVM_VERSION=${LLVM_VERSION:-18}
 
 # A variable to pass additional flags to CMake.
 # Here we explicitly default it to nothing so that bash doesn't complain about
@@ -284,6 +284,11 @@ function run_tests
         NPROC=1
     fi
 
+    export CLICKHOUSE_CONFIG_DIR=$FASTTEST_DATA
+    export CLICKHOUSE_CONFIG="$FASTTEST_DATA/config.xml"
+    export CLICKHOUSE_USER_FILES="$FASTTEST_DATA/user_files"
+    export CLICKHOUSE_SCHEMA_FILES="$FASTTEST_DATA/format_schemas"
+
     local test_opts=(
         --hung-check
         --fast-tests-only
@@ -33,13 +33,9 @@ RUN apt-get update \
 COPY requirements.txt /
 RUN pip3 install --no-cache-dir -r /requirements.txt
 
-COPY * /
-
 ENV FUZZER_ARGS="-max_total_time=60"
 
 SHELL ["/bin/bash", "-c"]
-CMD set -o pipefail \
-    && timeout -s 9 1h /run_libfuzzer.py 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
 
 # docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer
 
@@ -4,6 +4,9 @@
 source /setup_export_logs.sh
 set -e -x
 
+MAX_RUN_TIME=${MAX_RUN_TIME:-3600}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 3600 : MAX_RUN_TIME))
+
 # Choose random timezone for this test run
 TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
 echo "Choosen random timezone $TZ"
@@ -16,11 +19,17 @@ dpkg -i package_folder/clickhouse-client_*.deb
 
 ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
 
+# shellcheck disable=SC1091
+source /utils.lib
+
 # install test configs
 /usr/share/clickhouse-test/config/install.sh
 
 azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
 
 ./setup_minio.sh stateful
+./mc admin trace clickminio > /test_output/minio.log &
+MC_ADMIN_PID=$!
 
 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
 
@@ -236,7 +245,22 @@ function run_tests()
 }
 
 export -f run_tests
-timeout "$MAX_RUN_TIME" bash -c run_tests ||:
+
+function timeout_with_logging() {
+    local exit_code=0
+
+    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
+
+    if [[ "${exit_code}" -eq "124" ]]
+    then
+        echo "The command 'timeout ${*}' has been killed by timeout"
+    fi
+
+    return $exit_code
+}
+
+TIMEOUT=$((MAX_RUN_TIME - 700))
+timeout_with_logging "$TIMEOUT" bash -c run_tests ||:
 
 echo "Files in current directory"
 ls -la ./
@@ -251,6 +275,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
     sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
 fi
 
+# Kill minio admin client to stop collecting logs
+kill $MC_ADMIN_PID
 rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
 
 zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst ||:

@@ -272,3 +298,5 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
     mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
     mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
 fi
+
+collect_core_dumps
@@ -86,6 +86,7 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
 ENV MINIO_ROOT_USER="clickhouse"
 ENV MINIO_ROOT_PASSWORD="clickhouse"
 ENV EXPORT_S3_STORAGE_POLICIES=1
+ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"
 
 RUN npm install -g azurite@3.30.0 \
     && npm install -g tslib && npm install -g node
@@ -8,6 +8,7 @@ cryptography==3.4.8
 dbus-python==1.2.18
 distro==1.7.0
 docutils==0.17.1
+grpcio==1.47.0
 gyp==0.1
 httplib2==0.20.2
 idna==3.3

@@ -28,6 +29,7 @@ packaging==24.1
 pandas==1.5.3
 pip==24.1.1
 pipdeptree==2.23.0
+protobuf==4.25.3
 pyarrow==15.0.0
 pyasn1==0.4.8
 PyJWT==2.3.0
@@ -6,8 +6,11 @@ source /setup_export_logs.sh
 # fail on errors, verbose and export all env variables
 set -e -x -a
 
-MAX_RUN_TIME=${MAX_RUN_TIME:-10800}
-MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))
+MAX_RUN_TIME=${MAX_RUN_TIME:-9000}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 9000 : MAX_RUN_TIME))
+
+USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
+USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
 
 # Choose random timezone for this test run.
 #
@@ -45,6 +48,9 @@ source /utils.lib
 /usr/share/clickhouse-test/config/install.sh
 
 ./setup_minio.sh stateless
+./mc admin trace clickminio > /test_output/minio.log &
+MC_ADMIN_PID=$!
+
 ./setup_hdfs_minicluster.sh
 
 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@@ -92,7 +98,7 @@ fi
 # simplest way to forward env variables to server
 sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
 
-if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
 
     sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
|
|||||||
--keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
|
--keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
|
||||||
--prometheus.port 29988 \
|
--prometheus.port 29988 \
|
||||||
--macros.shard s2 # It doesn't work :(
|
--macros.shard s2 # It doesn't work :(
|
||||||
|
|
||||||
MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
|
|
||||||
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
||||||
sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
|
sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
|
||||||
| sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
|
| sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
|
||||||
> /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
|
> /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
|
||||||
@ -151,9 +154,6 @@ if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
|||||||
--keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
|
--keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
|
||||||
--prometheus.port 19988 \
|
--prometheus.port 19988 \
|
||||||
--macros.replica r2 # It doesn't work :(
|
--macros.replica r2 # It doesn't work :(
|
||||||
|
|
||||||
MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
|
|
||||||
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Wait for the server to start, but not for too long.
|
# Wait for the server to start, but not for too long.
|
||||||
@@ -164,7 +164,6 @@ do
 done
 
 setup_logs_replication
 
 attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
 
 function fn_exists() {
@@ -209,15 +208,15 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('--no-random-merge-tree-settings')
     fi
 
-    if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+    if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--shared-catalog')
     fi
 
-    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
         # Too many tests fail for DatabaseReplicated in parallel.
         ADDITIONAL_OPTIONS+=('--jobs')
-        ADDITIONAL_OPTIONS+=('2')
+        ADDITIONAL_OPTIONS+=('3')
     elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
         # Coverage on a per-test basis could only be collected sequentially.
         # Do not set the --jobs parameter.
@@ -249,7 +248,7 @@ function run_tests()
     try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
 
     set +e
-    timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
+    timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
         --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee -a test_output/test_result.txt
@@ -260,7 +259,7 @@ export -f run_tests
 
 
 # This should be enough to setup job and collect artifacts
-TIMEOUT=$((MAX_RUN_TIME - 300))
+TIMEOUT=$((MAX_RUN_TIME - 700))
 if [ "$NUM_TRIES" -gt "1" ]; then
     # We don't run tests with Ordinary database in PRs, only in master.
     # So run new/changed tests with Ordinary at least once in flaky check.
@@ -289,7 +288,7 @@ do
     err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
     echo "$err"
     [[ "0" != "${#err}" ]] && failed_to_save_logs=1
-    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
@@ -298,7 +297,7 @@ do
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
     fi
 
-    if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+    if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
|
|||||||
# Why do we read data with clickhouse-local?
|
# Why do we read data with clickhouse-local?
|
||||||
# Because it's the simplest way to read it when server has crashed.
|
# Because it's the simplest way to read it when server has crashed.
|
||||||
sudo clickhouse stop ||:
|
sudo clickhouse stop ||:
|
||||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
|
||||||
|
|
||||||
|
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||||
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
|
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
|
||||||
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
|
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
||||||
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
|
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Kill minio admin client to stop collecting logs
|
||||||
|
kill $MC_ADMIN_PID
|
||||||
|
|
||||||
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
|
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
|
||||||
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
|
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
|
||||||
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
|
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
|
||||||
@ -341,12 +345,13 @@ if [ $failed_to_save_logs -ne 0 ]; then
|
|||||||
for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
|
for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
|
||||||
do
|
do
|
||||||
clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
|
clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
|
||||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
|
||||||
|
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||||
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
|
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
|
||||||
clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
|
clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
||||||
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
|
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
@ -382,7 +387,8 @@ rm -rf /var/lib/clickhouse/data/system/*/
|
|||||||
tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
|
tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
|
||||||
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
|
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
|
||||||
|
|
||||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
|
||||||
|
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||||
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
|
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
|
||||||
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
|
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
|
||||||
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
|
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
|
||||||
@ -393,9 +399,11 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
|
|||||||
tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
|
tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
|
||||||
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
|
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
|
||||||
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
|
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
|
||||||
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
|
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
|
||||||
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
|
tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
collect_core_dumps
|
||||||
|
@@ -10,7 +10,7 @@ cd hadoop-3.3.1
 export JAVA_HOME=/usr
 mkdir -p target/test/data
 chown clickhouse ./target/test/data
-sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 &
+sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 &

 while ! nc -z localhost 12222; do
     sleep 1
@@ -1,8 +1,5 @@
 #!/bin/bash

-# core.COMM.PID-TID
-sysctl kernel.core_pattern='core.%e.%p-%P'
-
 OK="\tOK\t\\N\t"
 FAIL="\tFAIL\t\\N\t"

@@ -315,12 +312,4 @@ function collect_query_and_trace_logs()
     done
 }

-function collect_core_dumps()
-{
-    find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
-        zstd --threads=0 "$core"
-        mv "$core.zst" /test_output/
-    done
-}
-
 # vi: ft=bash
@@ -1,5 +1,10 @@
 #!/bin/bash

+# core.COMM.PID-TID
+sysctl kernel.core_pattern='core.%e.%p-%P'
+# ASAN doesn't work with suid_dumpable=2
+sysctl fs.suid_dumpable=1

 function run_with_retry()
 {
     if [[ $- =~ e ]]; then

@@ -48,4 +53,12 @@ function timeout_with_logging() {
     return $exit_code
 }

+function collect_core_dumps()
+{
+    find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
+        zstd --threads=0 "$core"
+        mv "$core.zst" /test_output/
+    done
+}

 # vi: ft=bash

@@ -21,6 +21,9 @@ source /attach_gdb.lib
 # shellcheck source=../stateless/stress_tests.lib
 source /stress_tests.lib

+# shellcheck disable=SC1091
+source /utils.lib

 install_packages package_folder

 # Thread Fuzzer allows to check more permutations of possible thread scheduling
@@ -3,7 +3,7 @@ aiosignal==1.3.1
 astroid==3.1.0
 async-timeout==4.0.3
 attrs==23.2.0
-black==23.12.0
+black==24.4.2
 boto3==1.34.131
 botocore==1.34.131
 certifi==2024.6.2

@@ -17,6 +17,7 @@ ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18

 RUN apt-get update \
     && apt-get install \
+        sudo \
         apt-transport-https \
         apt-utils \
         ca-certificates \
@@ -11,6 +11,7 @@ TIMEOUT_SIGN = "[ Timeout! "
 UNKNOWN_SIGN = "[ UNKNOWN "
 SKIPPED_SIGN = "[ SKIPPED "
 HUNG_SIGN = "Found hung queries in processlist"
+SERVER_DIED_SIGN = "Server died, terminating all processes"
 DATABASE_SIGN = "Database: "

 SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]

@@ -25,6 +26,7 @@ def process_test_log(log_path, broken_tests):
     failed = 0
     success = 0
     hung = False
+    server_died = False
     retries = False
     success_finish = False
     test_results = []

@@ -41,6 +43,8 @@ def process_test_log(log_path, broken_tests):
         if HUNG_SIGN in line:
             hung = True
             break
+        if SERVER_DIED_SIGN in line:
+            server_died = True
         if RETRIES_SIGN in line:
             retries = True
         if any(

@@ -123,6 +127,7 @@ def process_test_log(log_path, broken_tests):
         failed,
         success,
         hung,
+        server_died,
         success_finish,
         retries,
         test_results,

@@ -150,6 +155,7 @@ def process_result(result_path, broken_tests):
         failed,
         success,
         hung,
+        server_died,
         success_finish,
         retries,
         test_results,

@@ -165,6 +171,10 @@ def process_result(result_path, broken_tests):
         description = "Some queries hung, "
         state = "failure"
         test_results.append(("Some queries hung", "FAIL", "0", ""))
+    elif server_died:
+        description = "Server died, "
+        state = "failure"
+        test_results.append(("Server died", "FAIL", "0", ""))
     elif not success_finish:
         description = "Tests are not finished, "
         state = "failure"

@@ -218,5 +228,20 @@ if __name__ == "__main__":
     state, description, test_results = process_result(args.in_results_dir, broken_tests)
     logging.info("Result parsed")
     status = (state, description)

+    def test_result_comparator(item):
+        # sort by status then by check name
+        order = {
+            "FAIL": 0,
+            "Timeout": 1,
+            "NOT_FAILED": 2,
+            "BROKEN": 3,
+            "OK": 4,
+            "SKIPPED": 5,
+        }
+        return order.get(item[1], 10), str(item[0]), item[1]
+
+    test_results.sort(key=test_result_comparator)

     write_results(args.out_results_file, args.out_status_file, test_results, status)
     logging.info("Result written")
@@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl

 The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.

-## Install Clang-16
+## Install Clang-18

 Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
 ```
@@ -226,15 +226,59 @@ Other IDEs you can use are [Sublime Text](https://www.sublimetext.com/), [Visual

 ## Writing Code {#writing-code}

-The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/
+Below you can find some quick links which may be useful when writing code for ClickHouse:

-The Code Style Guide: https://clickhouse.com/docs/en/development/style/
+- [ClickHouse architecture description](https://clickhouse.com/docs/en/development/architecture/).
+- [The code style guide](https://clickhouse.com/docs/en/development/style/).
+- [Adding third-party libraries](https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries)
+- [Writing tests](https://clickhouse.com/docs/en/development/tests/)
+- [List of open issues](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest)

-Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries
+## Writing Documentation {#writing-documentation}

-Writing tests: https://clickhouse.com/docs/en/development/tests/
+As part of every pull request which adds a new feature, it is necessary to write documentation for it. If you'd like to preview your documentation changes, the instructions for how to build the documentation page locally are available in the README.md file [here](https://github.com/ClickHouse/clickhouse-docs). When adding a new function to ClickHouse you can use the template below as a guide:

-List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest
+```markdown
+# newFunctionName
+
+A short description of the function goes here. It should describe briefly what it does and a typical usage case.
+
+**Syntax**
+
+\```sql
+newFunctionName(arg1, arg2[, arg3])
+\```
+
+**Arguments**
+
+- `arg1` — Description of the argument. [DataType](../data-types/float.md)
+- `arg2` — Description of the argument. [DataType](../data-types/float.md)
+- `arg3` — Description of optional argument (optional). [DataType](../data-types/float.md)
+
+**Implementation Details**
+
+A description of implementation details if relevant.
+
+**Returned value**
+
+- Returns {insert what the function returns here}. [DataType](../data-types/float.md)
+
+**Example**
+
+Query:
+
+\```sql
+SELECT 'write your example query here';
+\```
+
+Response:
+
+\```response
+┌───────────────────────────────────┐
+│ the result of the query           │
+└───────────────────────────────────┘
+\```
+```

 ## Test Data {#test-data}
@@ -75,7 +75,7 @@ SETTINGS
 Possible values:

 - unordered — With unordered mode, the set of all already processed files is tracked with persistent nodes in ZooKeeper.
-- ordered — With ordered mode, only the max name of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper.
+- ordered — With ordered mode, the files are processed in lexicographic order. It means that if file named 'BBB' was processed at some point and later on a file named 'AA' is added to the bucket, it will be ignored. Only the max name (in lexicographic sense) of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper.

 Default value: `ordered` in versions before 24.6. Starting with 24.6 there is no default value, the setting becomes required to be specified manually. For tables created on earlier versions the default value will remain `Ordered` for compatibility.
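Since newer versions require the mode to be chosen explicitly, here is a minimal sketch of a table definition that pins it down. The bucket URL, format, and columns are illustrative placeholders, not taken from the patch:

```sql
-- Hypothetical S3Queue table; the explicit mode setting is the point here.
CREATE TABLE s3queue_events (name String, value UInt32)
ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*', 'CSVWithNames')
SETTINGS mode = 'ordered'; -- must be set explicitly starting with 24.6
```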
@@ -15,7 +15,7 @@ You have four options for getting up and running with ClickHouse:

 - **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse
 - **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
-- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture
+- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
 - **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub

 ## ClickHouse Cloud
@@ -185,6 +185,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--format, -f` – Use the specified default format to output the result.
 - `--vertical, -E` – If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `–format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
 - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
+- `--memory-usage` – If specified, print memory usage to ‘stderr’ in non-interactive mode. Possible values: 'none' - do not print memory usage, 'default' - print number of bytes, 'readable' - print memory usage in human-readable format.
 - `--stacktrace` – If specified, also print the stack trace if an exception occurs.
 - `--config-file` – The name of the configuration file.
 - `--secure` – If specified, will connect to server over secure connection (TLS). You might need to configure your CA certificates in the [configuration file](#configuration_files). The available configuration settings are the same as for [server-side TLS configuration](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl).

@@ -339,7 +340,7 @@ clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000
 Connect to one of provides hosts: `192.168.1.15`, `192.168.1.25`.

 ``` bash
 clickhouse-client clickhouse://192.168.1.15,192.168.1.25
 ```

 ### Configuration Files {#configuration_files}

@@ -367,7 +368,7 @@ Example of a config file:
 ```

 Or the same config in a YAML format:

 ```yaml
 user: username
 password: 'password'
@@ -67,6 +67,7 @@ The supported formats are:
 | [Prometheus](#prometheus) | ✗ | ✔ |
 | [Protobuf](#protobuf) | ✔ | ✔ |
 | [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
+| [ProtobufList](#protobuflist) | ✔ | ✔ |
 | [Avro](#data-format-avro) | ✔ | ✔ |
 | [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
 | [Parquet](#data-format-parquet) | ✔ | ✔ |

@@ -1535,6 +1536,10 @@ the columns from input data will be mapped to the columns from the table by thei
 Otherwise, the first row will be skipped.
 If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
 the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
+If setting [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) is set to 1,
+the types in header will be written using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes output format.
+If setting [input_format_binary_decode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_decode_types_in_binary_format) is set to 1,
+the types in header will be read using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes input format.
 :::
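A minimal sketch of how the two settings pair up across export and import. The file and table names are placeholders, and the exact clause ordering follows the usage examples elsewhere in these docs:

```sql
-- Write the header types using the binary encoding instead of type-name strings.
SELECT number AS n, toString(number) AS s
FROM numbers(3)
INTO OUTFILE 'rows.bin'
FORMAT RowBinaryWithNamesAndTypes
SETTINGS output_format_binary_encode_types_in_binary_format = 1;

-- Read it back with the matching input-side setting (hypothetical table `t`).
INSERT INTO t
FROM INFILE 'rows.bin'
SETTINGS input_format_binary_decode_types_in_binary_format = 1
FORMAT RowBinaryWithNamesAndTypes;
```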
 ## RowBinaryWithDefaults {#rowbinarywithdefaults}

@@ -1948,6 +1953,35 @@ SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf

 Same as [Protobuf](#protobuf) but for storing/parsing single Protobuf message without length delimiters.

+## ProtobufList {#protobuflist}
+
+Similar to Protobuf but rows are represented as a sequence of sub-messages contained in a message with fixed name "Envelope".
+
+Usage example:
+
+``` sql
+SELECT * FROM test.table FORMAT ProtobufList SETTINGS format_schema = 'schemafile:MessageType'
+```
+
+``` bash
+cat protobuflist_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT ProtobufList SETTINGS format_schema='schemafile:MessageType'"
+```
+
+where the file `schemafile.proto` looks like this:
+
+``` protobuf
+syntax = "proto3";
+message Envelope {
+  message MessageType {
+    string name = 1;
+    string surname = 2;
+    uint32 birthDate = 3;
+    repeated string phoneNumbers = 4;
+  };
+  MessageType row = 1;
+};
+```

 ## Avro {#data-format-avro}

 [Apache Avro](https://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
@@ -6,7 +6,20 @@ sidebar_label: MySQL Interface

 # MySQL Interface

-ClickHouse supports the MySQL wire protocol. This allow tools that are MySQL-compatible to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).
+ClickHouse supports the MySQL wire protocol. This allows certain clients that do not have native ClickHouse connectors to leverage the MySQL protocol instead, and it has been validated with the following BI tools:
+
+- [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)
+- [Tableau Online](../integrations/tableau-online)
+- [QuickSight](../integrations/quicksight)
+
+If you are trying other untested clients or integrations, keep in mind that there could be the following limitations:
+
+- The SSL implementation might not be fully compatible; there could be potential [TLS SNI](https://www.cloudflare.com/learning/ssl/what-is-sni/) issues.
+- A particular tool might require dialect features (e.g., MySQL-specific functions or settings) that are not implemented yet.
+
+If there is a native driver available (e.g., [DBeaver](../integrations/dbeaver)), it is always preferred to use it instead of the MySQL interface. Additionally, while most of the MySQL language clients should work fine, the MySQL interface is not guaranteed to be a drop-in replacement for a codebase with existing MySQL queries.
+
+If your use case involves a particular tool that does not have a native ClickHouse driver, and you would like to use it via the MySQL interface, and you find certain incompatibilities, please [create an issue](https://github.com/ClickHouse/ClickHouse/issues) in the ClickHouse repository.

 ## Enabling the MySQL Interface On ClickHouse Cloud
@@ -124,7 +124,7 @@ which is equal to

 #### Default values for from_env and from_zk attributes

-It's possible to set the default value and substitute it only if the environment variable or zookeeper node is set using `replace="1"`.
+It's possible to set the default value and substitute it only if the environment variable or zookeeper node is set using `replace="1"` (must be declared before from_env).

 With previous example, but `MAX_QUERY_SIZE` is unset:

@@ -132,7 +132,7 @@ With previous example, but `MAX_QUERY_SIZE` is unset:
 <clickhouse>
     <profiles>
         <default>
-            <max_query_size from_env="MAX_QUERY_SIZE" replace="1">150000</max_query_size>
+            <max_query_size replace="1" from_env="MAX_QUERY_SIZE">150000</max_query_size>
         </default>
     </profiles>
 </clickhouse>
@@ -2,15 +2,11 @@
 slug: /en/operations/opentelemetry
 sidebar_position: 62
 sidebar_label: Tracing ClickHouse with OpenTelemetry
-title: "[experimental] Tracing ClickHouse with OpenTelemetry"
+title: "Tracing ClickHouse with OpenTelemetry"
 ---

 [OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from the distributed application. ClickHouse has some support for OpenTelemetry.

-:::note
-This is an experimental feature that will change in backwards-incompatible ways in future releases.
-:::
-
 ## Supplying Trace Context to ClickHouse

 ClickHouse accepts trace context HTTP headers, as described by the [W3C recommendation](https://www.w3.org/TR/trace-context/). It also accepts trace context over a native protocol that is used for communication between ClickHouse servers or between the client and server. For manual testing, trace context headers conforming to the Trace Context recommendation can be supplied to `clickhouse-client` using `--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.
@@ -1030,7 +1030,7 @@ A table with no primary key represents the extreme case of a single equivalence

 The fewer and the larger the equivalence classes are, the higher the degree of freedom when re-shuffling rows.

-The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemir, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.
+The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemire, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.
 It performs three steps:
 1. Find all equivalence classes based on the row values in primary key columns.
 2. For each equivalence class, calculate (usually estimate) the cardinalities of the non-primary-key columns.
@@ -1951,6 +1951,18 @@ The maximum allowed size for String in RowBinary format. It prevents allocating

 Default value: `1GiB`.

+### output_format_binary_encode_types_in_binary_format {#output_format_binary_encode_types_in_binary_format}
+
+Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes output format.
+
+Disabled by default.
+
+### input_format_binary_decode_types_in_binary_format {#input_format_binary_decode_types_in_binary_format}
+
+Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes input format.
+
+Disabled by default.

 ## Native format settings {#native-format-settings}

 ### input_format_native_allow_types_conversion {#input_format_native_allow_types_conversion}

@@ -1958,3 +1970,15 @@ Default value: `1GiB`.
 Allow types conversion in Native input format between columns from input data and requested columns.

 Enabled by default.

+### output_format_native_encode_types_in_binary_format {#output_format_native_encode_types_in_binary_format}
+
+Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native output format.
+
+Disabled by default.
+
+### input_format_native_decode_types_in_binary_format {#input_format_native_decode_types_in_binary_format}
+
+Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native input format.
+
+Disabled by default.
@@ -22,6 +22,21 @@ Structure of the `users` section:
 <!-- Or -->
 <password_sha256_hex></password_sha256_hex>

+<ssh_keys>
+    <ssh_key>
+        <type>ssh-ed25519</type>
+        <base64_key>AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj</base64_key>
+    </ssh_key>
+    <ssh_key>
+        <type>ecdsa-sha2-nistp256</type>
+        <base64_key>AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNxeV2uN5UY6CUbCzTA1rXfYimKQA5ivNIqxdax4bcMXz4D0nSk2l5E1TkR5mG8EBWtmExSPbcEPJ8V7lyWWbA8=</base64_key>
+    </ssh_key>
+    <ssh_key>
+        <type>ssh-rsa</type>
+        <base64_key>AAAAB3NzaC1yc2EAAAADAQABAAABgQCpgqL1SHhPVBOTFlOm0pu+cYBbADzC2jL41sPMawYCJHDyHuq7t+htaVVh2fRgpAPmSEnLEC2d4BEIKMtPK3bfR8plJqVXlLt6Q8t4b1oUlnjb3VPA9P6iGcW7CV1FBkZQEVx8ckOfJ3F+kI5VsrRlEDgiecm/C1VPl0/9M2llW/mPUMaD65cM9nlZgM/hUeBrfxOEqM11gDYxEZm1aRSbZoY4dfdm3vzvpSQ6lrCrkjn3X2aSmaCLcOWJhfBWMovNDB8uiPuw54g3ioZ++qEQMlfxVsqXDGYhXCrsArOVuW/5RbReO79BvXqdssiYShfwo+GhQ0+aLWMIW/jgBkkqx/n7uKLzCMX7b2F+aebRYFh+/QXEj7SnihdVfr9ud6NN3MWzZ1ltfIczlEcFLrLJ1Yq57wW6wXtviWh59WvTWFiPejGjeSjjJyqqB49tKdFVFuBnIU5u/bch2DXVgiAEdQwUrIp1ACoYPq22HFFAYUJrL32y7RxX3PGzuAv3LOc=</base64_key>
+    </ssh_key>
+</ssh_keys>

 <access_management>0|1</access_management>

 <networks incl="networks" replace="replace">

@@ -79,6 +94,24 @@ Password can be specified in plaintext or in SHA256 (hex format).

 The first line of the result is the password. The second line is the corresponding double SHA1 hash.

+### username/ssh-key {#user-sshkey}
+
+This setting allows authenticating with SSH keys.
+
+Given an SSH key (as generated by `ssh-keygen`) like
+```
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj john@example.com
+```
+the `ssh_key` element is expected to be
+```
+<ssh_key>
+    <type>ssh-ed25519</type>
+    <base64_key>AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj</base64_key>
+</ssh_key>
+```
+
+Substitute `ssh-ed25519` with `ssh-rsa` or `ecdsa-sha2-nistp256` for the other supported algorithms.

 ### access_management {#access_management-user-setting}

 This setting enables or disables using of SQL-driven [access control and account management](../../guides/sre/user-management/index.md#access-control) for the user.
@@ -1170,6 +1170,10 @@ Data in the VALUES clause of INSERT queries is processed by a separate stream pa

 Default value: 262144 (= 256 KiB).

+:::note
+`max_query_size` cannot be set within an SQL query (e.g., `SELECT now() SETTINGS max_query_size=10000`) because ClickHouse needs to allocate a buffer to parse the query, and this buffer size is determined by the `max_query_size` setting, which must be configured before the query is executed.
+:::
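A short illustration of the note above: the value must be in effect before the query text is parsed, so set it at the session or profile level instead of inside the query. A minimal sketch:

```sql
-- Not possible: the parse buffer is already sized before SETTINGS is read.
-- SELECT now() SETTINGS max_query_size = 10000;

-- Works: the session-level value applies to the queries that follow.
SET max_query_size = 10000;
SELECT now();
```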
 ## max_parser_depth {#max_parser_depth}

 Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size.
@@ -1354,12 +1358,25 @@ Connection pool size for PostgreSQL table engine and database engine.

 Default value: 16

+## postgresql_connection_attempt_timeout {#postgresql-connection-attempt-timeout}
+
+Connection timeout in seconds of a single attempt to connect PostgreSQL end-point.
+The value is passed as a `connect_timeout` parameter of the connection URL.
+
+Default value: `2`.

 ## postgresql_connection_pool_wait_timeout {#postgresql-connection-pool-wait-timeout}

 Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.

 Default value: 5000

+## postgresql_connection_pool_retries {#postgresql-connection-pool-retries}
+
+The maximum number of retries to establish a connection with the PostgreSQL end-point.
+
+Default value: `2`.

 ## postgresql_connection_pool_auto_close_connection {#postgresql-connection-pool-auto-close-connection}

 Close connection before returning connection to the pool.
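These settings take effect wherever ClickHouse opens connections to PostgreSQL, for example through the PostgreSQL table engine. A minimal sketch, where the endpoint, credentials, and columns are placeholders:

```sql
-- Hypothetical PostgreSQL endpoint; the structure must match the remote table.
CREATE TABLE pg_orders (id UInt32, amount Float64)
ENGINE = PostgreSQL('pg.example.com:5432', 'shop', 'orders', 'reader', 'secret');

-- Wait up to 10 s per connection attempt and retry up to 3 times.
SET postgresql_connection_attempt_timeout = 10;
SET postgresql_connection_pool_retries = 3;
SELECT count() FROM pg_orders;
```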
docs/en/operations/system-tables/detached_tables.md (new file, 35 lines)
@@ -0,0 +1,35 @@
+---
+slug: /en/operations/system-tables/detached_tables
+---
+# detached_tables
+
+Contains information about each detached table.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
+
+- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
+
+- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
+
+- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system.
+
+- `is_permanently` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag indicating that the table was detached PERMANENTLY.
+
+**Example**
+
+```sql
+SELECT * FROM system.detached_tables FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database:       base
+table:          t1
+uuid:           81b1c20a-b7c6-4116-a2ce-7583fb6b6736
+metadata_path:  /var/lib/clickhouse/store/461/461cf698-fd0b-406d-8c01-5d8fd5748a91/t1.sql
+is_permanently: 1
+```
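To see how rows end up in this table, a minimal sketch that produces output like the example above. The database and table names are illustrative:

```sql
CREATE DATABASE IF NOT EXISTS base;
CREATE TABLE base.t1 (n UInt64) ENGINE = MergeTree ORDER BY n;

-- PERMANENTLY keeps the table detached across server restarts
-- and sets is_permanently = 1 in system.detached_tables.
DETACH TABLE base.t1 PERMANENTLY;

SELECT database, table, is_permanently FROM system.detached_tables;
```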
@@ -9,7 +9,6 @@ Columns:

 - `name` ([String](../../sql-reference/data-types/string.md)) – The name of the function.
 - `is_aggregate` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Whether the function is an aggregate function.
-- `is_deterministic` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md))) - Whether the function is deterministic.
 - `case_insensitive`, ([UInt8](../../sql-reference/data-types/int-uint.md)) - Whether the function name can be used case-insensitively.
 - `alias_to`, ([String](../../sql-reference/data-types/string.md)) - The original function name, if the function name is an alias.
 - `create_query`, ([String](../../sql-reference/data-types/enum.md)) - Unused.
@@ -16,7 +16,7 @@ sidebar_label: clickhouse-local

 While `clickhouse-local` is a great tool for development and testing purposes, and for processing files, it is not suitable for serving end users or applications. In these scenarios, it is recommended to use the open-source [ClickHouse](https://clickhouse.com/docs/en/install). ClickHouse is a powerful OLAP database that is designed to handle large-scale analytical workloads. It provides fast and efficient processing of complex queries on large datasets, making it ideal for use in production environments where high-performance is critical. Additionally, ClickHouse offers a wide range of features such as replication, sharding, and high availability, which are essential for scaling up to handle large datasets and serving applications. If you need to handle larger datasets or serve end users or applications, we recommend using open-source ClickHouse instead of `clickhouse-local`.

-Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local CSVs](#query-data-in-a-csv-file-using-sql) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).
+Please read the docs below that show example use cases for `clickhouse-local`, such as [querying a local file](#query_data_in_file) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).

 ## Download clickhouse-local
@@ -18,7 +18,7 @@ ClickHouse also supports:

 During aggregation, all `NULL` arguments are skipped. If the aggregation has several arguments it will ignore any row in which one or more of them are NULL.

-There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases when followed by the modifier `RESPECT NULLS`: `FIRST_VALUE(b) RESPECT NULLS`.
+There is an exception to this rule, which are the functions [`first_value`](../../sql-reference/aggregate-functions/reference/first_value.md), [`last_value`](../../sql-reference/aggregate-functions/reference/last_value.md) and their aliases (`any` and `anyLast` respectively) when followed by the modifier `RESPECT NULLS`. For example, `FIRST_VALUE(b) RESPECT NULLS`.
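A minimal sketch of the difference, with an illustrative table and values:

```sql
CREATE TABLE vals (b Nullable(Int32)) ENGINE = Memory;
INSERT INTO vals VALUES (NULL), (1), (2);

-- first_value skips the leading NULL; RESPECT NULLS keeps it.
SELECT first_value(b), first_value(b) RESPECT NULLS FROM vals;
-- returns 1 and NULL respectively
```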
 **Examples:**
@@ -5,12 +5,12 @@ sidebar_position: 102

 # any

-Selects the first encountered value of a column.
+Selects the first encountered value of a column, ignoring any `NULL` values.

 **Syntax**

 ```sql
-any(column)
+any(column) [RESPECT NULLS]
 ```

 Aliases: `any_value`, [`first_value`](../reference/first_value.md).

@@ -20,7 +20,9 @@ Aliases: `any_value`, [`first_value`](../reference/first_value.md).

 **Returned value**

-By default, it ignores NULL values and returns the first NOT NULL value found in the column. Like [`first_value`](../../../sql-reference/aggregate-functions/reference/first_value.md) it supports `RESPECT NULLS`, in which case it will select the first value passed, independently on whether it's NULL or not.
+:::note
+Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
+:::

 :::note
 The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
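A minimal sketch of `any` with and without the modifier, mirroring the example data from the removed `any_respect_nulls` page:

```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE = Log;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

-- any() skips the leading NULL; with RESPECT NULLS it is returned as-is.
SELECT any(city), any(city) RESPECT NULLS FROM any_nulls;
-- returns 'Amsterdam' and NULL
```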
@@ -1,44 +0,0 @@
----
-slug: /en/sql-reference/aggregate-functions/reference/any_respect_nulls
-sidebar_position: 103
----
-
-# any_respect_nulls
-
-Selects the first encountered value of a column, irregardless of whether it is a `NULL` value or not.
-
-Alias: `any_value_respect_nulls`, `first_value_repect_nulls`.
-
-**Syntax**
-
-```sql
-any_respect_nulls(column)
-```
-
-**Parameters**
-- `column`: The column name.
-
-**Returned value**
-
-- The last value encountered, irregardless of whether it is a `NULL` value or not.
-
-**Example**
-
-Query:
-
-```sql
-CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
-
-INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
-
-SELECT any(city), any_respect_nulls(city) FROM any_nulls;
-```
-
-```response
-┌─any(city)─┬─any_respect_nulls(city)─┐
-│ Amsterdam │ ᴺᵁᴸᴸ                    │
-└───────────┴─────────────────────────┘
-```
-
-**See Also**
-- [any](../reference/any.md)
@@ -5,17 +5,21 @@ sidebar_position: 105

 # anyLast

-Selects the last value encountered. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
+Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.

 **Syntax**

 ```sql
-anyLast(column)
+anyLast(column) [RESPECT NULLS]
 ```

 **Parameters**
 - `column`: The column name.

+:::note
+Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not.
+:::

 **Returned value**

 - The last value encountered.
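Likewise for `anyLast`, mirroring the example data from the removed `anyLast_respect_nulls` page:

```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE = Log;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'), (NULL), ('New York'), ('Tokyo'), ('Valencia'), (NULL);

-- anyLast() skips the trailing NULL; with RESPECT NULLS it is returned.
SELECT anyLast(city), anyLast(city) RESPECT NULLS FROM any_last_nulls;
-- returns 'Valencia' and NULL
```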
@@ -1,39 +0,0 @@
----
-slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
-sidebar_position: 106
----
-
-# anyLast_respect_nulls
-
-Selects the last value encountered, irregardless of whether it is `NULL` or not.
-
-**Syntax**
-
-```sql
-anyLast_respect_nulls(column)
-```
-
-**Parameters**
-- `column`: The column name.
-
-**Returned value**
-
-- The last value encountered, irregardless of whether it is `NULL` or not.
-
-**Example**
-
-Query:
-
-```sql
-CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
-
-INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
-
-SELECT anyLast(city), anyLast_respect_nulls(city) FROM any_last_nulls;
-```
-
-```response
-┌─anyLast(city)─┬─anyLast_respect_nulls(city)─┐
-│ Valencia      │ ᴺᵁᴸᴸ                        │
-└───────────────┴─────────────────────────────┘
-```
|
|
@ -45,10 +45,9 @@ ClickHouse-specific aggregate functions:
|
|||||||
|
|
||||||
- [aggThrow](../reference/aggthrow.md)
|
- [aggThrow](../reference/aggthrow.md)
|
||||||
- [analysisOfVariance](../reference/analysis_of_variance.md)
|
- [analysisOfVariance](../reference/analysis_of_variance.md)
|
||||||
- [any](../reference/any_respect_nulls.md)
|
- [any](../reference/any.md)
|
||||||
- [anyHeavy](../reference/anyheavy.md)
|
- [anyHeavy](../reference/anyheavy.md)
|
||||||
- [anyLast](../reference/anylast.md)
|
- [anyLast](../reference/anylast.md)
|
||||||
- [anyLast](../reference/anylast_respect_nulls.md)
|
|
||||||
- [boundingRatio](../reference/boundrat.md)
|
- [boundingRatio](../reference/boundrat.md)
|
||||||
- [first_value](../reference/first_value.md)
|
- [first_value](../reference/first_value.md)
|
||||||
- [last_value](../reference/last_value.md)
|
- [last_value](../reference/last_value.md)
|
||||||
|
@ -16,7 +16,7 @@ singleValueOrNull(x)
|
|||||||
|
|
||||||
**Parameters**
|
**Parameters**
|
||||||
|
|
||||||
- `x` — Column of any [data type](../../data-types/index.md).
|
- `x` — Column of any [data type](../../data-types/index.md) (except [Map](../../data-types/map.md), [Array](../../data-types/array.md) or [Tuple](../../data-types/tuple) which cannot be of type [Nullable](../../data-types/nullable.md)).
|
||||||
|
|
||||||
**Returned values**
|
**Returned values**
|
||||||
|
|
||||||
|
@@ -96,3 +96,22 @@ Result:

│         1 │ [2]       │ [[4,1]]   │
└───────────┴───────────┴───────────┘
```

## Reading nested subcolumns from Array

If nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type with the same subcolumn names. The type of a subcolumn will be `Array` of the type of the original subcolumn.

**Example**

```sql
CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();

INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);

SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) from t_arr;
```

```text
┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
│ [1,2]      │ Array(UInt32)          │ ['Hello','World']         │ Array(String)          │
│ [3,4,5]    │ Array(UInt32)          │ ['This','is','subcolumn'] │ Array(String)          │
└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
```
115 docs/en/sql-reference/data-types/data-types-binary-encoding.md Normal file
@@ -0,0 +1,115 @@
---
slug: /en/sql-reference/data-types/data-types-binary-encoding
sidebar_position: 56
sidebar_label: Data types binary encoding specification.
---

# Data types binary encoding specification

This specification describes the binary format that can be used for binary encoding and decoding of ClickHouse data types. This format is used in `Dynamic` column [binary serialization](dynamic.md#binary-output-format) and can be used in input/output formats [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes) and [Native](../../interfaces/formats.md#native) under corresponding settings.

The table below describes how each data type is represented in binary format. Each data type encoding consists of 1 byte that indicates the type and some optional additional information.
`var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression.

| ClickHouse data type | Binary encoding |
|----------------------|-----------------|
| `Nothing` | `0x00` |
| `UInt8` | `0x01` |
| `UInt16` | `0x02` |
| `UInt32` | `0x03` |
| `UInt64` | `0x04` |
| `UInt128` | `0x05` |
| `UInt256` | `0x06` |
| `Int8` | `0x07` |
| `Int16` | `0x08` |
| `Int32` | `0x09` |
| `Int64` | `0x0A` |
| `Int128` | `0x0B` |
| `Int256` | `0x0C` |
| `Float32` | `0x0D` |
| `Float64` | `0x0E` |
| `Date` | `0x0F` |
| `Date32` | `0x10` |
| `DateTime` | `0x11` |
| `DateTime(time_zone)` | `0x12<var_uint_time_zone_name_size><time_zone_name_data>` |
| `DateTime64(P)` | `0x13<uint8_precision>` |
| `DateTime64(P, time_zone)` | `0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data>` |
| `String` | `0x15` |
| `FixedString(N)` | `0x16<var_uint_size>` |
| `Enum8` | `0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N>` |
| `Enum16` | `0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...<var_uint_name_size_N><name_data_N><int16_little_endian_value_N>` |
| `Decimal32(P, S)` | `0x19<uint8_precision><uint8_scale>` |
| `Decimal64(P, S)` | `0x1A<uint8_precision><uint8_scale>` |
| `Decimal128(P, S)` | `0x1B<uint8_precision><uint8_scale>` |
| `Decimal256(P, S)` | `0x1C<uint8_precision><uint8_scale>` |
| `UUID` | `0x1D` |
| `Array(T)` | `0x1E<nested_type_encoding>` |
| `Tuple(T1, ..., TN)` | `0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N>` |
| `Tuple(name1 T1, ..., nameN TN)` | `0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
| `Set` | `0x21` |
| `Interval` | `0x22<interval_kind>` (see [interval kind binary encoding](#interval-kind-binary-encoding)) |
| `Nullable(T)` | `0x23<nested_type_encoding>` |
| `Function` | `0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding>` |
| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `LowCardinality(T)` | `0x26<nested_type_encoding>` |
| `Map(K, V)` | `0x27<key_type_encoding><value_type_encoding>` |
| `IPv4` | `0x28` |
| `IPv6` | `0x29` |
| `Variant(T1, ..., TN)` | `0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N>` |
| `Dynamic(max_types=N)` | `0x2B<uint8_max_types>` |
| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C<var_uint_type_name_size><type_name_data>` |
| `Bool` | `0x2D` |
| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `Nested(name1 T1, ..., nameN TN)` | `0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
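To make the composition rule concrete, here are a few encodings derived directly from the table above (outer type first, nested encodings appended left to right):

- `Nullable(String)` → `0x23` (`Nullable(T)`) followed by `0x15` (`String`): `0x23 0x15`
- `Map(String, UInt64)` → `0x27` followed by the key and value encodings: `0x27 0x15 0x04`
- `DateTime64(3)` → `0x13` followed by the precision byte: `0x13 0x03`
- `FixedString(5)` → `0x16` followed by the var_uint size: `0x16 0x05`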
### Interval kind binary encoding

The table below describes how different interval kinds of the `Interval` data type are encoded.

| Interval kind | Binary encoding |
|---------------|-----------------|
| `Nanosecond`  | `0x00` |
| `Microsecond` | `0x01` |
| `Millisecond` | `0x02` |
| `Second`      | `0x03` |
| `Minute`      | `0x04` |
| `Hour`        | `0x05` |
| `Day`         | `0x06` |
| `Week`        | `0x07` |
| `Month`       | `0x08` |
| `Quarter`     | `0x09` |
| `Year`        | `0x0A` |
### Aggregate function parameter binary encoding

The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.

| Parameter type | Binary encoding |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `Null` | `0x00` |
| `UInt64` | `0x01<var_uint_value>` |
| `Int64` | `0x02<var_int_value>` |
| `UInt128` | `0x03<uint128_little_endian_value>` |
| `Int128` | `0x04<int128_little_endian_value>` |
| `UInt256` | `0x05<uint256_little_endian_value>` |
| `Int256` | `0x06<int256_little_endian_value>` |
| `Float64` | `0x07<float64_little_endian_value>` |
| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>` |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |
| `Bool` | `0x13<bool_value>` |
| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
| `Negative infinity` | `0xFE` |
| `Positive infinity` | `0xFF` |
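For example, a `Float64` parameter such as the `0.5` in `quantile(0.5)` would be encoded as `0x07` followed by the 8 little-endian bytes of its IEEE-754 representation, and a `String` parameter `'x'` as `0x0C`, the var_uint length `0x01`, and the single byte of the string.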
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/data-types/dynamic
sidebar_position: 62
sidebar_label: Dynamic
---
@@ -493,3 +493,44 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
```

As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.

## JSONExtract functions with Dynamic

All `JSONExtract*` functions support the `Dynamic` type:

```sql
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(dynamic) AS dynamic_type;
```

```text
┌─dynamic─┬─dynamic_type───────────┐
│ [1,2,3] │ Array(Nullable(Int64)) │
└─────────┴────────────────────────┘
```

```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types;
```

```text
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘
```

```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types;
```

```text
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
```

### Binary output format

In RowBinary format values of `Dynamic` type are serialized in the following format:

```text
<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
```
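As a sketch of how this composes with the binary encoding specification above (the query is illustrative, not from the original page; the type byte is taken from the encoding table):

```sql
-- Each Dynamic value is written as <binary_encoded_data_type><value>:
-- every value below is a UInt64, so each row should start with the type byte 0x04
-- followed by the 8-byte little-endian value.
-- (On versions where Dynamic is experimental, the setting below may be required.)
SELECT CAST(number, 'Dynamic') AS d
FROM numbers(3)
SETTINGS allow_experimental_dynamic_type = 1
FORMAT RowBinary;
```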
@@ -5,11 +5,11 @@ sidebar_label: Object Data Type
keywords: [object, data type]
---

# Object Data Type (deprecated)

**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
<hr />

Stores JavaScript Object Notation (JSON) documents in a single column.
@@ -56,7 +56,6 @@ Functions:

## Related content

- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
@@ -3080,4 +3080,4 @@ Result:

## Distance functions

All supported functions are described in [distance functions documentation](../../sql-reference/functions/distance-functions.md).
@@ -2698,6 +2698,204 @@ Like function `YYYYMMDDhhmmssToDate()` but produces a [DateTime64](../data-types

Accepts an additional, optional `precision` parameter after the `timezone` parameter.

## changeYear

Changes the year component of a date or date time.

**Syntax**

``` sql
changeYear(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the year. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- The same type as `date_or_datetime`.

**Example**

``` sql
SELECT changeYear(toDate('1999-01-01'), 2000), changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000);
```

Result:

```
┌─changeYear(toDate('1999-01-01'), 2000)─┬─changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000)─┐
│                             2000-01-01 │                                      2000-01-01 00:00:00.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```

## changeMonth

Changes the month component of a date or date time.

**Syntax**

``` sql
changeMonth(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the month. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`.

**Example**

``` sql
SELECT changeMonth(toDate('1999-01-01'), 2), changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2);
```

Result:

```
┌─changeMonth(toDate('1999-01-01'), 2)─┬─changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2)─┐
│                           1999-02-01 │                                     1999-02-01 00:00:00.000 │
└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```

## changeDay

Changes the day component of a date or date time.

**Syntax**

``` sql
changeDay(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the day. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`.

**Example**

``` sql
SELECT changeDay(toDate('1999-01-01'), 5), changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5);
```

Result:

```
┌─changeDay(toDate('1999-01-01'), 5)─┬─changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5)─┐
│                         1999-01-05 │                                   1999-01-05 00:00:00.000 │
└────────────────────────────────────┴──────────────────────────────────────────────────────────┘
```

## changeHour

Changes the hour component of a date or date time.

**Syntax**

``` sql
changeHour(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the hour. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), returns [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), returns [DateTime64](../data-types/datetime64.md).

**Example**

``` sql
SELECT changeHour(toDate('1999-01-01'), 14), changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14);
```

Result:

```
┌─changeHour(toDate('1999-01-01'), 14)─┬─changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14)─┐
│                  1999-01-01 14:00:00 │                                     1999-01-01 14:00:00.000 │
└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```

## changeMinute

Changes the minute component of a date or date time.

**Syntax**

``` sql
changeMinute(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the minute. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), returns [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), returns [DateTime64](../data-types/datetime64.md).

**Example**

``` sql
SELECT changeMinute(toDate('1999-01-01'), 15), changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
```

Result:

```
┌─changeMinute(toDate('1999-01-01'), 15)─┬─changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
│                    1999-01-01 00:15:00 │                                       1999-01-01 00:15:00.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```

## changeSecond

Changes the second component of a date or date time.

**Syntax**

``` sql
changeSecond(date_or_datetime, value)
```

**Arguments**

- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the second. [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), returns [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), returns [DateTime64](../data-types/datetime64.md).

**Example**

``` sql
SELECT changeSecond(toDate('1999-01-01'), 15), changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
```

Result:

```
┌─changeSecond(toDate('1999-01-01'), 15)─┬─changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
│                    1999-01-01 00:00:15 │                                       1999-01-01 00:00:15.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```

## addYears

Adds a specified number of years to a date, a date with time or a string-encoded date / date with time.
@@ -2714,6 +2912,7 @@ addYears(date, num)

- `num`: Number of years to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2751,6 +2950,7 @@ addQuarters(date, num)

- `num`: Number of quarters to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2788,6 +2988,7 @@ addMonths(date, num)

- `num`: Number of months to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2825,6 +3026,7 @@ addWeeks(date, num)

- `num`: Number of weeks to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2862,6 +3064,7 @@ addDays(date, num)

- `num`: Number of days to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2899,6 +3102,7 @@ addHours(date, num)

- `num`: Number of hours to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2936,6 +3140,7 @@ addMinutes(date, num)

- `num`: Number of minutes to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -2973,6 +3178,7 @@ addSeconds(date, num)

- `num`: Number of seconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` plus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3010,6 +3216,7 @@ addMilliseconds(date_time, num)

- `num`: Number of milliseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` plus `num` milliseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3045,6 +3252,7 @@ addMicroseconds(date_time, num)

- `num`: Number of microseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` plus `num` microseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3080,6 +3288,7 @@ addNanoseconds(date_time, num)

- `num`: Number of nanoseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` plus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3115,6 +3324,7 @@ addInterval(interval_1, interval_2)

- `interval_2`: Second interval to be added. [interval](../data-types/special-data-types/interval.md).

**Returned value**

- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

:::note

@@ -3161,6 +3371,7 @@ addTupleOfIntervals(interval_1, interval_2)

- `intervals`: Tuple of intervals to add to `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

**Returned value**

- Returns `date` with added `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).

**Example**

@@ -3195,6 +3406,7 @@ subtractYears(date, num)

- `num`: Number of years to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3232,6 +3444,7 @@ subtractQuarters(date, num)

- `num`: Number of quarters to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3269,6 +3482,7 @@ subtractMonths(date, num)

- `num`: Number of months to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3306,6 +3520,7 @@ subtractWeeks(date, num)

- `num`: Number of weeks to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3343,6 +3558,7 @@ subtractDays(date, num)

- `num`: Number of days to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3380,6 +3596,7 @@ subtractHours(date, num)

- `num`: Number of hours to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3417,6 +3634,7 @@ subtractMinutes(date, num)

- `num`: Number of minutes to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3454,6 +3672,7 @@ subtractSeconds(date, num)

- `num`: Number of seconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date` minus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**

@@ -3491,6 +3710,7 @@ subtractMilliseconds(date_time, num)

- `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` minus `num` milliseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3526,6 +3746,7 @@ subtractMicroseconds(date_time, num)

- `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` minus `num` microseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3561,6 +3782,7 @@ subtractNanoseconds(date_time, num)

- `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).

**Returned value**

- Returns `date_time` minus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).

**Example**

@@ -3596,6 +3818,7 @@ subtractInterval(interval_1, interval_2)

- `interval_2`: Second interval to be negated. [interval](../data-types/special-data-types/interval.md).

**Returned value**

- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

:::note

@@ -3642,6 +3865,7 @@ subtractTupleOfIntervals(interval_1, interval_2)

- `intervals`: Tuple of intervals to subtract from `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).

**Returned value**

- Returns `date` with subtracted `intervals`. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Example**
@@ -314,10 +314,71 @@ SELECT groupBitXor(cityHash64(*)) FROM table

Calculates a 32-bit hash code from any type of integer.
This is a relatively fast non-cryptographic hash function of average quality for numbers.

**Syntax**

```sql
intHash32(int)
```

**Arguments**

- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).

**Returned value**

- 32-bit hash code. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT intHash32(42);
```

Result:

```response
┌─intHash32(42)─┐
│    1228623923 │
└───────────────┘
```

## intHash64

Calculates a 64-bit hash code from any type of integer.
This is a relatively fast non-cryptographic hash function of average quality for numbers.
It works faster than [intHash32](#inthash32).

**Syntax**

```sql
intHash64(int)
```

**Arguments**

- `int` — Integer to hash. [(U)Int*](../data-types/int-uint.md).

**Returned value**

- 64-bit hash code. [UInt64](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT intHash64(42);
```

Result:

```response
┌────────intHash64(42)─┐
│ 11490350930367293593 │
└──────────────────────┘
```

## SHA1, SHA224, SHA256, SHA512, SHA512_256
@@ -76,7 +76,7 @@ WHERE macro = 'test';

└───────┴──────────────┘
```

## fqdn

Returns the fully qualified domain name of the ClickHouse server.

@@ -86,7 +86,7 @@ Returns the fully qualified domain name of the ClickHouse server.

fqdn();
```

Aliases: `fullHostName`, `FQDN`.

**Returned value**
@@ -2984,6 +2984,66 @@ Result:

└─────────┘
```

## partitionID

Computes the [partition ID](../../engines/table-engines/mergetree-family/custom-partitioning-key.md).

:::note
This function is slow and should not be called for a large number of rows.
:::

**Syntax**

```sql
partitionID(x[, y, ...]);
```

**Arguments**

- `x` — Column for which to return the partition ID.
- `y, ...` — Remaining N columns for which to return the partition ID (optional).

**Returned Value**

- Partition ID that the row would belong to. [String](../data-types/string.md).

**Example**

Query:

```sql
DROP TABLE IF EXISTS tab;

CREATE TABLE tab
(
  i int,
  j int
)
ENGINE = MergeTree
PARTITION BY i
ORDER BY tuple();

INSERT INTO tab VALUES (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6);

SELECT i, j, partitionID(i), _partition_id FROM tab ORDER BY i, j;
```

Result:

```response
┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
│ 1 │ 1 │ 1              │ 1             │
│ 1 │ 2 │ 1              │ 1             │
│ 1 │ 3 │ 1              │ 1             │
└───┴───┴────────────────┴───────────────┘
┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
│ 2 │ 4 │ 2              │ 2             │
│ 2 │ 5 │ 2              │ 2             │
│ 2 │ 6 │ 2              │ 2             │
└───┴───┴────────────────┴───────────────┘
```

## shardNum

Returns the index of a shard which processes a part of data in a distributed query. Indices start from `1`.
@@ -34,7 +34,7 @@ Alias: `replace`.

Replaces the first occurrence of the substring matching the regular expression `pattern` (in [re2 syntax](https://github.com/google/re2/wiki/Syntax)) in `haystack` by the `replacement` string.

`replacement` can contain substitutions `\0-\9`.
Substitutions `\1-\9` correspond to the 1st to 9th capturing group (submatch), substitution `\0` corresponds to the entire match.

To use a verbatim `\` character in the `pattern` or `replacement` strings, escape it using `\`.
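A small illustrative query (not from the original page) showing `\1`/`\2` substitutions swapping two captured groups:

```sql
-- Backslashes are doubled because they must be escaped inside SQL string literals.
-- The pattern captures two words; the replacement emits them in reverse order.
SELECT replaceRegexpOne('Hello, World', '(\\w+), (\\w+)', '\\2 \\1') AS swapped;
-- Expected result: 'World Hello'
```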
@@ -7,7 +7,7 @@ sidebar_label: Tuples

## tuple

A function that allows grouping multiple columns.
For columns C1, C2, ... with the types T1, T2, ..., it returns a named Tuple(C1 T1, C2 T2, ...) type tuple containing these columns if their names are unique and can be treated as unquoted identifiers, otherwise a Tuple(T1, T2, ...) is returned. There is no cost to execute the function.
Tuples are normally used as intermediate values for an argument of IN operators, or for creating a list of formal parameters of lambda functions. Tuples can’t be written to a table.

The function implements the operator `(x, y, ...)`.
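A minimal sketch of the naming rule described above (the table and values are hypothetical; the expected types follow from the description rather than from the original page):

```sql
CREATE TABLE tuple_demo (a UInt8, b String) ENGINE = Memory;
INSERT INTO tuple_demo VALUES (1, 'x');

-- Unique, identifier-like column names become element names:
SELECT toTypeName(tuple(a, b)) FROM tuple_demo;      -- expected: Tuple(a UInt8, b String)

-- An expression argument has no identifier-like name, so the tuple is unnamed:
SELECT toTypeName(tuple(a + 1, b)) FROM tuple_demo;  -- expected: Tuple(UInt16, String)
```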
@@ -259,6 +259,60 @@ Result:

└───────────────────────────────────────┘
```

## tupleNames

Converts a tuple into an array of column names. For a tuple in the form `Tuple(a T, b T, ...)`, it returns an array of strings representing the named columns of the tuple. If the tuple elements do not have explicit names, their indices will be used as the column names instead.

**Syntax**

``` sql
tupleNames(tuple)
```

**Arguments**

- `tuple` — Named tuple. [Tuple](../../sql-reference/data-types/tuple.md) with any types of values.

**Returned value**

- An array with strings.

Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

**Example**

Query:

``` sql
CREATE TABLE tupletest (col Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;

INSERT INTO tupletest VALUES (tuple(1, 2));

SELECT tupleNames(col) FROM tupletest;
```

Result:

``` text
┌─tupleNames(col)──────────┐
│ ['user_ID','session_ID'] │
└──────────────────────────┘
```

If you pass a simple tuple to the function, ClickHouse uses the indexes of the columns as their names:

``` sql
SELECT tupleNames(tuple(3, 2, 1));
```

Result:

``` text
┌─tupleNames((3, 2, 1))─┐
│ ['1','2','3']         │
└───────────────────────┘
```

## tuplePlus

Calculates the sum of corresponding values of two tuples of the same size.