Merge branch 'master' into period-detect

Commit a8cce4f103

.clang-tidy
@@ -27,6 +27,8 @@ Checks: [
     '-bugprone-not-null-terminated-result',
     '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
     '-bugprone-unchecked-optional-access',
+    '-bugprone-crtp-constructor-accessibility',
+    '-bugprone-suspicious-stringview-data-usage',

     '-cert-dcl16-c',
     '-cert-dcl37-c',
@@ -36,6 +38,7 @@ Checks: [
     '-cert-msc51-cpp',
     '-cert-oop54-cpp',
     '-cert-oop57-cpp',
+    '-cert-err33-c', # Misreports on clang-19: it warns about all functions containing 'remove' in the name, not only about the standard library.

     '-clang-analyzer-optin.performance.Padding',

@@ -99,6 +102,7 @@ Checks: [
     '-modernize-use-emplace',
     '-modernize-use-nodiscard',
     '-modernize-use-trailing-return-type',
+    '-modernize-use-designated-initializers',

     '-performance-enum-size',
     '-performance-inefficient-string-concatenation',

.git-blame-ignore-revs
@@ -13,3 +13,6 @@
 # dbms/ → src/
 # (though it is unlikely that you will see it in blame)
 06446b4f08a142d6f1bc30664c47ded88ab51782
+
+# Applied Black formatter for Python code
+e6f5a3f98b21ba99cf274a9833797889e020a2b3

.github/actions/release/action.yml (new file, 168 lines)
@@ -0,0 +1,168 @@
name: Release
description: Makes patch releases and creates new release branch

inputs:
  ref:
    description: 'Git reference (branch or commit sha) from which to create the release'
    required: true
    type: string
  type:
    description: 'The type of release: "new" for a new release or "patch" for a patch release'
    required: true
    type: choice
    options:
      - patch
      - new
  dry-run:
    description: 'Dry run'
    required: false
    default: true
    type: boolean
  token:
    required: true
    type: string

runs:
  using: "composite"
  steps:
    - name: Prepare Release Info
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --prepare-release-info \
          --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
          ${{ inputs.dry-run && '--dry-run' || '' }}
        echo "::group::Release Info"
        python3 -m json.tool /tmp/release_info.json
        echo "::endgroup::"
        release_tag=$(jq -r '.release_tag' /tmp/release_info.json)
        commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json)
        echo "Release Tag: $release_tag"
        echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
        echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
    - name: Download All Release Artifacts
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push Git Tag for the Release
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Push New Release Branch
      if: ${{ inputs.type == 'new' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump CH Version and Update Contributors' List
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Bump Docker versions, Changelog, Security
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        git checkout master
        python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
        echo "List versions"
        ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
        echo "Update docker version"
        ./utils/list-versions/update-docker-version.sh
        echo "Generate ChangeLog"
        export CI=1
        docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
          --volume=".:/ClickHouse" clickhouse/style-test \
          /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
          --gh-user-or-token=${{ inputs.token }} --jobs=5 \
          --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
        git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
        echo "Generate Security"
        python3 ./utils/security-generator/generate_security.py > SECURITY.md
        git diff HEAD
    - name: Create ChangeLog PR
      if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
      uses: peter-evans/create-pull-request@v6
      with:
        author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        token: ${{ inputs.token }}
        committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
        commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
        branch: auto/${{ env.RELEASE_TAG }}
        assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
        delete-branch: true
        title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
        labels: do not test
        body: |
          Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
          ### Changelog category (leave one):
          - Not for changelog (changelog entry is not required)
    - name: Complete previous steps and Restore git state
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-completed
        git reset --hard HEAD
        git checkout "$GITHUB_REF_NAME"
    - name: Create GH Release
      shell: bash
      if: ${{ inputs.type == 'patch' }}
      run: |
        python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test TGZ Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test RPM Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Export Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Test Debian Packages
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }}
    - name: Docker clickhouse/clickhouse-server building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker server release"
        export CHECK_NAME="Docker server image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Docker clickhouse/clickhouse-keeper building
      if: ${{ inputs.type == 'patch' }}
      shell: bash
      run: |
        cd "./tests/ci"
        python3 ./create_release.py --set-progress-started --progress "docker keeper release"
        export CHECK_NAME="Docker keeper image"
        python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
        python3 ./create_release.py --set-progress-completed
    - name: Set current Release progress to Completed with OK
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --set-progress-started --progress "completed"
        python3 ./tests/ci/create_release.py --set-progress-completed
    - name: Post Slack Message
      if: ${{ !cancelled() }}
      shell: bash
      run: |
        python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }}

.github/workflows/auto_release.yml (98 changed lines)
@@ -1,44 +1,110 @@
 name: AutoRelease

 env:
-  # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
+  DRY_RUN: true

 concurrency:
-  group: auto-release
+  group: release
 on:  # yamllint disable-line rule:truthy
-  # schedule:
-  #   - cron: '0 10-16 * * 1-5'
+  # Workflow uses a test bucket for packages and dry run mode (no real releases)
+  schedule:
+    - cron: '0 9 * * *'
+    - cron: '0 15 * * *'
   workflow_dispatch:
+    inputs:
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: true
+        type: boolean

 jobs:
-  CherryPick:
-    runs-on: [self-hosted, style-checker-aarch64]
+  AutoRelease:
+    runs-on: [self-hosted, release-maker]
     steps:
+      - name: DebugInfo
+        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
       - name: Set envs
-        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
         run: |
           cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/cherry_pick
           ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
           ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
           RCSK
-          REPO_OWNER=ClickHouse
-          REPO_NAME=ClickHouse
-          REPO_TEAM=core
           EOF
+      - name: Set DRY_RUN for schedule
+        if: ${{ github.event_name == 'schedule' }}
+        run: echo "DRY_RUN=true" >> "$GITHUB_ENV"
+      - name: Set DRY_RUN for dispatch
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV"
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
         with:
-          clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
-      - name: Auto-release
+      - name: Auto Release Prepare
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 auto_release.py --release-after-days=3
-      - name: Cleanup
-        if: always()
+          python3 auto_release.py --prepare
+          echo "::group::Auto Release Info"
+          python3 -m json.tool /tmp/autorelease_info.json
+          echo "::endgroup::"
+          {
+            echo 'AUTO_RELEASE_PARAMS<<EOF'
+            cat /tmp/autorelease_info.json
+            echo 'EOF'
+          } >> "$GITHUB_ENV"
+      - name: Post Release Branch statuses
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-status
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }}
+        if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }}
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }}
+          type: patch
+          dry-run: ${{ env.DRY_RUN }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+      - name: Post Slack Message
+        if: ${{ !cancelled() }}
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}
+      - name: Clean up
         run: |
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:

.github/workflows/backport_branches.yml (17 changed lines)
@@ -36,10 +36,6 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
      - name: PrepareRunConfig
        id: runconfig
        run: |
@@ -62,7 +58,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
@@ -245,8 +241,9 @@ jobs:
      runner_type: stress-tester
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
    needs:
+      - RunConfig
      - Builds_Report
      - FunctionalStatelessTestAsan
      - FunctionalStatefulTestDebug
@@ -261,6 +258,7 @@ jobs:
        with:
          clear-repository: true
      - name: Finish label
+        if: ${{ !failure() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
@@ -268,3 +266,10 @@ jobs:
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          python3 merge_pr.py
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/create_release.yml (134 changed lines)
@@ -2,7 +2,6 @@ name: CreateRelease

 concurrency:
   group: release
-
 'on':
   workflow_dispatch:
     inputs:
@@ -31,136 +30,15 @@ jobs:
    steps:
      - name: DebugInfo
        uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
-      - name: Set envs
-        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
-          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
-          RCSK
-          RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json
-          EOF
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
-      - name: Prepare Release Info
-        run: |
-          python3 ./tests/ci/create_release.py --prepare-release-info \
-            --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
-            --outfile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-          echo "::group::Release Info"
-          python3 -m json.tool "$RELEASE_INFO_FILE"
-          echo "::endgroup::"
-          release_tag=$(jq -r '.release_tag' "$RELEASE_INFO_FILE")
-          commit_sha=$(jq -r '.commit_sha' "$RELEASE_INFO_FILE")
-          echo "Release Tag: $release_tag"
-          echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV"
-          echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
-      - name: Download All Release Artifacts
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Push Git Tag for the Release
-        run: |
-          python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Push New Release Branch
-        if: ${{ inputs.type == 'new' }}
-        run: |
-          python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Bump CH Version and Update Contributors' List
-        run: |
-          python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Checkout master
-        run: |
-          git checkout master
-      - name: Bump Docker versions, Changelog, Security
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1
-          echo "List versions"
-          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
-          echo "Update docker version"
-          ./utils/list-versions/update-docker-version.sh
-          echo "Generate ChangeLog"
-          export CI=1
-          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \
-            --volume=".:/ClickHouse" clickhouse/style-test \
-            /ClickHouse/tests/ci/changelog.py -v --debug-helpers \
-            --gh-user-or-token="$GH_TOKEN" --jobs=5 \
-            --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
-          git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
-          echo "Generate Security"
-          python3 ./utils/security-generator/generate_security.py > SECURITY.md
-          git diff HEAD
-      - name: Generate ChangeLog
-        if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
-        uses: peter-evans/create-pull-request@v6
-        with:
-          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
-          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
-          commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
-          branch: auto/${{ env.RELEASE_TAG }}
-          assignees: ${{ github.event.sender.login }}  # assign the PR to the tag pusher
-          delete-branch: true
-          title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }}
-          labels: do not test
-          body: |
-            Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }}
-            ### Changelog category (leave one):
-            - Not for changelog (changelog entry is not required)
-      - name: Reset changes if Dry-run
-        if: ${{ inputs.dry-run }}
-        run: |
-          git reset --hard HEAD
-      - name: Checkout back to GITHUB_REF
-        run: |
-          git checkout "$GITHUB_REF_NAME"
-      - name: Create GH Release
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/create_release.py --create-gh-release \
-            --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Export TGZ Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Test TGZ Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Export RPM Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Test RPM Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Export Debian Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Test Debian Packages
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }}
-      - name: Docker clickhouse/clickhouse-server building
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          cd "./tests/ci"
-          export CHECK_NAME="Docker server image"
-          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
-      - name: Docker clickhouse/clickhouse-keeper building
-        if: ${{ inputs.type == 'patch' }}
-        run: |
-          cd "./tests/ci"
-          export CHECK_NAME="Docker keeper image"
-          python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
-      - name: Post Slack Message
-        if: always()
-        run: |
-          echo Slack Message
+      - name: Call Release Action
+        uses: ./.github/actions/release
+        with:
+          ref: ${{ inputs.ref }}
+          type: ${{ inputs.type }}
+          dry-run: ${{ inputs.dry-run }}
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}

.github/workflows/master.yml (41 changed lines)
@@ -33,10 +33,6 @@ jobs:
      #     cd "$GITHUB_WORKSPACE/tests/ci"
      #     echo "Testing the main ci directory"
      #     python3 -m unittest discover -s . -p 'test_*.py'
-      #     for dir in *_lambda/; do
-      #       echo "Testing $dir"
-      #       python3 -m unittest discover -s "$dir" -p 'test_*.py'
-      #     done
      - name: PrepareRunConfig
        id: runconfig
        run: |
@@ -58,7 +54,7 @@ jobs:
  # BuildDockers:
  #   needs: [RunConfig]
  #   if: ${{ !failure() && !cancelled() }}
-  #   uses: ./.github/workflows/reusable_docker.yml
+  #   uses: ./.github/workflows/docker_test_images.yml
  #   with:
  #     data: ${{ needs.RunConfig.outputs.data }}
  # StyleCheck:
@@ -125,34 +121,6 @@ jobs:
      runner_type: style-checker-aarch64
      data: ${{ needs.RunConfig.outputs.data }}

-  MarkReleaseReady:
-    if: ${{ !failure() && !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2]
-    runs-on: [self-hosted, style-checker-aarch64]
-    steps:
-      - name: Debug
-        run: |
-          echo need with different filters
-          cat << 'EOF'
-          ${{ toJSON(needs) }}
-          ${{ toJSON(needs.*.result) }}
-          no failures ${{ !contains(needs.*.result, 'failure') }}
-          no skips ${{ !contains(needs.*.result, 'skipped') }}
-          no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-          EOF
-      - name: Not ready
-        # fail the job to be able to restart it
-        if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
-        run: exit 1
-      - name: Check out repository code
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        uses: ClickHouse/checkout@v1
-      - name: Mark Commit Release Ready
-        if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 mark_release_ready.py
-
  FinishCheck:
    if: ${{ !cancelled() }}
    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
@@ -164,3 +132,10 @@ jobs:
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/merge_queue.yml (16 changed lines)
@@ -30,10 +30,6 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
      - name: PrepareRunConfig
        id: runconfig
        run: |
@@ -51,7 +47,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  StyleCheck:
@@ -97,7 +93,7 @@ jobs:
      data: ${{ needs.RunConfig.outputs.data }}

  CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
    # Test_2 or Test_3 must not have jobs required for Mergeable check
    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
    runs-on: [self-hosted, style-checker-aarch64]
@@ -105,6 +101,14 @@ jobs:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/nightly.yml (16 changed lines)
@@ -40,7 +40,21 @@ jobs:
        } >> "$GITHUB_OUTPUT"
  BuildDockers:
    needs: [RunConfig]
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true
+  CheckWorkflow:
+    if: ${{ !cancelled() }}
+    needs: [RunConfig, BuildDockers]
+    runs-on: [self-hosted, style-checker-aarch64]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/pull_request.yml (16 changed lines)
@@ -48,10 +48,6 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
      - name: PrepareRunConfig
        id: runconfig
        run: |
@@ -72,7 +68,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  StyleCheck:
@@ -155,7 +151,7 @@ jobs:
      data: ${{ needs.RunConfig.outputs.data }}

  CheckReadyForMerge:
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+    if: ${{ !cancelled() }}
    # Test_2 or Test_3 must not have jobs required for Mergeable check
    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
    runs-on: [self-hosted, style-checker-aarch64]
@@ -165,9 +161,17 @@ jobs:
        with:
          filter: tree:0
      - name: Check and set merge status
+        if: ${{ needs.StyleCheck.result == 'success' }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

  ################################# Stage Final #################################
  #

.github/workflows/release_branches.yml (18 changed lines)
@@ -33,10 +33,6 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p 'test_*.py'
-          for dir in *_lambda/; do
-            echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p 'test_*.py'
-          done
      - name: PrepareRunConfig
        id: runconfig
        run: |
@@ -57,7 +53,7 @@ jobs:
  BuildDockers:
    needs: [RunConfig]
    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_docker.yml
+    uses: ./.github/workflows/docker_test_images.yml
    with:
      data: ${{ needs.RunConfig.outputs.data }}
  CompatibilityCheckX86:
@@ -445,8 +441,9 @@ jobs:
      runner_type: stress-tester
      data: ${{ needs.RunConfig.outputs.data }}
  FinishCheck:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !cancelled() }}
    needs:
+      - RunConfig
      - DockerServerImage
      - DockerKeeperImage
      - Builds_Report
@@ -482,9 +479,18 @@ jobs:
        with:
          clear-repository: true
      - name: Finish label
+        if: ${{ !failure() }}
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          # update mergeable check
          python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
          # update overall ci report
          python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+      - name: Check Workflow results
+        run: |
+          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+          cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+          ${{ toJson(needs) }}
+          EOF
+
+          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/reusable_test.yml (2 changed lines)
@@ -102,6 +102,8 @@ jobs:
            --job-name '${{inputs.test_name}}' \
            --run \
            --run-command '''${{inputs.run_command}}'''
+          # shellcheck disable=SC2319
+          echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
      - name: Post run
        if: ${{ !cancelled() }}
        run: |

.yamllint
@@ -14,3 +14,9 @@ rules:
   comments:
     min-spaces-from-content: 1
   document-start: disable
+  colons: disable
+  indentation: disable
+  line-length: disable
+  trailing-spaces: disable
+  truthy: disable
+  new-line-at-end-of-file: disable

base/base/cgroupsv2.cpp
@@ -3,8 +3,9 @@
 #include <base/defines.h>

 #include <fstream>
-#include <sstream>
+#include <string>

+namespace fs = std::filesystem;

 bool cgroupsV2Enabled()
 {
@@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
     {
         /// This file exists iff the host has cgroups v2 enabled.
         auto controllers_file = default_cgroups_mount / "cgroup.controllers";
-        if (!std::filesystem::exists(controllers_file))
+        if (!fs::exists(controllers_file))
             return false;
         return true;
     }
-    catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
+    catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
     {
         return false; /// not logging the exception as most callers fall back to cgroups v1
     }
@@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
     /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
     /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
     /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
-    std::string cgroup = cgroupV2OfProcess();
-    auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+    fs::path cgroup_dir = cgroupV2PathOfProcess();
+    if (cgroup_dir.empty())
+        return false;
     std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
     if (!controllers_file.is_open())
         return false;
@@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
 #endif
 }

-std::string cgroupV2OfProcess()
+fs::path cgroupV2PathOfProcess()
 {
 #if defined(OS_LINUX)
     chassert(cgroupsV2Enabled());
@@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
     /// A simpler way to get the membership is:
     std::ifstream cgroup_name_file("/proc/self/cgroup");
     if (!cgroup_name_file.is_open())
-        return "";
+        return {};
     /// With cgroups v2, there will be a *single* line with prefix "0::/"
     /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
     std::string cgroup;
     std::getline(cgroup_name_file, cgroup);
     static const std::string v2_prefix = "0::/";
     if (!cgroup.starts_with(v2_prefix))
-        return "";
+        return {};
     cgroup = cgroup.substr(v2_prefix.length());
-    return cgroup;
+    /// Note: The 'root' cgroup can have an empty cgroup name, this is valid
+    return default_cgroups_mount / cgroup;
 #else
-    return "";
+    return {};
 #endif
 }

base/base/cgroupsv2.h
@@ -1,7 +1,6 @@
 #pragma once

 #include <filesystem>
-#include <string>

 #if defined(OS_LINUX)
 /// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
 /// Assumes that cgroupsV2Enabled() is enabled.
 bool cgroupsV2MemoryControllerEnabled();

-/// Which cgroup does the process belong to?
-/// Returns an empty string if the cgroup cannot be determined.
+/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
+/// Returns an empty path if the cgroup cannot be determined.
 /// Assumes that cgroupsV2Enabled() is enabled.
-std::string cgroupV2OfProcess();
+std::filesystem::path cgroupV2PathOfProcess();

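The signature change above moves path construction into the callee: callers now receive an absolute path under the cgroups mount rather than a bare cgroup name they had to join with default_cgroups_mount themselves. A minimal caller-side sketch of the new pattern (hypothetical program, not part of this commit; assumes Linux with the default /sys/fs/cgroup mount, and mirrors the updated call sites below):

    #include <base/cgroupsv2.h>

    #include <filesystem>
    #include <fstream>
    #include <iostream>
    #include <string>

    int main()
    {
        if (!cgroupsV2Enabled())
            return 1;
        /// Before: std::string name = cgroupV2OfProcess(); auto dir = default_cgroups_mount / name;
        /// Now the full path comes back directly (empty path on failure):
        std::filesystem::path cgroup_dir = cgroupV2PathOfProcess();
        if (cgroup_dir.empty())
            return 1; /// e.g. /proc/self/cgroup was unreadable
        std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
        std::string controllers;
        std::getline(controllers_file, controllers);
        std::cout << "cgroup: " << cgroup_dir << ", controllers: " << controllers << '\n';
    }
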
base/base/defines.h
@@ -87,10 +87,13 @@
 #    define ASAN_POISON_MEMORY_REGION(a, b)
 #endif

-#if !defined(ABORT_ON_LOGICAL_ERROR)
-#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER)
-#define ABORT_ON_LOGICAL_ERROR
-#endif
+/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors
+/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places.
+#if !defined(DEBUG_OR_SANITIZER_BUILD)
+#    if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \
+        || defined(UNDEFINED_BEHAVIOR_SANITIZER)
+#        define DEBUG_OR_SANITIZER_BUILD
+#    endif
 #endif

 /// chassert(x) is similar to assert(x), but:
@@ -101,7 +104,7 @@
 /// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds,
 /// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy)
 #if !defined(chassert)
-#if defined(ABORT_ON_LOGICAL_ERROR)
+#    if defined(DEBUG_OR_SANITIZER_BUILD)
     // clang-format off
     #include <base/types.h>
     namespace DB

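To make the intent of the rename concrete, a small sketch of a call site that wants extra checking only in debug or sanitizer builds, which is what the guard now literally says (hypothetical function, not from this commit):

    #include <base/defines.h>
    #include <cstddef>

    /// An O(n) sanity check that would be too expensive for release builds.
    void checkSorted(const int * data, size_t size)
    {
    #ifdef DEBUG_OR_SANITIZER_BUILD
        for (size_t i = 1; i < size; ++i)
            chassert(data[i - 1] <= data[i]); /// aborts only in debug/sanitizer builds
    #else
        (void)data; /// release builds skip the check entirely
        (void)size;
    #endif
    }
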
|
@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
|
|||||||
if (!cgroupsV2MemoryControllerEnabled())
|
if (!cgroupsV2MemoryControllerEnabled())
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
std::string cgroup = cgroupV2OfProcess();
|
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
|
||||||
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
|
if (current_cgroup.empty())
|
||||||
|
return {};
|
||||||
|
|
||||||
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
|
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
|
||||||
/// level, try again at the parent level as memory settings are inherited.
|
/// level, try again at the parent level as memory settings are inherited.
|
||||||
|
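The trailing comment describes an inheritance walk up the cgroup tree. A self-contained sketch of that walk (hypothetical helper, not the code from this commit; assumes cgroups v2 mounted at /sys/fs/cgroup, which is default_cgroups_mount in the real sources):

    #include <cstdint>
    #include <filesystem>
    #include <fstream>
    #include <optional>
    #include <string>

    /// Start at the process's cgroup directory and climb towards the mount
    /// point until an explicit numeric limit shows up.
    std::optional<uint64_t> nearestMemoryMax(std::filesystem::path cgroup_dir)
    {
        const std::filesystem::path mount = "/sys/fs/cgroup";
        while (true)
        {
            std::ifstream limit_file(cgroup_dir / "memory.max");
            std::string value;
            if (limit_file >> value && value != "max")
                return std::stoull(value); /// explicit limit set at this level
            if (cgroup_dir == mount)
                return {}; /// reached the mount point without finding a limit
            cgroup_dir = cgroup_dir.parent_path(); /// limits are inherited: retry one level up
        }
    }
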
cmake/autogenerated_versions.txt
@@ -2,11 +2,11 @@

 # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54488)
+SET(VERSION_REVISION 54489)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
+SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
-SET(VERSION_DESCRIBE v24.7.1.1-testing)
+SET(VERSION_DESCRIBE v24.8.1.1-testing)
-SET(VERSION_STRING 24.7.1.1)
+SET(VERSION_STRING 24.8.1.1)

 # end of autochange

|
2
contrib/grpc
vendored
2
contrib/grpc
vendored
@ -1 +1 @@
|
|||||||
Subproject commit f5b7fdc2dff09ada06dbf6c75df298fb40f898df
|
Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
|
@ -34,11 +34,7 @@ if (OS_LINUX)
|
|||||||
# avoid spurious latencies and additional work associated with
|
# avoid spurious latencies and additional work associated with
|
||||||
# MADV_DONTNEED. See
|
# MADV_DONTNEED. See
|
||||||
# https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
|
# https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
|
||||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
|
||||||
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
|
|
||||||
else()
|
|
||||||
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
|
|
||||||
endif()
|
|
||||||
else()
|
else()
|
||||||
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
|
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
|
||||||
endif()
|
endif()
|
||||||
|
contrib/jemalloc-cmake/README
@@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project.
 - Added JEMALLOC_CONFIG_MALLOC_CONF substitution
 - Add musl support (USE_MUSL)
 - Also note, that darwin build requires JEMALLOC_PREFIX, while others do not
+- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled
+
+  CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY)
+  Let's disable it for now, and this means that CLOCK_MONOTONIC will be used,
+  and this should not be a problem, since:
+  - jemalloc does not call clock_gettime() that frequently
+  - the difference is CLOCK_MONOTONIC 20ns and CLOCK_MONOTONIC_COARSE 4ns
+
+  This can be done with the following command:
+
+    gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'

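For context on the trade-off in that note: the quoted 20 ns vs 4 ns figures are the cost of a clock_gettime() call, while the price of the coarse clock is its jiffy-level granularity (and, per the note, possible backward jumps after clock_adjtime(ADJ_FREQUENCY)). A small Linux-only sketch to inspect the granularity side, not part of this commit:

    #include <cstdio>
    #include <ctime>

    int main()
    {
        timespec coarse_res{}, mono_res{};
        clock_getres(CLOCK_MONOTONIC_COARSE, &coarse_res); /// typically the kernel tick, ~1-4 ms
        clock_getres(CLOCK_MONOTONIC, &mono_res);          /// typically 1 ns
        std::printf("CLOCK_MONOTONIC_COARSE resolution: %ld ns\n", coarse_res.tv_nsec);
        std::printf("CLOCK_MONOTONIC resolution:        %ld ns\n", mono_res.tv_nsec);
    }
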
@@ -96,7 +96,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -96,7 +96,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -98,7 +98,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
@@ -99,7 +99,7 @@
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
 
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
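For context, here is a simplified illustration — not jemalloc's actual source — of what the patched macro controls: the per-platform config header decides which clock a monotonic-time reader uses, and with JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE left undefined, the precise clock is chosen instead.

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */  /* disabled by this commit */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1

static uint64_t monotonic_ns(void)
{
    struct timespec ts;
#if defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
    /* cheap to read, but only tick-resolution (typically a few ms) */
    clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
    /* nanosecond resolution, slightly more expensive per call */
    clock_gettime(CLOCK_MONOTONIC, &ts);
#else
    clock_gettime(CLOCK_REALTIME, &ts);
#endif
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)monotonic_ns());
    return 0;
}
```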
contrib/libunwind (vendored)
@@ -1 +1 @@
-Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
+Subproject commit fe854449e24bedfa26e38465b84374312dbd587f
@@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
    "${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
-if (APPLE)
-    set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
-endif ()
 
 set(LIBUNWIND_C_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"
@@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")
 
 target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
 target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
+target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
 # NOTE: from this macros sizeof(unw_context_t)/sizeof(unw_cursor_t) is depends, so it should be set always
 target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)
 
contrib/openssl (vendored)
@@ -1 +1 @@
-Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08
+Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
@@ -1298,7 +1298,6 @@ elseif(ARCH_PPC64LE)
         ${OPENSSL_SOURCE_DIR}/crypto/camellia/camellia.c
         ${OPENSSL_SOURCE_DIR}/crypto/camellia/cmll_cbc.c
         ${OPENSSL_SOURCE_DIR}/crypto/chacha/chacha_enc.c
-        ${OPENSSL_SOURCE_DIR}/crypto/mem_clr.c
         ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_enc.c
         ${OPENSSL_SOURCE_DIR}/crypto/rc4/rc4_skey.c
         ${OPENSSL_SOURCE_DIR}/crypto/sha/keccak1600.c
@@ -26,7 +26,10 @@ RUN apt-get update \
         zstd \
     --yes --no-install-recommends \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
+    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
+    && groupadd --system --gid 1000 clickhouse \
+    && useradd --system --gid 1000 --uid 1000 -m clickhouse
+# ^ For some reason, groupadd and useradd are needed for tests with 'expect', but I don't know, why.
 
 COPY requirements.txt /
 RUN pip3 install --no-cache-dir -r /requirements.txt
@@ -9,7 +9,7 @@ trap 'kill $(jobs -pr) ||:' EXIT
 stage=${stage:-}
 
 # Compiler version, normally set by Dockerfile
-export LLVM_VERSION=${LLVM_VERSION:-17}
+export LLVM_VERSION=${LLVM_VERSION:-18}
 
 # A variable to pass additional flags to CMake.
 # Here we explicitly default it to nothing so that bash doesn't complain about
@@ -33,13 +33,9 @@ RUN apt-get update \
 COPY requirements.txt /
 RUN pip3 install --no-cache-dir -r /requirements.txt
 
-COPY * /
-
 ENV FUZZER_ARGS="-max_total_time=60"
 
 SHELL ["/bin/bash", "-c"]
-CMD set -o pipefail \
-    && timeout -s 9 1h /run_libfuzzer.py 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
 
 # docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer
 
@@ -4,6 +4,9 @@
 source /setup_export_logs.sh
 set -e -x
 
+MAX_RUN_TIME=${MAX_RUN_TIME:-3600}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 3600 : MAX_RUN_TIME))
+
 # Choose random timezone for this test run
 TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
 echo "Choosen random timezone $TZ"
@@ -25,7 +28,7 @@ source /utils.lib
 azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
 
 ./setup_minio.sh stateful
-./mc admin trace clickminio > /test_output/rubbish.log &
+./mc admin trace clickminio > /test_output/minio.log &
 MC_ADMIN_PID=$!
 
 config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@@ -188,8 +191,8 @@ else
         ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
         SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
 
-    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+    clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+    clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
    clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
    clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
 else
@@ -197,7 +200,7 @@ else
     clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 fi
 
 clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
 fi
 
 clickhouse-client --query "SHOW TABLES FROM test"
@@ -242,7 +245,22 @@ function run_tests()
 }
 
 export -f run_tests
-timeout "$MAX_RUN_TIME" bash -c run_tests ||:
+
+function timeout_with_logging() {
+    local exit_code=0
+
+    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
+
+    if [[ "${exit_code}" -eq "124" ]]
+    then
+        echo "The command 'timeout ${*}' has been killed by timeout"
+    fi
+
+    return $exit_code
+}
+
+TIMEOUT=$((MAX_RUN_TIME - 700))
+timeout_with_logging "$TIMEOUT" bash -c run_tests ||:
 
 echo "Files in current directory"
 ls -la ./
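The wrapper added above relies on a GNU coreutils convention: when `timeout` has to kill the command, it exits with status 124, which is distinguishable from the command's own exit codes. A minimal C sketch (assuming GNU `timeout` is on PATH) demonstrating the same check:

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(void)
{
    /* The child sleeps longer than the 1-second limit, so it gets killed. */
    int status = system("timeout 1 sleep 5");

    if (status != -1 && WIFEXITED(status) && WEXITSTATUS(status) == 124)
        printf("killed by timeout (exit code 124)\n");
    else
        printf("finished before the limit\n");
    return 0;
}
```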
@@ -6,18 +6,12 @@ source /setup_export_logs.sh
 # fail on errors, verbose and export all env variables
 set -e -x -a
 
-MAX_RUN_TIME=${MAX_RUN_TIME:-7200}
-MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
+MAX_RUN_TIME=${MAX_RUN_TIME:-9000}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 9000 : MAX_RUN_TIME))
 
 USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
 USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
 
-RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
-
-if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
-    RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
-fi
-
 # Choose random timezone for this test run.
 #
 # NOTE: that clickhouse-test will randomize session_timezone by itself as well
@@ -54,7 +48,7 @@ source /utils.lib
 /usr/share/clickhouse-test/config/install.sh
 
 ./setup_minio.sh stateless
-./mc admin trace clickminio > /test_output/rubbish.log &
+./mc admin trace clickminio > /test_output/minio.log &
 MC_ADMIN_PID=$!
 
 ./setup_hdfs_minicluster.sh
@@ -101,53 +95,6 @@ if [ "$NUM_TRIES" -gt "1" ]; then
     mkdir -p /var/run/clickhouse-server
 fi
 
-# Run a CH instance to execute sequential tests on it in parallel with all other tests.
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
-    mkdir -p /var/run/clickhouse-server3 /etc/clickhouse-server3 /var/lib/clickhouse3
-    cp -r -L /etc/clickhouse-server/* /etc/clickhouse-server3/
-
-    sudo chown clickhouse:clickhouse /var/run/clickhouse-server3 /var/lib/clickhouse3 /etc/clickhouse-server3/
-    sudo chown -R clickhouse:clickhouse /etc/clickhouse-server3/*
-
-    function replace(){
-        sudo find /etc/clickhouse-server3/ -type f -name '*.xml' -exec sed -i "$1" {} \;
-    }
-
-    replace "s|<port>9000</port>|<port>19000</port>|g"
-    replace "s|<port>9440</port>|<port>19440</port>|g"
-    replace "s|<port>9988</port>|<port>19988</port>|g"
-    replace "s|<port>9234</port>|<port>19234</port>|g"
-    replace "s|<port>9181</port>|<port>19181</port>|g"
-    replace "s|<https_port>8443</https_port>|<https_port>18443</https_port>|g"
-    replace "s|<tcp_port>9000</tcp_port>|<tcp_port>19000</tcp_port>|g"
-    replace "s|<tcp_port>9181</tcp_port>|<tcp_port>19181</tcp_port>|g"
-    replace "s|<tcp_port_secure>9440</tcp_port_secure>|<tcp_port_secure>19440</tcp_port_secure>|g"
-    replace "s|<tcp_with_proxy_port>9010</tcp_with_proxy_port>|<tcp_with_proxy_port>19010</tcp_with_proxy_port>|g"
-    replace "s|<mysql_port>9004</mysql_port>|<mysql_port>19004</mysql_port>|g"
-    replace "s|<postgresql_port>9005</postgresql_port>|<postgresql_port>19005</postgresql_port>|g"
-    replace "s|<interserver_http_port>9009</interserver_http_port>|<interserver_http_port>19009</interserver_http_port>|g"
-    replace "s|8123|18123|g"
-    replace "s|/var/lib/clickhouse/|/var/lib/clickhouse3/|g"
-    replace "s|/etc/clickhouse-server/|/etc/clickhouse-server3/|g"
-    # distributed cache
-    replace "s|<tcp_port>10001</tcp_port>|<tcp_port>10003</tcp_port>|g"
-    replace "s|<tcp_port>10002</tcp_port>|<tcp_port>10004</tcp_port>|g"
-
-    sudo -E -u clickhouse /usr/bin/clickhouse server --daemon --config /etc/clickhouse-server3/config.xml \
-    --pid-file /var/run/clickhouse-server3/clickhouse-server.pid \
-    -- --path /var/lib/clickhouse3/ --logger.stderr /var/log/clickhouse-server/stderr3.log \
-    --logger.log /var/log/clickhouse-server/clickhouse-server3.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server3.err.log \
-    --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
-    --prometheus.port 19988 --keeper_server.raft_configuration.server.port 19234 --keeper_server.tcp_port 19181 \
-    --mysql_port 19004 --postgresql_port 19005
-
-    for _ in {1..100}
-    do
-        clickhouse-client --port 19000 --query "SELECT 1" && break
-        sleep 1
-    done
-fi
-
 # simplest way to forward env variables to server
 sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
 
@@ -183,9 +130,6 @@ if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         --keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
         --prometheus.port 29988 \
         --macros.shard s2 # It doesn't work :(
-
-    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
-    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
 fi
 
 if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
@@ -210,9 +154,6 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
         --keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
         --prometheus.port 19988 \
         --macros.replica r2 # It doesn't work :(
-
-    MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
-    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
 fi
 
 # Wait for the server to start, but not for too long.
@@ -223,7 +164,6 @@ do
 done
 
 setup_logs_replication
-
 attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
 
 function fn_exists() {
@@ -284,11 +224,7 @@ function run_tests()
     else
         # All other configurations are OK.
         ADDITIONAL_OPTIONS+=('--jobs')
-        ADDITIONAL_OPTIONS+=('5')
-    fi
-
-    if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
-        ADDITIONAL_OPTIONS+=('--run-sequential-tests-in-parallel')
+        ADDITIONAL_OPTIONS+=('8')
     fi
 
     if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
@@ -373,9 +309,6 @@ done
 # Because it's the simplest way to read it when server has crashed.
 sudo clickhouse stop ||:
 
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
-    sudo clickhouse stop --pid-path /var/run/clickhouse-server3 ||:
-fi
 
 if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
@@ -393,12 +326,6 @@ rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
 rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
 zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
 
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
-    rg -Fa "<Fatal>" /var/log/clickhouse-server3/clickhouse-server.log ||:
-    rg -A50 -Fa "============" /var/log/clickhouse-server3/stderr.log ||:
-    zstd --threads=0 < /var/log/clickhouse-server3/clickhouse-server.log > /test_output/clickhouse-server3.log.zst &
-fi
-
 data_path_config="--path=/var/lib/clickhouse/"
 if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
     # We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
@@ -419,10 +346,6 @@ if [ $failed_to_save_logs -ne 0 ]; then
     do
         clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
 
-        if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
-            clickhouse-local --path /var/lib/clickhouse3/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.3.tsv.zst ||:
-        fi
-
         if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
             clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
             clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
@@ -464,12 +387,6 @@ rm -rf /var/lib/clickhouse/data/system/*/
 tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
 tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
 
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
-    rm -rf /var/lib/clickhouse3/data/system/*/
-    tar -chf /test_output/store.tar /var/lib/clickhouse3/store ||:
-    tar -chf /test_output/metadata.tar /var/lib/clickhouse3/metadata/*.sql ||:
-fi
-
 
 if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
@@ -10,7 +10,7 @@ cd hadoop-3.3.1
 export JAVA_HOME=/usr
 mkdir -p target/test/data
 chown clickhouse ./target/test/data
-sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 &
+sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 &
 
 while ! nc -z localhost 12222; do
     sleep 1
@@ -209,9 +209,9 @@ clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDat
     ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
     SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='$TEMP_POLICY'"
 
-clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
 
 clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
 clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
@@ -17,6 +17,7 @@ ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
 
 RUN apt-get update \
     && apt-get install \
+        sudo \
         apt-transport-https \
         apt-utils \
         ca-certificates \
@@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
 
 The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.
 
-## Install Clang-16
+## Install Clang-18
 
 Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
 ```
@@ -226,15 +226,59 @@ Other IDEs you can use are [Sublime Text](https://www.sublimetext.com/), [Visual
 
 ## Writing Code {#writing-code}
 
-The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/
-
-The Code Style Guide: https://clickhouse.com/docs/en/development/style/
-
-Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries
-
-Writing tests: https://clickhouse.com/docs/en/development/tests/
-
-List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest
+Below you can find some quick links which may be useful when writing code for ClickHouse:
+
+- [ClickHouse architecture description](https://clickhouse.com/docs/en/development/architecture/).
+- [The code style guide](https://clickhouse.com/docs/en/development/style/).
+- [Adding third-party libraries](https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries)
+- [Writing tests](https://clickhouse.com/docs/en/development/tests/)
+- [List of open issues](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest)
+
+## Writing Documentation {#writing-documentation}
+
+As part of every pull request which adds a new feature, it is necessary to write documentation for it. If you'd like to preview your documentation changes the instructions for how to build the documentation page locally are available in the README.md file [here](https://github.com/ClickHouse/clickhouse-docs). When adding a new function to ClickHouse you can use the template below as a guide:
+
+```markdown
+# newFunctionName
+
+A short description of the function goes here. It should describe briefly what it does and a typical usage case.
+
+**Syntax**
+
+\```sql
+newFunctionName(arg1, arg2[, arg3])
+\```
+
+**Arguments**
+
+- `arg1` — Description of the argument. [DataType](../data-types/float.md)
+- `arg2` — Description of the argument. [DataType](../data-types/float.md)
+- `arg3` — Description of optional argument (optional). [DataType](../data-types/float.md)
+
+**Implementation Details**
+
+A description of implementation details if relevant.
+
+**Returned value**
+
+- Returns {insert what the function returns here}. [DataType](../data-types/float.md)
+
+**Example**
+
+Query:
+
+\```sql
+SELECT 'write your example query here';
+\```
+
+Response:
+
+\```response
+┌───────────────────────────────────┐
+│ the result of the query           │
+└───────────────────────────────────┘
+\```
+```
 
 ## Test Data {#test-data}
 
@@ -15,7 +15,7 @@ You have four options for getting up and running with ClickHouse:
 
 - **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse
 - **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
-- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture
+- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
 - **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub
 
 ## ClickHouse Cloud
@@ -185,6 +185,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--format, -f` – Use the specified default format to output the result.
 - `--vertical, -E` – If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `–format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
 - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.
+- `--memory-usage` – If specified, print memory usage to ‘stderr’ in non-interactive mode. Possible values: 'none' - do not print memory usage, 'default' - print number of bytes, 'readable' - print memory usage in human-readable format.
 - `--stacktrace` – If specified, also print the stack trace if an exception occurs.
 - `--config-file` – The name of the configuration file.
 - `--secure` – If specified, will connect to server over secure connection (TLS). You might need to configure your CA certificates in the [configuration file](#configuration_files). The available configuration settings are the same as for [server-side TLS configuration](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl).
@@ -339,7 +340,7 @@ clickhouse-client clickhouse://some_user%40some_mail.com@localhost:9000
 Connect to one of provides hosts: `192.168.1.15`, `192.168.1.25`.
 
 ``` bash
 clickhouse-client clickhouse://192.168.1.15,192.168.1.25
 ```
 
 ### Configuration Files {#configuration_files}
@@ -367,7 +368,7 @@ Example of a config file:
 ```
 
 Or the same config in a YAML format:
 
 ```yaml
 user: username
 password: 'password'
@@ -67,6 +67,7 @@ The supported formats are:
 | [Prometheus](#prometheus) | ✗ | ✔ |
 | [Protobuf](#protobuf) | ✔ | ✔ |
 | [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
+| [ProtobufList](#protobuflist) | ✔ | ✔ |
 | [Avro](#data-format-avro) | ✔ | ✔ |
 | [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
 | [Parquet](#data-format-parquet) | ✔ | ✔ |
@@ -1952,6 +1953,35 @@ SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf
 
 Same as [Protobuf](#protobuf) but for storing/parsing single Protobuf message without length delimiters.
 
+## ProtobufList {#protobuflist}
+
+Similar to Protobuf but rows are represented as a sequence of sub-messages contained in a message with fixed name "Envelope".
+
+Usage example:
+
+``` sql
+SELECT * FROM test.table FORMAT ProtobufList SETTINGS format_schema = 'schemafile:MessageType'
+```
+
+``` bash
+cat protobuflist_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT ProtobufList SETTINGS format_schema='schemafile:MessageType'"
+```
+
+where the file `schemafile.proto` looks like this:
+
+``` protobuf
+syntax = "proto3";
+message Envelope {
+  message MessageType {
+    string name = 1;
+    string surname = 2;
+    uint32 birthDate = 3;
+    repeated string phoneNumbers = 4;
+  };
+  MessageType row = 1;
+};
+```
+
 ## Avro {#data-format-avro}
 
 [Apache Avro](https://avro.apache.org/) is a row-oriented data serialization framework developed within Apache’s Hadoop project.
@@ -6,7 +6,20 @@ sidebar_label: MySQL Interface
 
 # MySQL Interface
 
-ClickHouse supports the MySQL wire protocol. This allow tools that are MySQL-compatible to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).
+ClickHouse supports the MySQL wire protocol. This allows certain clients that do not have native ClickHouse connectors to leverage the MySQL protocol instead, and it has been validated with the following BI tools:
+
+- [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)
+- [Tableau Online](../integrations/tableau-online)
+- [QuickSight](../integrations/quicksight)
+
+If you are trying other untested clients or integrations, keep in mind that there could be the following limitations:
+
+- SSL implementation might not be fully compatible; there could be potential [TLS SNI](https://www.cloudflare.com/learning/ssl/what-is-sni/) issues.
+- A particular tool might require dialect features (e.g., MySQL-specific functions or settings) that are not implemented yet.
+
+If there is a native driver available (e.g., [DBeaver](../integrations/dbeaver)), it is always preferred to use it instead of the MySQL interface. Additionally, while most of the MySQL language clients should work fine, the MySQL interface is not guaranteed to be a drop-in replacement for a codebase with existing MySQL queries.
+
+If your use case involves a particular tool that does not have a native ClickHouse driver, and you would like to use it via the MySQL interface and you found certain incompatibilities - please [create an issue](https://github.com/ClickHouse/ClickHouse/issues) in the ClickHouse repository.
 
 ## Enabling the MySQL Interface On ClickHouse Cloud
 
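To make the compatibility claim concrete, here is an illustrative C sketch — not from the commit. It assumes libmysqlclient is installed (build with `gcc demo.c $(mysql_config --cflags --libs)`) and a ClickHouse server whose MySQL interface listens on the default `mysql_port` 9004 with user `default` and an empty password; a stock MySQL client library can talk to ClickHouse because the server speaks the MySQL wire protocol:

```c
#include <stdio.h>
#include <mysql.h>

int main(void)
{
    MYSQL *conn = mysql_init(NULL);
    if (conn == NULL)
        return 1;

    /* Connect to ClickHouse's MySQL-protocol endpoint, not a MySQL server. */
    if (!mysql_real_connect(conn, "127.0.0.1", "default", "", NULL, 9004, NULL, 0)) {
        fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
        return 1;
    }

    if (mysql_query(conn, "SELECT version()") == 0) {
        MYSQL_RES *result = mysql_store_result(conn);
        MYSQL_ROW row = mysql_fetch_row(result);
        if (row != NULL)
            printf("ClickHouse version: %s\n", row[0]);
        mysql_free_result(result);
    }

    mysql_close(conn);
    return 0;
}
```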
@@ -124,7 +124,7 @@ which is equal to
 
 #### Default values for from_env and from_zk attributes
 
-It's possible to set the default value and substitute it only if the environment variable or zookeeper node is set using `replace="1"`.
+It's possible to set the default value and substitute it only if the environment variable or zookeeper node is set using `replace="1"` (must be declared before from_env).
 
 With previous example, but `MAX_QUERY_SIZE` is unset:
 
@@ -132,7 +132,7 @@ With previous example, but `MAX_QUERY_SIZE` is unset:
 <clickhouse>
     <profiles>
         <default>
-            <max_query_size from_env="MAX_QUERY_SIZE" replace="1">150000</max_query_size>
+            <max_query_size replace="1" from_env="MAX_QUERY_SIZE">150000</max_query_size>
         </default>
     </profiles>
 </clickhouse>
@@ -22,6 +22,21 @@ Structure of the `users` section:
         <!-- Or -->
         <password_sha256_hex></password_sha256_hex>
 
+        <ssh_keys>
+            <ssh_key>
+                <type>ssh-ed25519</type>
+                <base64_key>AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj</base64_key>
+            </ssh_key>
+            <ssh_key>
+                <type>ecdsa-sha2-nistp256</type>
+                <base64_key>AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNxeV2uN5UY6CUbCzTA1rXfYimKQA5ivNIqxdax4bcMXz4D0nSk2l5E1TkR5mG8EBWtmExSPbcEPJ8V7lyWWbA8=</base64_key>
+            </ssh_key>
+            <ssh_key>
+                <type>ssh-rsa</type>
+                <base64_key>AAAAB3NzaC1yc2EAAAADAQABAAABgQCpgqL1SHhPVBOTFlOm0pu+cYBbADzC2jL41sPMawYCJHDyHuq7t+htaVVh2fRgpAPmSEnLEC2d4BEIKMtPK3bfR8plJqVXlLt6Q8t4b1oUlnjb3VPA9P6iGcW7CV1FBkZQEVx8ckOfJ3F+kI5VsrRlEDgiecm/C1VPl0/9M2llW/mPUMaD65cM9nlZgM/hUeBrfxOEqM11gDYxEZm1aRSbZoY4dfdm3vzvpSQ6lrCrkjn3X2aSmaCLcOWJhfBWMovNDB8uiPuw54g3ioZ++qEQMlfxVsqXDGYhXCrsArOVuW/5RbReO79BvXqdssiYShfwo+GhQ0+aLWMIW/jgBkkqx/n7uKLzCMX7b2F+aebRYFh+/QXEj7SnihdVfr9ud6NN3MWzZ1ltfIczlEcFLrLJ1Yq57wW6wXtviWh59WvTWFiPejGjeSjjJyqqB49tKdFVFuBnIU5u/bch2DXVgiAEdQwUrIp1ACoYPq22HFFAYUJrL32y7RxX3PGzuAv3LOc=</base64_key>
+            </ssh_key>
+        </ssh_keys>
+
         <access_management>0|1</access_management>
 
         <networks incl="networks" replace="replace">
@@ -79,6 +94,24 @@ Password can be specified in plaintext or in SHA256 (hex format).
 
 The first line of the result is the password. The second line is the corresponding double SHA1 hash.
 
+### username/ssh-key {#user-sshkey}
+
+This setting allows authenticating with SSH keys.
+
+Given an SSH key (as generated by `ssh-keygen`) like
+```
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj john@example.com
+```
+The `ssh_key` element is expected to be
+```
+<ssh_key>
+    <type>ssh-ed25519</type>
+    <base64_key>AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj</base64_key>
+</ssh_key>
+```
+
+Substitute `ssh-ed25519` with `ssh-rsa` or `ecdsa-sha2-nistp256` for the other supported algorithms.
+
 ### access_management {#access_management-user-setting}
 
 This setting enables or disables using of SQL-driven [access control and account management](../../guides/sre/user-management/index.md#access-control) for the user.
docs/en/operations/system-tables/detached_tables.md (new file)
@@ -0,0 +1,35 @@
+---
+slug: /en/operations/system-tables/detached_tables
+---
+# detached_tables
+
+Contains information about each detached table.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
+
+- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
+
+- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
+
+- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system.
+
+- `is_permanently` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag indicating that the table was detached PERMANENTLY.
+
+
+**Example**
+
+```sql
+SELECT * FROM system.detached_tables FORMAT Vertical;
+```
+
+```text
+Row 1:
+──────
+database:       base
+table:          t1
+uuid:           81b1c20a-b7c6-4116-a2ce-7583fb6b6736
+metadata_path:  /var/lib/clickhouse/store/461/461cf698-fd0b-406d-8c01-5d8fd5748a91/t1.sql
+is_permanently: 1
+```
@@ -9,7 +9,6 @@ Columns:
 
 - `name` ([String](../../sql-reference/data-types/string.md)) – The name of the function.
 - `is_aggregate` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Whether the function is an aggregate function.
-- `is_deterministic` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md))) - Whether the function is deterministic.
 - `case_insensitive`, ([UInt8](../../sql-reference/data-types/int-uint.md)) - Whether the function name can be used case-insensitively.
 - `alias_to`, ([String](../../sql-reference/data-types/string.md)) - The original function name, if the function name is an alias.
 - `create_query`, ([String](../../sql-reference/data-types/enum.md)) - Unused.
@@ -96,3 +96,22 @@ Result:
 │ 1 │ [2] │ [[4,1]] │
 └───────────┴───────────┴───────────┘
 ```
+
+## Reading nested subcolumns from Array
+
+If nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type with the same subcolumn names. The type of a subcolumn will be `Array` of the type of original subcolumn.
+
+**Example**
+
+```sql
+CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);
+SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) from t_arr;
+```
+
+```text
+┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
+│ [1,2]      │ Array(UInt32)          │ ['Hello','World']         │ Array(String)          │
+│ [3,4,5]    │ Array(UInt32)          │ ['This','is','subcolumn'] │ Array(String)          │
+└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
+```
@@ -56,7 +56,6 @@ Functions:
 
 ## Related content
 
-- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://altinity.com/blog/2020-5-20-reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer)
-- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
 - Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
 - Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
+- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
@@ -76,7 +76,7 @@ WHERE macro = 'test';
 └───────┴──────────────┘
 ```
 
-## FQDN
+## fqdn
 
 Returns the fully qualified domain name of the ClickHouse server.
 
@@ -86,7 +86,7 @@ Returns the fully qualified domain name of the ClickHouse server.
 fqdn();
 ```
 
-Aliases: `fullHostName`, 'FQDN'.
+Aliases: `fullHostName`, `FQDN`.
 
 **Returned value**
 
@@ -2984,6 +2984,66 @@ Result:
 └─────────┘
 ```
 
+## partitionID
+
+Computes the [partition ID](../../engines/table-engines/mergetree-family/custom-partitioning-key.md).
+
+:::note
+This function is slow and should not be called for a large number of rows.
+:::
+
+**Syntax**
+
+```sql
+partitionID(x[, y, ...]);
+```
+
+**Arguments**
+
+- `x` — Column for which to return the partition ID.
+- `y, ...` — Remaining N columns for which to return the partition ID (optional).
+
+**Returned Value**
+
+- Partition ID that the row would belong to. [String](../data-types/string.md).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab
+(
+  i int,
+  j int
+)
+ENGINE = MergeTree
+PARTITION BY i
+ORDER BY tuple();
+
+INSERT INTO tab VALUES (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6);
+
+SELECT i, j, partitionID(i), _partition_id FROM tab ORDER BY i, j;
+```
+
+Result:
+
+```response
+┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
+│ 1 │ 1 │ 1              │ 1             │
+│ 1 │ 2 │ 1              │ 1             │
+│ 1 │ 3 │ 1              │ 1             │
+└───┴───┴────────────────┴───────────────┘
+┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
+│ 2 │ 4 │ 2              │ 2             │
+│ 2 │ 5 │ 2              │ 2             │
+│ 2 │ 6 │ 2              │ 2             │
+└───┴───┴────────────────┴───────────────┘
+```
+
+
 ## shardNum
 
 Returns the index of a shard which processes a part of data in a distributed query. Indices are started from `1`.
@@ -567,12 +567,13 @@ While no standard or recommendation exists for the epoch of Snowflake IDs, imple
 **Syntax**
 
 ``` sql
-generateSnowflakeID([expr])
+generateSnowflakeID([expr, [machine_id]])
 ```
 
 **Arguments**
 
 - `expr` — An arbitrary [expression](../../sql-reference/syntax.md#syntax-expressions) used to bypass [common subexpression elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) if the function is called multiple times in a query. The value of the expression has no effect on the returned Snowflake ID. Optional.
+- `machine_id` — A machine ID, the lowest 10 bits are used. [Int64](../data-types/int-uint.md). Optional.
 
 **Returned value**
 
@@ -608,6 +609,16 @@ SELECT generateSnowflakeID(1), generateSnowflakeID(2);
 └────────────────────────┴────────────────────────┘
 ```
 
+**Example with expression and a machine ID**
+
+```
+SELECT generateSnowflakeID('expr', 1);
+
+┌─generateSnowflakeID('expr', 1)─┐
+│            7201148511606784002 │
+└────────────────────────────────┘
+```
+
 ## snowflakeToDateTime
 
 :::warning
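Assuming the standard Snowflake layout (a 12-bit sequence counter in the lowest bits with the 10-bit machine ID directly above it), the machine ID passed above can be read back out of the generated value; a sketch:

```sql
-- Extract bits 12..21, where the machine ID is assumed to live; expected to return 1.
SELECT bitAnd(bitShiftRight(generateSnowflakeID('expr', 1), 12), 1023) AS machine_id;
```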
@@ -297,7 +297,7 @@ Algorithm requires the special column in tables. This column:
 
 - Must contain an ordered sequence.
 - Can be one of the following types: [Int, UInt](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md), [Decimal](../../../sql-reference/data-types/decimal.md).
-- Can’t be the only column in the `JOIN` clause.
+- For `hash` join algorithm it can’t be the only column in the `JOIN` clause.
 
 Syntax `ASOF JOIN ... ON`:
 
@@ -337,7 +337,8 @@ For example, consider the following tables:
 `ASOF JOIN` can take the timestamp of a user event from `table_1` and find an event in `table_2` where the timestamp is closest to the timestamp of the event from `table_1` corresponding to the closest match condition. Equal timestamp values are the closest if available. Here, the `user_id` column can be used for joining on equality and the `ev_time` column can be used for joining on the closest match. In our example, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` can’t be joined.
 
 :::note
-`ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
+`ASOF JOIN` is supported only by `hash` and `full_sorting_merge` join algorithms.
+It's **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
 :::
 
 ## PASTE JOIN Usage
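Because of the algorithm restriction noted above, a query can pick one of the supported algorithms explicitly; a minimal sketch, assuming the `table_1`/`table_2` tables from the surrounding example:

```sql
-- Pick one of the two algorithms that support ASOF JOIN.
SET join_algorithm = 'full_sorting_merge';

SELECT t1.user_id, t1.ev_time, t2.ev_time
FROM table_1 AS t1
ASOF JOIN table_2 AS t2
ON t1.user_id = t2.user_id AND t1.ev_time >= t2.ev_time;
```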
@@ -1,6 +1,6 @@
 ---
 slug: /en/sql-reference/table-functions/azureBlobStorageCluster
-sidebar_position: 55
+sidebar_position: 15
 sidebar_label: azureBlobStorageCluster
 title: "azureBlobStorageCluster Table Function"
 ---
@@ -6,38 +6,38 @@ sidebar_label: Playground
 
 # ClickHouse Playground {#clickhouse-playground}
 
-[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.
-Several example datasets are available in Playground.
+[ClickHouse Playground](https://play.clickhouse.com/play?user=play) позволяет пользователям экспериментировать с ClickHouse, выполняя запросы мгновенно, без необходимости настройки сервера или кластера.
+В Playground доступны несколько примеров наборов данных.
 
-You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).
+Вы можете выполнять запросы к Playground, используя любой HTTP-клиент, например [curl](https://curl.haxx.se) или [wget](https://www.gnu.org/software/wget/), или настроить соединение, используя драйверы [JDBC](../interfaces/jdbc.md) или [ODBC](../interfaces/odbc.md). Дополнительную информацию о программных продуктах, поддерживающих ClickHouse, можно найти [здесь](../interfaces/index.md).
 
-## Credentials {#credentials}
+## Учетные данные {#credentials}
 
-| Parameter | Value |
+| Параметр | Значение |
 |:--------------------|:-----------------------------------|
-| HTTPS endpoint | `https://play.clickhouse.com:443/` |
-| Native TCP endpoint | `play.clickhouse.com:9440` |
-| User | `explorer` or `play` |
-| Password | (empty) |
+| HTTPS-адрес | `https://play.clickhouse.com:443/` |
+| TCP-адрес | `play.clickhouse.com:9440` |
+| Пользователь | `explorer` или `play` |
+| Пароль | (пусто) |
 
-## Limitations {#limitations}
+## Ограничения {#limitations}
 
-The queries are executed as a read-only user. It implies some limitations:
+Запросы выполняются от имени пользователя с правами только на чтение. Это предполагает некоторые ограничения:
 
-- DDL queries are not allowed
-- INSERT queries are not allowed
+- DDL-запросы не разрешены
+- INSERT-запросы не разрешены
 
-The service also have quotas on its usage.
+Сервис также имеет квоты на использование.
 
-## Examples {#examples}
+## Примеры {#examples}
 
-HTTPS endpoint example with `curl`:
+Пример использования HTTPS-адреса с `curl`:
 
-``` bash
+```bash
 curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'"
 ```
 
-TCP endpoint example with [CLI](../interfaces/cli.md):
+Пример использования TCP-адреса с [CLI](../interfaces/cli.md):
 
 ``` bash
 clickhouse client --secure --host play.clickhouse.com --user explorer
@@ -356,7 +356,7 @@ sidebar_label: "\u53D8\u66F4\u65E5\u5FD7"
 
 #### 新功能 {#new-feature-1}
 
-- 添加 `deduplicate_blocks_in_dependent_materialized_views` 用于控制具有实例化视图的表中幂等插入的行为的选项。 这个新功能是由Altinity的特殊要求添加到错误修正版本中的。
+- 添加 `deduplicate_blocks_in_dependent_materialized_views` 用于控制具有实例化视图的表中幂等插入的行为的选项。
 [#9070](https://github.com/ClickHouse/ClickHouse/pull/9070) [(urykhy)](https://github.com/urykhy)
 
 ### ClickHouse版本v20.1.2.4,2020-01-22 {#clickhouse-release-v20-1-2-4-2020-01-22}
@@ -82,14 +82,14 @@ FROM LEFT_RIGHT
 SELECT
     left,
     right,
-    if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller
+    if(left < right, 'left is smaller than right', 'right is smaller or equal than left') AS is_smaller
 FROM LEFT_RIGHT
 WHERE isNotNull(left) AND isNotNull(right)
 
 ┌─left─┬─right─┬─is_smaller──────────────────────────┐
 │    1 │     3 │ left is smaller than right          │
-│    2 │     2 │ right is greater or equal than left │
-│    3 │     1 │ right is greater or equal than left │
+│    2 │     2 │ right is smaller or equal than left │
+│    3 │     1 │ right is smaller or equal than left │
 └──────┴───────┴─────────────────────────────────────┘
 ```
 
@@ -4,6 +4,9 @@ if (USE_CLANG_TIDY)
     set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
 endif ()
 
+set(MAX_LINKER_MEMORY 3500)
+include(../cmake/limit_jobs.cmake)
+
 include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)
 
 # The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.),
@@ -24,9 +24,8 @@
 #include <Common/TerminalSize.h>
 #include <Common/config_version.h>
 #include <Common/formatReadable.h>
+#include <Core/Settings.h>
 #include <Columns/ColumnString.h>
-#include <Poco/Util/Application.h>
 
 #include <IO/ReadBufferFromString.h>
 #include <IO/ReadHelpers.h>
@@ -49,6 +48,8 @@
 #include <Formats/registerFormats.h>
 #include <Formats/FormatFactory.h>
 
+#include <Poco/Util/Application.h>
+
 namespace fs = std::filesystem;
 using namespace std::literals;
 
@@ -185,6 +186,8 @@ void Client::parseConnectionsCredentials(Poco::Util::AbstractConfiguration & con
             history_file = home_path + "/" + history_file.substr(1);
         config.setString("history_file", history_file);
     }
+    if (config.has(prefix + ".accept-invalid-certificate"))
+        config.setBool("accept-invalid-certificate", config.getBool(prefix + ".accept-invalid-certificate"));
 }
 
 if (!connection_name.empty() && !connection_found)
@@ -276,6 +279,12 @@ void Client::initialize(Poco::Util::Application & self)
     else if (config().has("connection"))
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "--connection was specified, but config does not exist");
 
+    if (config().has("accept-invalid-certificate"))
+    {
+        config().setString("openSSL.client.invalidCertificateHandler.name", "AcceptCertificateHandler");
+        config().setString("openSSL.client.verificationMode", "none");
+    }
+
     /** getenv is thread-safe in Linux glibc and in all sane libc implementations.
       * But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer.
       *
@@ -730,7 +739,7 @@ bool Client::processWithFuzzing(const String & full_query)
     }
     if (auto *q = orig_ast->as<ASTSetQuery>())
     {
-        if (auto *setDialect = q->changes.tryGet("dialect"); setDialect && setDialect->safeGet<String>() == "kusto")
+        if (auto *set_dialect = q->changes.tryGet("dialect"); set_dialect && set_dialect->safeGet<String>() == "kusto")
             return true;
     }
 
@@ -1,5 +1,6 @@
 <!-- Config set into /etc/clickhouse-client/. It's used if no other configs are found. -->
 <config>
+    <!-- Shorthand for self-signed combination in openSSL section below: <accept-invalid-certificate>1</accept-invalid-certificate> -->
     <openSSL>
         <client> <!-- Used for connection to server's secure tcp port -->
             <loadDefaultCAFile>true</loadDefaultCAFile>
@@ -72,6 +73,7 @@
 
          Default: "hostname" will be used. -->
     <name>default</name>
+    <!-- For self-signed server certificate when connecting to secure tcp: <accept-invalid-certificate>1</accept-invalid-certificate> -->
     <!-- Host that will be used for connection. -->
     <hostname>127.0.0.1</hostname>
     <port>9000</port>
@@ -45,16 +45,17 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
     keeper_context->setDigestEnabled(true);
     keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>()));
 
-    DB::KeeperStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);
+    /// TODO(hanfei): support rocksdb here
+    DB::KeeperMemoryStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);
 
     DB::deserializeKeeperStorageFromSnapshotsDir(storage, options["zookeeper-snapshots-dir"].as<std::string>(), logger);
     storage.initializeSystemNodes();
 
     DB::deserializeLogsAndApplyToStorage(storage, options["zookeeper-logs-dir"].as<std::string>(), logger);
     DB::SnapshotMetadataPtr snapshot_meta = std::make_shared<DB::SnapshotMetadata>(storage.getZXID(), 1, std::make_shared<nuraft::cluster_config>());
-    DB::KeeperStorageSnapshot snapshot(&storage, snapshot_meta);
+    DB::KeeperStorageSnapshot<DB::KeeperMemoryStorage> snapshot(&storage, snapshot_meta);
 
-    DB::KeeperSnapshotManager manager(1, keeper_context);
+    DB::KeeperSnapshotManager<DB::KeeperMemoryStorage> manager(1, keeper_context);
     auto snp = manager.serializeSnapshotToBuffer(snapshot);
     auto file_info = manager.serializeSnapshotBufferToDisk(*snp, storage.getZXID());
     std::cout << "Snapshot serialized to path:" << fs::path(file_info->disk->getPath()) / file_info->path << std::endl;
@@ -52,6 +52,10 @@
 # include <Server/CertificateReloader.h>
 #endif
 
+#if USE_GWP_ASAN
+# include <Common/GWPAsan.h>
+#endif
+
 #include <Server/ProtocolServerAdapter.h>
 #include <Server/KeeperTCPHandlerFactory.h>
 
@@ -639,6 +643,10 @@ try
         tryLogCurrentException(log, "Disabling cgroup memory observer because of an error during initialization");
     }
 
+#if USE_GWP_ASAN
+    GWPAsan::initFinished();
+#endif
+
 
     LOG_INFO(log, "Ready for connections.");
 
@@ -1,5 +1,7 @@
 #include "LibraryBridge.h"
 
+#include <iostream>
+
 int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
 {
     DB::LibraryBridge app;
@@ -6,6 +6,7 @@
 #include "ExternalDictionaryLibraryHandlerFactory.h"
 
 #include <Formats/FormatFactory.h>
+#include <IO/Operators.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/ReadHelpers.h>
 #include <Common/BridgeProtocolVersion.h>
@@ -423,6 +423,7 @@ void LocalServer::connect()
 {
     connection_parameters = ConnectionParameters(getClientConfiguration(), "localhost");
 
+    /// This is needed for table function input(...).
     ReadBuffer * in;
     auto table_file = getClientConfiguration().getString("table-file", "-");
     if (table_file == "-" || table_file == "stdin")
@@ -3,11 +3,12 @@
 #include <Client/ClientBase.h>
 #include <Client/LocalConnection.h>
 
-#include <Common/StatusFile.h>
-#include <Common/InterruptListener.h>
-#include <Loggers/Loggers.h>
+#include <Core/ServerSettings.h>
 #include <Core/Settings.h>
 #include <Interpreters/Context.h>
+#include <Loggers/Loggers.h>
+#include <Common/InterruptListener.h>
+#include <Common/StatusFile.h>
 
 #include <filesystem>
 #include <memory>
@@ -2,6 +2,7 @@
 
 #if USE_ODBC
 
+#include <Core/Settings.h>
 #include <DataTypes/DataTypeFactory.h>
 #include <DataTypes/DataTypeNullable.h>
 #include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
@@ -11,6 +11,7 @@
 #include <Poco/Net/HTTPServerResponse.h>
 #include <Common/BridgeProtocolVersion.h>
 #include <Common/logger_useful.h>
+#include <Core/Settings.h>
 #include "getIdentifierQuote.h"
 #include "validateODBCConnectionString.h"
 #include "ODBCPooledConnectionFactory.h"
@@ -9,6 +9,7 @@
 #include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
 #include <IO/WriteHelpers.h>
 #include <IO/ReadHelpers.h>
+#include <Core/Settings.h>
 #include <IO/ReadBufferFromIStream.h>
 #include <Poco/Net/HTTPServerRequest.h>
 #include <Poco/Net/HTTPServerResponse.h>
@@ -1,5 +1,7 @@
 #include "ODBCBridge.h"
 
+#include <iostream>
+
 int mainEntryClickHouseODBCBridge(int argc, char ** argv)
 {
     DB::ODBCBridge app;
@@ -2,8 +2,10 @@
 
 #if USE_ODBC
 
+#include <Core/Settings.h>
 #include <Server/HTTP/HTMLForm.h>
 #include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
+#include <IO/Operators.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
 #include <Poco/Net/HTTPServerRequest.h>
@@ -68,6 +68,7 @@
 #include <Interpreters/registerInterpreters.h>
 #include <Interpreters/JIT/CompiledExpressionCache.h>
 #include <Access/AccessControl.h>
+#include <Storages/MergeTree/MergeTreeSettings.h>
 #include <Storages/StorageReplicatedMergeTree.h>
 #include <Storages/System/attachSystemTables.h>
 #include <Storages/System/attachInformationSchemaTables.h>
@@ -2212,6 +2213,7 @@ try
     CannotAllocateThreadFaultInjector::setFaultProbability(server_settings.cannot_allocate_thread_fault_injection_probability);
 
 #if USE_GWP_ASAN
+    GWPAsan::initFinished();
     GWPAsan::setForceSampleProbability(server_settings.gwp_asan_force_sample_probability);
 #endif
 
@@ -2730,8 +2732,7 @@ void Server::createInterserverServers(
 
 void Server::stopServers(
     std::vector<ProtocolServerAdapter> & servers,
-    const ServerType & server_type
-) const
+    const ServerType & server_type) const
 {
     LoggerRawPtr log = &logger();
 
@@ -129,8 +129,7 @@ private:
 
     void stopServers(
         std::vector<ProtocolServerAdapter> & servers,
-        const ServerType & server_type
-    ) const;
+        const ServerType & server_type) const;
 };
 
 }
@@ -21,7 +21,6 @@
 #include <Backups/BackupEntriesCollector.h>
 #include <Backups/RestorerFromBackup.h>
 #include <Core/Settings.h>
-#include <Storages/MergeTree/MergeTreeSettings.h>
 #include <base/defines.h>
 #include <IO/Operators.h>
 #include <Common/re2.h>
@@ -118,10 +118,10 @@ AggregateFunctionPtr createAggregateFunctionAnalysisOfVariance(const std::string
 void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory & factory)
 {
     AggregateFunctionProperties properties = { .is_order_dependent = false };
-    factory.registerFunction("analysisOfVariance", {createAggregateFunctionAnalysisOfVariance, properties}, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("analysisOfVariance", {createAggregateFunctionAnalysisOfVariance, properties}, AggregateFunctionFactory::Case::Insensitive);
 
     /// This is widely used term
-    factory.registerAlias("anova", "analysisOfVariance", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("anova", "analysisOfVariance", AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -361,9 +361,9 @@ void registerAggregateFunctionsAny(AggregateFunctionFactory & factory)
     AggregateFunctionProperties default_properties = {.returns_default_when_only_null = false, .is_order_dependent = true};
 
     factory.registerFunction("any", {createAggregateFunctionAny, default_properties});
-    factory.registerAlias("any_value", "any", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("first_value", "any", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("any_value", "any", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("first_value", "any", AggregateFunctionFactory::Case::Insensitive);
     factory.registerFunction("anyLast", {createAggregateFunctionAnyLast, default_properties});
-    factory.registerAlias("last_value", "anyLast", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("last_value", "anyLast", AggregateFunctionFactory::Case::Insensitive);
 }
 }
@@ -221,11 +221,11 @@ void registerAggregateFunctionsAnyRespectNulls(AggregateFunctionFactory & factor
         = {.returns_default_when_only_null = false, .is_order_dependent = true, .is_window_function = true};
 
     factory.registerFunction("any_respect_nulls", {createAggregateFunctionAnyRespectNulls, default_properties_for_respect_nulls});
-    factory.registerAlias("any_value_respect_nulls", "any_respect_nulls", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("first_value_respect_nulls", "any_respect_nulls", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("any_value_respect_nulls", "any_respect_nulls", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("first_value_respect_nulls", "any_respect_nulls", AggregateFunctionFactory::Case::Insensitive);
 
     factory.registerFunction("anyLast_respect_nulls", {createAggregateFunctionAnyLastRespectNulls, default_properties_for_respect_nulls});
-    factory.registerAlias("last_value_respect_nulls", "anyLast_respect_nulls", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("last_value_respect_nulls", "anyLast_respect_nulls", AggregateFunctionFactory::Case::Insensitive);
 
     /// Must happen after registering any and anyLast
     factory.registerNullsActionTransformation("any", "any_respect_nulls");
@@ -46,6 +46,6 @@ AggregateFunctionPtr createAggregateFunctionAvg(const std::string & name, const
 
 void registerAggregateFunctionAvg(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("avg", createAggregateFunctionAvg, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("avg", createAggregateFunctionAvg, AggregateFunctionFactory::Case::Insensitive);
 }
 }
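The `Case::Insensitive` registration above (previously `CaseInsensitive`) is what makes every spelling of `avg` resolve to the same aggregate; a minimal SQL sketch:

```sql
-- All three spellings resolve to the same aggregate function.
SELECT avg(number), AVG(number), aVg(number) FROM numbers(10);
```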
@@ -234,9 +234,9 @@ void registerAggregateFunctionsBitwise(AggregateFunctionFactory & factory)
     factory.registerFunction("groupBitXor", createAggregateFunctionBitwise<AggregateFunctionGroupBitXorData>);
 
     /// Aliases for compatibility with MySQL.
-    factory.registerAlias("BIT_OR", "groupBitOr", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("BIT_AND", "groupBitAnd", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("BIT_XOR", "groupBitXor", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("BIT_OR", "groupBitOr", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("BIT_AND", "groupBitAnd", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("BIT_XOR", "groupBitXor", AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -9,7 +9,7 @@ template <typename T1, typename T2> using AggregateFunctionCorr = AggregateFunct
 
 void registerAggregateFunctionsStatisticsCorr(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("corr", createAggregateFunctionStatisticsBinary<AggregateFunctionCorr, StatisticsFunctionKind::corr>, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("corr", createAggregateFunctionStatisticsBinary<AggregateFunctionCorr, StatisticsFunctionKind::corr>, AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -37,7 +37,7 @@ AggregateFunctionPtr createAggregateFunctionCount(const std::string & name, cons
 void registerAggregateFunctionCount(AggregateFunctionFactory & factory)
 {
     AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = false };
-    factory.registerFunction("count", {createAggregateFunctionCount, properties}, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("count", {createAggregateFunctionCount, properties}, AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -13,8 +13,8 @@ void registerAggregateFunctionsStatisticsCovar(AggregateFunctionFactory & factor
     factory.registerFunction("covarPop", createAggregateFunctionStatisticsBinary<AggregateFunctionCovar, StatisticsFunctionKind::covarPop>);
 
     /// Synonyms for compatibility.
-    factory.registerAlias("COVAR_SAMP", "covarSamp", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("COVAR_POP", "covarPop", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("COVAR_SAMP", "covarSamp", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("COVAR_POP", "covarPop", AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -6,6 +6,7 @@
 #include <IO/WriteHelpers.h>
 #include <Interpreters/Context.h>
 #include <Common/CurrentThread.h>
+#include <Core/Settings.h>
 
 static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;
 
@@ -28,7 +29,7 @@ const String & getAggregateFunctionCanonicalNameIfAny(const String & name)
     return AggregateFunctionFactory::instance().getCanonicalNameIfAny(name);
 }
 
-void AggregateFunctionFactory::registerFunction(const String & name, Value creator_with_properties, CaseSensitiveness case_sensitiveness)
+void AggregateFunctionFactory::registerFunction(const String & name, Value creator_with_properties, Case case_sensitiveness)
 {
     if (creator_with_properties.creator == nullptr)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "AggregateFunctionFactory: "
@@ -38,7 +39,7 @@ void AggregateFunctionFactory::registerFunction(const String & name, Value creat
         throw Exception(ErrorCodes::LOGICAL_ERROR, "AggregateFunctionFactory: the aggregate function name '{}' is not unique",
             name);
 
-    if (case_sensitiveness == CaseInsensitive)
+    if (case_sensitiveness == Case::Insensitive)
     {
         auto key = Poco::toLower(name);
         if (!case_insensitive_aggregate_functions.emplace(key, creator_with_properties).second)
@@ -60,7 +60,7 @@ public:
     void registerFunction(
         const String & name,
         Value creator,
-        CaseSensitiveness case_sensitiveness = CaseSensitive);
+        Case case_sensitiveness = Case::Sensitive);
 
     /// Register how to transform from one aggregate function to other based on NullsAction
     /// Registers them both ways:
@@ -840,8 +840,8 @@ void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)
     AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true };
 
     factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
-    factory.registerAlias("array_agg", "groupArray", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAliasUnchecked("array_concat_agg", "groupArrayArray", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("array_agg", "groupArray", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAliasUnchecked("array_concat_agg", "groupArrayArray", AggregateFunctionFactory::Case::Insensitive);
     factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });
     factory.registerFunction("groupArrayLast", { createAggregateFunctionGroupArray<true>, properties });
 }
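Likewise, the case-insensitive `array_agg` alias registered above makes the SQL-standard name interchangeable with `groupArray`; a minimal sketch:

```sql
-- Both calls produce [0, 1, 2].
SELECT groupArray(number), array_agg(number) FROM numbers(3);
```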
@@ -150,8 +150,18 @@ public:
 
     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
-        readVarUInt(this->data(place).version, buf);
-        this->data(place).value.read(buf);
+        auto & set = this->data(place).value;
+        auto & version = this->data(place).version;
+        size_t size;
+        readVarUInt(version, buf);
+        readVarUInt(size, buf);
+        set.reserve(size);
+        for (size_t i = 0; i < size; ++i)
+        {
+            int key;
+            readIntBinary(key, buf);
+            set.insert(key);
+        }
     }
 
     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -292,7 +302,7 @@ public:
         }
         return new_map;
     };
-    auto new_map = rhs_value.size() < set.size() ? create_new_map(rhs_value, set) : create_new_map(set, rhs_value);
+    auto new_map = create_new_map(set, rhs_value);
     set = std::move(new_map);
 }
 }
@@ -316,11 +326,9 @@ public:
     readVarUInt(version, buf);
     readVarUInt(size, buf);
     set.reserve(size);
-    UInt64 elem_version;
     for (size_t i = 0; i < size; ++i)
     {
         auto key = readStringBinaryInto(*arena, buf);
-        readVarUInt(elem_version, buf);
         set.insert(key);
     }
 }
@@ -277,7 +277,7 @@ void registerAggregateFunctionGroupConcat(AggregateFunctionFactory & factory)
     AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true };
 
     factory.registerFunction("groupConcat", { createAggregateFunctionGroupConcat, properties });
-    factory.registerAlias("group_concat", "groupConcat", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("group_concat", "groupConcat", AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -350,7 +350,7 @@ AggregateFunctionPtr createAggregateFunctionKolmogorovSmirnovTest(
 
 void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("kolmogorovSmirnovTest", createAggregateFunctionKolmogorovSmirnovTest, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("kolmogorovSmirnovTest", createAggregateFunctionKolmogorovSmirnovTest, AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -15,11 +15,11 @@ void registerAggregateFunctionsStatisticsSecondMoment(AggregateFunctionFactory &
     factory.registerFunction("stddevPop", createAggregateFunctionStatisticsUnary<AggregateFunctionSecondMoment, StatisticsFunctionKind::stddevPop>);
 
     /// Synonyms for compatibility.
-    factory.registerAlias("VAR_SAMP", "varSamp", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("VAR_POP", "varPop", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("STDDEV_SAMP", "stddevSamp", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("STDDEV_POP", "stddevPop", AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("STD", "stddevPop", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerAlias("VAR_SAMP", "varSamp", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("VAR_POP", "varPop", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("STDDEV_SAMP", "stddevSamp", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("STDDEV_POP", "stddevPop", AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("STD", "stddevPop", AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -72,7 +72,7 @@ AggregateFunctionPtr createAggregateFunctionSum(const std::string & name, const
 
 void registerAggregateFunctionSum(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("sum", createAggregateFunctionSum<AggregateFunctionSumSimple>, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("sum", createAggregateFunctionSum<AggregateFunctionSumSimple>, AggregateFunctionFactory::Case::Insensitive);
     factory.registerFunction("sumWithOverflow", createAggregateFunctionSum<AggregateFunctionSumWithOverflow>);
     factory.registerFunction("sumKahan", createAggregateFunctionSum<AggregateFunctionSumKahan>);
 }
@@ -535,9 +535,9 @@ void registerAggregateFunctionTopK(AggregateFunctionFactory & factory)
 
     factory.registerFunction("topK", { createAggregateFunctionTopK<false, false>, properties });
     factory.registerFunction("topKWeighted", { createAggregateFunctionTopK<true, false>, properties });
-    factory.registerFunction("approx_top_k", { createAggregateFunctionTopK<false, true>, properties }, AggregateFunctionFactory::CaseInsensitive);
-    factory.registerFunction("approx_top_sum", { createAggregateFunctionTopK<true, true>, properties }, AggregateFunctionFactory::CaseInsensitive);
-    factory.registerAlias("approx_top_count", "approx_top_k", AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("approx_top_k", { createAggregateFunctionTopK<false, true>, properties }, AggregateFunctionFactory::Case::Insensitive);
+    factory.registerFunction("approx_top_sum", { createAggregateFunctionTopK<true, true>, properties }, AggregateFunctionFactory::Case::Insensitive);
+    factory.registerAlias("approx_top_count", "approx_top_k", AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -195,8 +195,8 @@ AggregateFunctionPtr createAggregateFunctionMinMax(
 
 void registerAggregateFunctionsMinMax(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("min", createAggregateFunctionMinMax<true>, AggregateFunctionFactory::CaseInsensitive);
-    factory.registerFunction("max", createAggregateFunctionMinMax<false>, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("min", createAggregateFunctionMinMax<true>, AggregateFunctionFactory::Case::Insensitive);
+    factory.registerFunction("max", createAggregateFunctionMinMax<false>, AggregateFunctionFactory::Case::Insensitive);
 }
 
 }
@@ -3,7 +3,6 @@
 #include <base/scope_guard.h>
 
 #include <Common/Exception.h>
-#include <Core/Settings.h>
 
 #include <Analyzer/IQueryTreeNode.h>
 #include <Analyzer/QueryNode.h>
@@ -10,6 +10,8 @@
 #include <Analyzer/TableNode.h>
 #include <Analyzer/UnionNode.h>
 
+#include <Core/Settings.h>
+
 namespace DB
 {
 
@@ -11,6 +11,8 @@
 #include <Analyzer/FunctionNode.h>
 #include <Analyzer/Utils.h>
 
+#include <Core/Settings.h>
+
 namespace DB
 {
 
@@ -11,6 +11,8 @@
 #include <Analyzer/InDepthQueryTreeVisitor.h>
 #include <Analyzer/LambdaNode.h>
 
+#include <Core/Settings.h>
+
 namespace DB
 {
 
@@ -8,6 +8,8 @@
 #include <Analyzer/TableExpressionModifiers.h>
 #include <Analyzer/InDepthQueryTreeVisitor.h>
 
+#include <Core/Settings.h>
+
 namespace DB
 {
 
Some files were not shown because too many files have changed in this diff.