Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 23:21:59 +00:00
Merge branch 'master' into stress-test
Commit: ffb6c5b37b
.gitattributes (vendored, 2 changes)
@@ -1,4 +1,2 @@
contrib/* linguist-vendored
*.h linguist-language=C++
# to avoid frequent conflicts
tests/queries/0_stateless/arcadia_skip_list.txt text merge=union
.github/workflows/anchore-analysis.yml (vendored, 4 changes)
@@ -8,6 +8,10 @@

name: Docker Container Scan (clickhouse-server)

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

"on":
  pull_request:
    paths:
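A side note on the trigger key seen here and in the workflows below: in YAML 1.1 a bare `on` key can be parsed as a boolean, which yamllint's `truthy` rule flags. The files in this commit settle that in one of two ways, sketched here with placeholder trigger bodies (not taken from the diff):

# Used by most workflows in this commit: keep the bare key and silence the linter on that line.
on: # yamllint disable-line rule:truthy
  pull_request:
---
# Used by anchore-analysis.yml: quote the key so YAML cannot read it as a boolean at all.
"on":
  pull_request: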
.github/workflows/backport.yml (vendored, 23 changes)
@@ -1,4 +1,9 @@
name: CherryPick

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

concurrency:
  group: cherry-pick
on: # yamllint disable-line rule:truthy
@@ -8,18 +13,24 @@ jobs:
  CherryPick:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          REPO_OWNER=ClickHouse
          REPO_NAME=ClickHouse
          REPO_TEAM=core
          EOF
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
          fetch-depth: 0
      - name: Cherry pick
        env:
          TEMP_PATH: ${{runner.temp}}/cherry_pick
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          REPO_OWNER: "ClickHouse"
          REPO_NAME: "ClickHouse"
          REPO_TEAM: "core"
        run: |
          sudo pip install GitPython
          cd $GITHUB_WORKSPACE/tests/ci
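The "Set envs" step above uses the multiline-strings workflow command that the linked GitHub docs describe: it appends `NAME=value` pairs, or a `NAME<<DELIMITER ... DELIMITER` block for multi-line values such as the SSH key, to the file behind `$GITHUB_ENV`, and later steps of the same job see them as environment variables. A minimal sketch of the pattern (variable names and values here are placeholders, not taken from the workflow):

- name: Set envs
  run: |
    # Quoting 'EOF' stops the shell from expanding $-sequences inside the body;
    # the ${{ ... }} expressions were already substituted by the runner.
    cat >> "$GITHUB_ENV" << 'EOF'
    SIMPLE_VALUE=hello
    MULTILINE_VALUE<<DELIM
    first line
    second line
    DELIM
    EOF
- name: Read them back
  run: |
    echo "$SIMPLE_VALUE"
    printf '%s\n' "$MULTILINE_VALUE"

The quoted heredoc delimiter matters for secrets: without it the shell would try to expand `$`-sequences inside the key material.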
.github/workflows/backport_branches.yml (vendored, 241 changes)
@@ -1,4 +1,9 @@
|
||||
name: BackportPR
|
||||
|
||||
env:
|
||||
# Force the stdout and stderr streams to be unbuffered
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
on: # yamllint disable-line rule:truthy
|
||||
push:
|
||||
branches:
|
||||
@@ -7,6 +12,9 @@ jobs:
|
||||
DockerHubPush:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Images check
|
||||
@@ -22,17 +30,23 @@ jobs:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/compatibility_check
|
||||
REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
EOF
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: CompatibilityCheck
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/compatibility_check
|
||||
REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -51,24 +65,30 @@ jobs:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: 'recursive'
|
||||
submodules: 'true'
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/build_check
|
||||
IMAGES_PATH: ${{runner.temp}}/images_path
|
||||
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH: ${{runner.temp}}/../ccaches
|
||||
CHECK_NAME: 'ClickHouse build check (actions)'
|
||||
BUILD_NAME: 'package_release'
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -78,35 +98,41 @@ jobs:
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
sudo rm -fr $TEMP_PATH $CACHES_PATH
|
||||
BuilderDebAsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: 'recursive'
|
||||
submodules: 'true'
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/build_check
|
||||
IMAGES_PATH: ${{runner.temp}}/images_path
|
||||
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH: ${{runner.temp}}/../ccaches
|
||||
CHECK_NAME: 'ClickHouse build check (actions)'
|
||||
BUILD_NAME: 'package_asan'
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -116,35 +142,41 @@ jobs:
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
sudo rm -fr $TEMP_PATH $CACHES_PATH
|
||||
BuilderDebTsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: 'recursive'
|
||||
submodules: 'true'
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/build_check
|
||||
IMAGES_PATH: ${{runner.temp}}/images_path
|
||||
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH: ${{runner.temp}}/../ccaches
|
||||
CHECK_NAME: 'ClickHouse build check (actions)'
|
||||
BUILD_NAME: 'package_tsan'
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -154,35 +186,41 @@ jobs:
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
sudo rm -fr $TEMP_PATH $CACHES_PATH
|
||||
BuilderDebDebug:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: 'recursive'
|
||||
submodules: 'true'
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/build_check
|
||||
IMAGES_PATH: ${{runner.temp}}/images_path
|
||||
REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH: ${{runner.temp}}/../ccaches
|
||||
CHECK_NAME: 'ClickHouse build check (actions)'
|
||||
BUILD_NAME: 'package_debug'
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -192,13 +230,13 @@ jobs:
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_NAME }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
sudo rm -fr $TEMP_PATH $CACHES_PATH
|
||||
############################################################################################
|
||||
##################################### BUILD REPORTER #######################################
|
||||
############################################################################################
|
||||
@@ -207,22 +245,26 @@ jobs:
|
||||
- BuilderDebRelease
|
||||
- BuilderDebAsan
|
||||
- BuilderDebTsan
|
||||
- BuilderDebUBsan
|
||||
- BuilderDebMsan
|
||||
- BuilderDebDebug
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Report Builder
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/report_check
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'ClickHouse build check (actions)'
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -241,19 +283,25 @@ jobs:
|
||||
needs: [BuilderDebAsan]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (address, actions)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Functional test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Stateless tests (address, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT: 10800
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -273,19 +321,25 @@ jobs:
|
||||
needs: [BuilderDebDebug]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (debug, actions)
|
||||
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Functional test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Stateful tests (debug, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT: 3600
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -303,20 +357,30 @@ jobs:
|
||||
##############################################################################################
|
||||
StressTestTsan:
|
||||
needs: [BuilderDebTsan]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
# func testers have 16 cores + 128 GB memory
# while stress testers have 36 cores + 72 GB memory
# It would be better to have something like 32 + 128,
# but such servers are almost unavailable as spot instances.
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_thread
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (thread, actions)
|
||||
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Stress test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/stress_thread
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Stress test (thread, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -333,21 +397,27 @@ jobs:
|
||||
############################# INTEGRATION TESTS #############################################
|
||||
#############################################################################################
|
||||
IntegrationTestsRelease:
|
||||
needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (release, actions)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Integration test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Integration tests (release, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
@@ -371,6 +441,9 @@ jobs:
|
||||
- CompatibilityCheck
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Finish label
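The builder and functional/stress/integration test jobs above all finish with the same cleanup step. A short sketch of that idiom, with the shell details spelled out (the paths are whatever `TEMP_PATH`/`CACHES_PATH` the particular job defined):

- name: Cleanup
  if: always()   # run even if earlier steps failed or the job was cancelled
  run: |
    # "||:" means "or run the null command": docker kill/rm exit non-zero
    # when there is nothing to kill, and ":" keeps that from failing the step.
    docker kill $(docker ps -q) ||:
    docker rm -f $(docker ps -a -q) ||:
    sudo rm -fr $TEMP_PATH $CACHES_PATH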
.github/workflows/cancel.yml (vendored, 5 changes)
@@ -1,4 +1,9 @@
name: Cancel

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

on: # yamllint disable-line rule:truthy
  workflow_run:
    workflows: ["CIGithubActions", "ReleaseCI", "DocsCheck", "BackportPR"]
.github/workflows/docs_check.yml (vendored, 25 changes)
@@ -1,4 +1,9 @@
name: DocsCheck

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

on: # yamllint disable-line rule:truthy
  pull_request:
    types:
@@ -14,6 +19,9 @@ jobs:
  CheckLabels:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Labels check
@@ -24,6 +32,9 @@ jobs:
    needs: CheckLabels
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
@@ -39,17 +50,23 @@ jobs:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/docs_check
          REPO_COPY=${{runner.temp}}/docs_check/ClickHouse
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docs_check
          path: ${{ env.TEMP_PATH }}
      - name: Clear repository
        run: |
          sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Docs Check
        env:
          TEMP_PATH: ${{runner.temp}}/docs_check
          REPO_COPY: ${{runner.temp}}/docs_check/ClickHouse
        run: |
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
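The recurring change in this and the surrounding workflows is that literal `${{ runner.temp }}/...` paths, previously repeated in several steps, are now written once into `$GITHUB_ENV` and read back through the `env` context, roughly:

- name: Set envs
  run: |
    cat >> "$GITHUB_ENV" << 'EOF'
    TEMP_PATH=${{ runner.temp }}/docs_check
    EOF
- name: Download changed images
  uses: actions/download-artifact@v2
  with:
    name: changed_images
    path: ${{ env.TEMP_PATH }}   # reuse the value defined once above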
.github/workflows/main.yml (vendored, 1999 changes): file diff suppressed because it is too large
.github/workflows/master.yml (vendored, 1842 changes): file diff suppressed because it is too large
.github/workflows/release.yml (vendored, 33 changes)
@@ -1,4 +1,9 @@
name: DocsReleaseChecks

env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

concurrency:
  group: master-release
  cancel-in-progress: true
@@ -11,10 +16,15 @@ on: # yamllint disable-line rule:truthy
      - 'website/**'
      - 'benchmark/**'
      - 'docker/**'
      - '.github/**'
  workflow_dispatch:
jobs:
  DockerHubPush:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
@@ -30,20 +40,31 @@ jobs:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Set envs
        # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/docs_release
          REPO_COPY=${{runner.temp}}/docs_release/ClickHouse
          CLOUDFLARE_TOKEN=${{secrets.CLOUDFLARE}}
          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
          RCSK
          EOF
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{runner.temp}}/docs_release
          path: ${{ env.TEMP_PATH }}
      - name: Docs Release
        env:
          TEMP_PATH: ${{runner.temp}}/docs_release
          REPO_COPY: ${{runner.temp}}/docs_release/ClickHouse
          CLOUDFLARE_TOKEN: ${{secrets.CLOUDFLARE}}
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 docs_release.py
.github/workflows/release_branches.yml (vendored, 1149 changes): file diff suppressed because it is too large
.github/workflows/woboq.yml (vendored, new file, 42 changes)
@@ -0,0 +1,42 @@
name: WoboqBuilder
env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1

concurrency:
  group: woboq
on: # yamllint disable-line rule:truthy
  schedule:
    - cron: '0 */18 * * *'
  workflow_dispatch:
jobs:
  # don't use dockerhub push because this image updates so rarely
  WoboqCodebrowser:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/codebrowser
          REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse
          IMAGES_PATH=${{runner.temp}}/images_path
          EOF
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Codebrowser
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 codebrowser_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
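One detail of the schedule above worth spelling out: the hour field of a cron expression ranges over 0-23, so `*/18` matches only hours 0 and 18. The trigger therefore fires twice a day, at 00:00 and 18:00 UTC, rather than strictly every 18 hours:

on: # yamllint disable-line rule:truthy
  schedule:
    - cron: '0 */18 * * *'   # minute 0 of hours 0 and 18, UTC
  workflow_dispatch: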
.gitmodules (vendored, 3 changes)
@@ -247,3 +247,6 @@
[submodule "contrib/sysroot"]
	path = contrib/sysroot
	url = https://github.com/ClickHouse-Extras/sysroot.git
[submodule "contrib/azure"]
	path = contrib/azure
	url = https://github.com/ClickHouse-Extras/azure-sdk-for-cpp.git
CHANGELOG.md (178 changes)
@@ -1,3 +1,181 @@
### ClickHouse release v21.12, 2021-12-15

#### Backward Incompatible Change

* *A fix for a feature that previously had unwanted behaviour.* Do not allow direct select for Kafka/RabbitMQ/FileLog. Can be enabled by setting `stream_like_engine_allow_direct_select`. Direct select will not be allowed even if enabled by the setting, in case there is an attached materialized view. For Kafka and RabbitMQ, direct select, if allowed, will not commit messages by default. To enable commits with direct select, the user must use the storage-level setting `kafka{rabbitmq}_commit_on_select=1` (default `0`). [#31053](https://github.com/ClickHouse/ClickHouse/pull/31053) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* *A slight change in behaviour of a new function.* Return unquoted string in JSON_VALUE. Closes [#27965](https://github.com/ClickHouse/ClickHouse/issues/27965). [#31008](https://github.com/ClickHouse/ClickHouse/pull/31008) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* *Setting rename.* Add custom null representation support for TSV/CSV input formats. Fix deserializing Nullable(String) in TSV/CSV/JSONCompactStringsEachRow/JSONStringsEachRow input formats. Rename `output_format_csv_null_representation` and `output_format_tsv_null_representation` to `format_csv_null_representation` and `format_tsv_null_representation` accordingly. [#30497](https://github.com/ClickHouse/ClickHouse/pull/30497) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* *Further deprecation of already unused code.* This is relevant only for users of ClickHouse versions older than 20.6. A "leader election" mechanism is removed from `ReplicatedMergeTree`, because multiple leaders are supported since 20.6. If you are upgrading from an older version and some replica with an old version is a leader, then server will fail to start after upgrade. Stop replicas with old version to make new version start. After that it will not be possible to downgrade to version older than 20.6. [#32140](https://github.com/ClickHouse/ClickHouse/pull/32140) ([tavplubix](https://github.com/tavplubix)).

#### New Feature

* Implemented more of the ZooKeeper Four Letter Words commands in clickhouse-keeper: https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands. [#28981](https://github.com/ClickHouse/ClickHouse/pull/28981) ([JackyWoo](https://github.com/JackyWoo)). Now `clickhouse-keeper` is feature complete.
|
||||
* Support for `Bool` data type. [#31072](https://github.com/ClickHouse/ClickHouse/pull/31072) ([kevin wan](https://github.com/MaxWk)).
|
||||
* Support for `PARTITION BY` in File, URL, HDFS storages and with `INSERT INTO` table function. Closes [#30273](https://github.com/ClickHouse/ClickHouse/issues/30273). [#30690](https://github.com/ClickHouse/ClickHouse/pull/30690) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Added `CONSTRAINT ... ASSUME ...` (without checking during `INSERT`). Added query transformation to CNF (https://github.com/ClickHouse/ClickHouse/issues/11749) for more convenient optimization. Added simple query rewriting using constraints (only simple matching now, will be improved to support <,=,>... later). Added ability to replace heavy columns with light columns if it's possible. [#18787](https://github.com/ClickHouse/ClickHouse/pull/18787) ([Nikita Vasilev](https://github.com/nikvas0)).
|
||||
* Basic access authentication for http/url functions. [#31648](https://github.com/ClickHouse/ClickHouse/pull/31648) ([michael1589](https://github.com/michael1589)).
|
||||
* Support `INTERVAL` type in `STEP` clause for `WITH FILL` modifier. [#30927](https://github.com/ClickHouse/ClickHouse/pull/30927) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Add support for parallel reading from multiple files and support globs in `FROM INFILE` clause. [#30135](https://github.com/ClickHouse/ClickHouse/pull/30135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
|
||||
* Add support for `Identifier` table and database query parameters. Closes [#27226](https://github.com/ClickHouse/ClickHouse/issues/27226). [#28668](https://github.com/ClickHouse/ClickHouse/pull/28668) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* *TLDR: Major improvements of completeness and consistency of text formats.* Refactor formats `TSV`, `TSVRaw`, `CSV` and `JSONCompactEachRow`, `JSONCompactStringsEachRow`, remove code duplication, add base interface for formats with `-WithNames` and `-WithNamesAndTypes` suffixes. Add formats `CSVWithNamesAndTypes`, `TSVRawWithNames`, `TSVRawWithNamesAndTypes`, `JSONCompactEachRowWIthNames`, `JSONCompactStringsEachRowWIthNames`, `RowBinaryWithNames`. Support parallel parsing for formats `TSVWithNamesAndTypes`, `TSVRaw(WithNames/WIthNamesAndTypes)`, `CSVWithNamesAndTypes`, `JSONCompactEachRow(WithNames/WIthNamesAndTypes)`, `JSONCompactStringsEachRow(WithNames/WIthNamesAndTypes)`. Support columns mapping and types checking for `RowBinaryWithNamesAndTypes` format. Add setting `input_format_with_types_use_header` which specify if we should check that types written in <format_name>`WIthNamesAndTypes` format matches with table structure. Add setting `input_format_csv_empty_as_default` and use it in CSV format instead of `input_format_defaults_for_omitted_fields` (because this setting should not control `csv_empty_as_default`). Fix usage of setting `input_format_defaults_for_omitted_fields` (it was used only as `csv_empty_as_default`, but it should control calculation of default expressions for omitted fields). Fix Nullable input/output in `TSVRaw` format, make this format fully compatible with inserting into TSV. Fix inserting NULLs in `LowCardinality(Nullable)` when `input_format_null_as_default` is enabled (previously default values was inserted instead of actual NULLs). Fix strings deserialization in `JSONStringsEachRow`/`JSONCompactStringsEachRow` formats (strings were parsed just until first '\n' or '\t'). Add ability to use `Raw` escaping rule in Template input format. Add diagnostic info for JSONCompactEachRow(WithNames/WIthNamesAndTypes) input format. Fix bug with parallel parsing of `-WithNames` formats in case when setting `min_chunk_bytes_for_parallel_parsing` is less than bytes in a single row. [#30178](https://github.com/ClickHouse/ClickHouse/pull/30178) ([Kruglov Pavel](https://github.com/Avogar)). Allow to print/parse names and types of colums in `CustomSeparated` input/output format. Add formats `CustomSeparatedWithNames/WithNamesAndTypes` similar to `TSVWithNames/WithNamesAndTypes`. [#31434](https://github.com/ClickHouse/ClickHouse/pull/31434) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Aliyun OSS Storage support. [#31286](https://github.com/ClickHouse/ClickHouse/pull/31286) ([cfcz48](https://github.com/cfcz48)).
|
||||
* Exposes all settings of the global thread pool in the configuration file. [#31285](https://github.com/ClickHouse/ClickHouse/pull/31285) ([Tomáš Hromada](https://github.com/gyfis)).
|
||||
* Introduced window functions `exponentialTimeDecayedSum`, `exponentialTimeDecayedMax`, `exponentialTimeDecayedCount` and `exponentialTimeDecayedAvg` which are more effective than `exponentialMovingAverage` for bigger windows. Also more use-cases were covered. [#29799](https://github.com/ClickHouse/ClickHouse/pull/29799) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||
* Add option to compress logs before writing them to a file using LZ4. Closes [#23860](https://github.com/ClickHouse/ClickHouse/issues/23860). [#29219](https://github.com/ClickHouse/ClickHouse/pull/29219) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Support `JOIN ON 1 = 1` that have CROSS JOIN semantic. This closes [#25578](https://github.com/ClickHouse/ClickHouse/issues/25578). [#25894](https://github.com/ClickHouse/ClickHouse/pull/25894) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Add Map combinator for `Map` type. - Rename old `sum-, min-, max- Map` for mapped arrays to `sum-, min-, max- MappedArrays`. [#24539](https://github.com/ClickHouse/ClickHouse/pull/24539) ([Ildus Kurbangaliev](https://github.com/ildus)).
|
||||
* Make reading from HTTP retriable. Closes [#29696](https://github.com/ClickHouse/ClickHouse/issues/29696). [#29894](https://github.com/ClickHouse/ClickHouse/pull/29894) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Experimental Feature

* `WINDOW VIEW` to enable stream processing in ClickHouse. [#8331](https://github.com/ClickHouse/ClickHouse/pull/8331) ([vxider](https://github.com/Vxider)).
|
||||
* Drop support for using Ordinary databases with `MaterializedMySQL`. [#31292](https://github.com/ClickHouse/ClickHouse/pull/31292) ([Stig Bakken](https://github.com/stigsb)).
|
||||
* Implement the commands BACKUP and RESTORE for the Log family. This feature is under development. [#30688](https://github.com/ClickHouse/ClickHouse/pull/30688) ([Vitaly Baranov](https://github.com/vitlibar)).

#### Performance Improvement

* Reduce memory usage when reading with `s3` / `url` / `hdfs` formats `Parquet`, `ORC`, `Arrow` (controlled by setting `input_format_allow_seeks`, enabled by default). Also add setting `remote_read_min_bytes_for_seek` to control seeks. Closes [#10461](https://github.com/ClickHouse/ClickHouse/issues/10461). Closes [#16857](https://github.com/ClickHouse/ClickHouse/issues/16857). [#30936](https://github.com/ClickHouse/ClickHouse/pull/30936) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Add optimizations for constant conditions in JOIN ON, ref [#26928](https://github.com/ClickHouse/ClickHouse/issues/26928). [#27021](https://github.com/ClickHouse/ClickHouse/pull/27021) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Support parallel formatting for all text formats, except `JSONEachRowWithProgress` and `PrettyCompactMonoBlock`. [#31489](https://github.com/ClickHouse/ClickHouse/pull/31489) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Speed up count over nullable columns. [#31806](https://github.com/ClickHouse/ClickHouse/pull/31806) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Speed up `avg` and `sumCount` aggregate functions. [#31694](https://github.com/ClickHouse/ClickHouse/pull/31694) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Improve performance of JSON and XML output formats. [#31673](https://github.com/ClickHouse/ClickHouse/pull/31673) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Improve performance of syncing data to block device. This closes [#31181](https://github.com/ClickHouse/ClickHouse/issues/31181). [#31229](https://github.com/ClickHouse/ClickHouse/pull/31229) ([zhanglistar](https://github.com/zhanglistar)).
|
||||
* Fixing query performance issue in `LiveView` tables. Fixes [#30831](https://github.com/ClickHouse/ClickHouse/issues/30831). [#31006](https://github.com/ClickHouse/ClickHouse/pull/31006) ([vzakaznikov](https://github.com/vzakaznikov)).
|
||||
* Speed up query parsing. [#31949](https://github.com/ClickHouse/ClickHouse/pull/31949) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional `rule_type` field). [#25122](https://github.com/ClickHouse/ClickHouse/pull/25122) ([Michail Safronov](https://github.com/msaf1980)).
|
||||
* Remove excessive `DESC TABLE` requests for `remote()` (in case of `remote('127.1', system.one)` (i.e. identifier as the db.table instead of string) there was excessive `DESC TABLE` request). [#32019](https://github.com/ClickHouse/ClickHouse/pull/32019) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Optimize function `tupleElement` to reading of subcolumn with enabled setting `optimize_functions_to_subcolumns`. [#31261](https://github.com/ClickHouse/ClickHouse/pull/31261) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Optimize function `mapContains` to reading of subcolumn `key` with enabled settings `optimize_functions_to_subcolumns`. [#31218](https://github.com/ClickHouse/ClickHouse/pull/31218) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Add settings `merge_tree_min_rows_for_concurrent_read_for_remote_filesystem` and `merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem`. [#30970](https://github.com/ClickHouse/ClickHouse/pull/30970) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Skipping mutations of different partitions in `StorageMergeTree`. [#21326](https://github.com/ClickHouse/ClickHouse/pull/21326) ([Vladimir Chebotarev](https://github.com/excitoon)).

#### Improvement

* Do not allow to drop a table or dictionary if some tables or dictionaries depend on it. [#30977](https://github.com/ClickHouse/ClickHouse/pull/30977) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Allow versioning of aggregate function states. Now we can introduce backward compatible changes in serialization format of aggregate function states. Closes [#12552](https://github.com/ClickHouse/ClickHouse/issues/12552). [#24820](https://github.com/ClickHouse/ClickHouse/pull/24820) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Support PostgreSQL style `ALTER MODIFY COLUMN` syntax. [#32003](https://github.com/ClickHouse/ClickHouse/pull/32003) ([SuperDJY](https://github.com/cmsxbc)).
|
||||
* Added `update_field` support for `RangeHashedDictionary`, `ComplexKeyRangeHashedDictionary`. [#32185](https://github.com/ClickHouse/ClickHouse/pull/32185) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* The `murmurHash3_128` and `sipHash128` functions now accept an arbitrary number of arguments. This closes [#28774](https://github.com/ClickHouse/ClickHouse/issues/28774). [#28965](https://github.com/ClickHouse/ClickHouse/pull/28965) ([小路](https://github.com/nicelulu)).
|
||||
* Support default expression for `HDFS` storage and optimize fetching when source is column oriented. [#32256](https://github.com/ClickHouse/ClickHouse/pull/32256) ([李扬](https://github.com/taiyang-li)).
|
||||
* Improve the operation name of an opentelemetry span. [#32234](https://github.com/ClickHouse/ClickHouse/pull/32234) ([Frank Chen](https://github.com/FrankChen021)).
|
||||
* Use `Content-Type: application/x-ndjson` (http://ndjson.org/) for output format `JSONEachRow`. [#32223](https://github.com/ClickHouse/ClickHouse/pull/32223) ([Dmitriy Dorofeev](https://github.com/deem0n)).
|
||||
* Improve skipping unknown fields with quoted escaping rule in Template/CustomSeparated formats. Previously you could skip only quoted strings, now you can skip values with any type. [#32204](https://github.com/ClickHouse/ClickHouse/pull/32204) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Now `clickhouse-keeper` refuses to start or apply configuration changes when they contain duplicated IDs or endpoints. Fixes [#31339](https://github.com/ClickHouse/ClickHouse/issues/31339). [#32121](https://github.com/ClickHouse/ClickHouse/pull/32121) ([alesapin](https://github.com/alesapin)).
|
||||
* Set Content-Type in HTTP packets issued from URL engine. [#32113](https://github.com/ClickHouse/ClickHouse/pull/32113) ([Frank Chen](https://github.com/FrankChen021)).
|
||||
* Return Content-Type as 'application/json' for `JSONEachRow` format if `output_format_json_array_of_rows` is enabled. [#32112](https://github.com/ClickHouse/ClickHouse/pull/32112) ([Frank Chen](https://github.com/FrankChen021)).
|
||||
* Allow to parse `+` before `Float32`/`Float64` values. [#32079](https://github.com/ClickHouse/ClickHouse/pull/32079) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Allow a user configured `hdfs_replication` parameter for `DiskHDFS` and `StorageHDFS`. Closes [#32039](https://github.com/ClickHouse/ClickHouse/issues/32039). [#32049](https://github.com/ClickHouse/ClickHouse/pull/32049) ([leosunli](https://github.com/leosunli)).
|
||||
* Added ClickHouse `exception` and `exception_code` fields to opentelemetry span log. [#32040](https://github.com/ClickHouse/ClickHouse/pull/32040) ([Frank Chen](https://github.com/FrankChen021)).
|
||||
* Improve opentelemetry span log duration - it was zero at the query level if there was a query exception. [#32038](https://github.com/ClickHouse/ClickHouse/pull/32038) ([Frank Chen](https://github.com/FrankChen021)).
|
||||
* Fix the issue that `LowCardinality` of `Int256` cannot be created. [#31832](https://github.com/ClickHouse/ClickHouse/pull/31832) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Recreate `system.*_log` tables in case of different engine/partition_by. [#31824](https://github.com/ClickHouse/ClickHouse/pull/31824) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* `MaterializedMySQL`: Fix issue with table named 'table'. [#31781](https://github.com/ClickHouse/ClickHouse/pull/31781) ([Håvard Kvålen](https://github.com/havardk)).
|
||||
* ClickHouse dictionary source: support predefined connections. Closes [#31705](https://github.com/ClickHouse/ClickHouse/issues/31705). [#31749](https://github.com/ClickHouse/ClickHouse/pull/31749) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Allow to use predefined connections configuration for Kafka and RabbitMQ engines (the same way as for other integration table engines). [#31691](https://github.com/ClickHouse/ClickHouse/pull/31691) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Always re-render prompt while navigating history in clickhouse-client. This will improve usability of manipulating very long queries that don't fit on screen. [#31675](https://github.com/ClickHouse/ClickHouse/pull/31675) ([alexey-milovidov](https://github.com/alexey-milovidov)) (author: Amos Bird).
|
||||
* Add key bindings for navigating through history (instead of lines/history). [#31641](https://github.com/ClickHouse/ClickHouse/pull/31641) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Improve the `max_execution_time` checks. Fixed some cases when timeout checks do not happen and query could run too long. [#31636](https://github.com/ClickHouse/ClickHouse/pull/31636) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Better exception message when `users.xml` cannot be loaded due to bad password hash. This closes [#24126](https://github.com/ClickHouse/ClickHouse/issues/24126). [#31557](https://github.com/ClickHouse/ClickHouse/pull/31557) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Use shard and replica name from `Replicated` database arguments when expanding macros in `ReplicatedMergeTree` arguments if these macros are not defined in config. Closes [#31471](https://github.com/ClickHouse/ClickHouse/issues/31471). [#31488](https://github.com/ClickHouse/ClickHouse/pull/31488) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Better analysis for `min/max/count` projection. Now, with enabled `allow_experimental_projection_optimization`, virtual `min/max/count` projection can be used together with columns from partition key. [#31474](https://github.com/ClickHouse/ClickHouse/pull/31474) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Add `--pager` support for `clickhouse-local`. [#31457](https://github.com/ClickHouse/ClickHouse/pull/31457) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix waiting of the editor during interactive query edition (`waitpid()` returns -1 on `SIGWINCH` and `EDITOR` and `clickhouse-local`/`clickhouse-client` works concurrently). [#31456](https://github.com/ClickHouse/ClickHouse/pull/31456) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Throw an exception if there is some garbage after field in `JSONCompactStrings(EachRow)` format. [#31455](https://github.com/ClickHouse/ClickHouse/pull/31455) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Default value of `http_send_timeout` and `http_receive_timeout` settings changed from 1800 (30 minutes) to 180 (3 minutes). [#31450](https://github.com/ClickHouse/ClickHouse/pull/31450) ([tavplubix](https://github.com/tavplubix)).
|
||||
* `MaterializedMySQL` now handles `CREATE TABLE ... LIKE ...` DDL queries. [#31410](https://github.com/ClickHouse/ClickHouse/pull/31410) ([Stig Bakken](https://github.com/stigsb)).
|
||||
* Return artificial create query when executing `show create table` on system's tables. [#31391](https://github.com/ClickHouse/ClickHouse/pull/31391) ([SuperDJY](https://github.com/cmsxbc)).
|
||||
* Previously progress was shown only for `numbers` table function. Now for `numbers_mt` it is also shown. [#31318](https://github.com/ClickHouse/ClickHouse/pull/31318) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Initial user's roles are used now to find row policies, see [#31080](https://github.com/ClickHouse/ClickHouse/issues/31080). [#31262](https://github.com/ClickHouse/ClickHouse/pull/31262) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* If some obsolete setting is changed - show warning in `system.warnings`. [#31252](https://github.com/ClickHouse/ClickHouse/pull/31252) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Improved backoff for background cleanup tasks in `MergeTree`. Settings `merge_tree_clear_old_temporary_directories_interval_seconds` and `merge_tree_clear_old_parts_interval_seconds` moved from users settings to merge tree settings. [#31180](https://github.com/ClickHouse/ClickHouse/pull/31180) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Now every replica will send to client only incremental information about profile events counters. [#31155](https://github.com/ClickHouse/ClickHouse/pull/31155) ([Dmitry Novik](https://github.com/novikd)). This makes `--hardware_utilization` option in `clickhouse-client` usable.
|
||||
* Enable multiline editing in clickhouse-client by default. This addresses [#31121](https://github.com/ClickHouse/ClickHouse/issues/31121) . [#31123](https://github.com/ClickHouse/ClickHouse/pull/31123) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Function name normalization for `ALTER` queries. This helps avoid metadata mismatch between creating a table with indices/projections and adding indices/projections via alter commands. This is a follow-up of https://github.com/ClickHouse/ClickHouse/pull/20174. Marked as an improvement since there are no bug reports and the scenario is somewhat rare. [#31095](https://github.com/ClickHouse/ClickHouse/pull/31095) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Support `IF EXISTS` modifier for `RENAME DATABASE`/`TABLE`/`DICTIONARY` query. If this directive is used, one will not get an error if the DATABASE/TABLE/DICTIONARY to be renamed doesn't exist. [#31081](https://github.com/ClickHouse/ClickHouse/pull/31081) ([victorgao](https://github.com/kafka1991)).
|
||||
* Cancel vertical merges when partition is dropped. This is a follow-up of https://github.com/ClickHouse/ClickHouse/pull/25684 and https://github.com/ClickHouse/ClickHouse/pull/30996. [#31057](https://github.com/ClickHouse/ClickHouse/pull/31057) ([Amos Bird](https://github.com/amosbird)).
|
||||
* The local session inside a Clickhouse dictionary source won't send its events to the session log anymore. This fixes a possible deadlock (tsan alert) on shutdown. Also this PR fixes flaky `test_dictionaries_dependency_xml/`. [#31013](https://github.com/ClickHouse/ClickHouse/pull/31013) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Less locking in ALTER command. [#31010](https://github.com/ClickHouse/ClickHouse/pull/31010) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix `--verbose` option in clickhouse-local interactive mode and allow logging into file. [#30881](https://github.com/ClickHouse/ClickHouse/pull/30881) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Added `\l`, `\d`, `\c` commands in `clickhouse-client` like in MySQL and PostgreSQL. [#30876](https://github.com/ClickHouse/ClickHouse/pull/30876) ([Pavel Medvedev](https://github.com/pmed)).
|
||||
* For clickhouse-local or clickhouse-client: if there is `--interactive` option with `--query` or `--queries-file`, then first execute them like in non-interactive and then start interactive mode. [#30851](https://github.com/ClickHouse/ClickHouse/pull/30851) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible "The local set of parts of X doesn't look like the set of parts in ZooKeeper" error (if DROP fails during removing znodes from zookeeper). [#30826](https://github.com/ClickHouse/ClickHouse/pull/30826) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Avro format works against Kafka. Setting `output_format_avro_rows_in_file` added. [#30351](https://github.com/ClickHouse/ClickHouse/pull/30351) ([Ilya Golshtein](https://github.com/ilejn)).
|
||||
* Allow to specify one or any number of PostgreSQL schemas for one `MaterializedPostgreSQL` database. Closes [#28901](https://github.com/ClickHouse/ClickHouse/issues/28901). Closes [#29324](https://github.com/ClickHouse/ClickHouse/issues/29324). [#28933](https://github.com/ClickHouse/ClickHouse/pull/28933) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Replaced default ports for clickhouse-keeper internal communication from 44444 to 9234. Fixes [#30879](https://github.com/ClickHouse/ClickHouse/issues/30879). [#31799](https://github.com/ClickHouse/ClickHouse/pull/31799) ([alesapin](https://github.com/alesapin)).
|
||||
* Implement function transform with Decimal arguments. [#31839](https://github.com/ClickHouse/ClickHouse/pull/31839) ([李帅](https://github.com/loneylee)).
|
||||
* Fix abort in debug server and `DB::Exception: std::out_of_range: basic_string` error in release server in case of bad hdfs url by adding additional check of hdfs url structure. [#31042](https://github.com/ClickHouse/ClickHouse/pull/31042) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix possible assert in `hdfs` table function/engine, add test. [#31036](https://github.com/ClickHouse/ClickHouse/pull/31036) ([Kruglov Pavel](https://github.com/Avogar)).

#### Bug Fixes

* Fix group by / order by / limit by aliases with positional arguments enabled. Closes [#31173](https://github.com/ClickHouse/ClickHouse/issues/31173). [#31741](https://github.com/ClickHouse/ClickHouse/pull/31741) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix usage of `Buffer` table engine with type `Map`. Fixes [#30546](https://github.com/ClickHouse/ClickHouse/issues/30546). [#31742](https://github.com/ClickHouse/ClickHouse/pull/31742) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix reading from `MergeTree` tables with enabled `use_uncompressed_cache`. [#31826](https://github.com/ClickHouse/ClickHouse/pull/31826) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fixed the behavior when mutations that have nothing to do are stuck (with enabled setting `empty_result_for_aggregation_by_empty_set`). [#32358](https://github.com/ClickHouse/ClickHouse/pull/32358) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Fix skipping columns while writing protobuf. This PR fixes [#31160](https://github.com/ClickHouse/ClickHouse/issues/31160), see the comment [#31160](https://github.com/ClickHouse/ClickHouse/issues/31160)#issuecomment-980595318. [#31988](https://github.com/ClickHouse/ClickHouse/pull/31988) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix bug when remove unneeded columns in subquery. If there is an aggregation function in query without group by, do not remove if it is unneeded. [#32289](https://github.com/ClickHouse/ClickHouse/pull/32289) ([dongyifeng](https://github.com/dyf6372)).
|
||||
* Quota limit was not reached, but the limit was exceeded. This PR fixes [#31174](https://github.com/ClickHouse/ClickHouse/issues/31174). [#31337](https://github.com/ClickHouse/ClickHouse/pull/31337) ([sunny](https://github.com/sunny19930321)).
|
||||
* Fix SHOW GRANTS when partial revokes are used. This PR fixes [#31138](https://github.com/ClickHouse/ClickHouse/issues/31138). [#31249](https://github.com/ClickHouse/ClickHouse/pull/31249) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Memory amount was incorrectly estimated when ClickHouse is run in containers with cgroup limits. [#31157](https://github.com/ClickHouse/ClickHouse/pull/31157) ([Pavel Medvedev](https://github.com/pmed)).
|
||||
* Fix `ALTER ... MATERIALIZE COLUMN ...` queries in case when data type of default expression is not equal to the data type of column. [#32348](https://github.com/ClickHouse/ClickHouse/pull/32348) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fixed crash with SIGFPE in aggregate function `avgWeighted` with `Decimal` argument. Fixes [#32053](https://github.com/ClickHouse/ClickHouse/issues/32053). [#32303](https://github.com/ClickHouse/ClickHouse/pull/32303) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Server might fail to start with `Cannot attach 1 tables due to cyclic dependencies` error if `Dictionary` table looks at XML-dictionary with the same name, it's fixed. Fixes [#31315](https://github.com/ClickHouse/ClickHouse/issues/31315). [#32288](https://github.com/ClickHouse/ClickHouse/pull/32288) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Fix parsing error while NaN deserializing for `Nullable(Float)` for `Quoted` escaping rule. [#32190](https://github.com/ClickHouse/ClickHouse/pull/32190) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* XML dictionaries: identifiers, used in table create query, can be qualified to `default_database` during upgrade to newer version. Closes [#31963](https://github.com/ClickHouse/ClickHouse/issues/31963). [#32187](https://github.com/ClickHouse/ClickHouse/pull/32187) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Number of active replicas might be determined incorrectly when inserting with quorum if setting `replicated_can_become_leader` is disabled on some replicas. It's fixed. [#32157](https://github.com/ClickHouse/ClickHouse/pull/32157) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Dictionaries: fix cases when `{condition}` does not work for custom database queries. [#32117](https://github.com/ClickHouse/ClickHouse/pull/32117) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix `CAST` from `Nullable` with `cast_keep_nullable` (`PARAMETER_OUT_OF_BOUND` error before for i.e. `toUInt32OrDefault(toNullable(toUInt32(1)))`). [#32080](https://github.com/ClickHouse/ClickHouse/pull/32080) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix CREATE TABLE of Join Storage in some obscure cases. Close [#31680](https://github.com/ClickHouse/ClickHouse/issues/31680). [#32066](https://github.com/ClickHouse/ClickHouse/pull/32066) ([SuperDJY](https://github.com/cmsxbc)).
|
||||
* Fixed `Directory ... already exists and is not empty` error when detaching part. [#32063](https://github.com/ClickHouse/ClickHouse/pull/32063) ([tavplubix](https://github.com/tavplubix)).
|
||||
* `MaterializedMySQL` (experimental feature): Fix misinterpretation of `DECIMAL` data from MySQL. [#31990](https://github.com/ClickHouse/ClickHouse/pull/31990) ([Håvard Kvålen](https://github.com/havardk)).
|
||||
* `FileLog` (experimental feature) engine unnecessarily created a metadata directory when table creation failed. Fixes [#31962](https://github.com/ClickHouse/ClickHouse/issues/31962). [#31967](https://github.com/ClickHouse/ClickHouse/pull/31967) ([flynn](https://github.com/ucasfl)).
|
||||
* Some `GET_PART` entry might hang in replication queue if part is lost on all replicas and there are no other parts in the same partition. It's fixed in cases when partition key contains only columns of integer types or `Date[Time]`. Fixes [#31485](https://github.com/ClickHouse/ClickHouse/issues/31485). [#31887](https://github.com/ClickHouse/ClickHouse/pull/31887) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Fix functions `empty` and `notEmpty` with arguments of `UUID` type. Fixes [#31819](https://github.com/ClickHouse/ClickHouse/issues/31819). [#31883](https://github.com/ClickHouse/ClickHouse/pull/31883) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Change configuration path from `keeper_server.session_timeout_ms` to `keeper_server.coordination_settings.session_timeout_ms` when constructing a `KeeperTCPHandler`. Same with `operation_timeout`. [#31859](https://github.com/ClickHouse/ClickHouse/pull/31859) ([JackyWoo](https://github.com/JackyWoo)).
|
||||
* Fix invalid cast of Nullable type when nullable primary key is used. (Nullable primary key is a discouraged feature - please do not use). This fixes [#31075](https://github.com/ClickHouse/ClickHouse/issues/31075). [#31823](https://github.com/ClickHouse/ClickHouse/pull/31823) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix crash in recursive UDF in SQL. Closes [#30856](https://github.com/ClickHouse/ClickHouse/issues/30856). [#31820](https://github.com/ClickHouse/ClickHouse/pull/31820) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix crash when the function `dictGet` with an explicit type is used for a dictionary attribute whose type is `Nullable`. Fixes [#30980](https://github.com/ClickHouse/ClickHouse/issues/30980). [#31800](https://github.com/ClickHouse/ClickHouse/pull/31800) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix crash with empty result of ODBC query (with some ODBC drivers). Closes [#31465](https://github.com/ClickHouse/ClickHouse/issues/31465). [#31766](https://github.com/ClickHouse/ClickHouse/pull/31766) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix disabling the query profiler (with `query_profiler_real_time_period_ns>0`/`query_profiler_cpu_time_period_ns>0` the query profiler could stay enabled even after the query finished). [#31740](https://github.com/ClickHouse/ClickHouse/pull/31740) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fixed rare segfault on concurrent `ATTACH PARTITION` queries. [#31738](https://github.com/ClickHouse/ClickHouse/pull/31738) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Fix race in JSONEachRowWithProgress output format when data and lines with progress are mixed in output. [#31736](https://github.com/ClickHouse/ClickHouse/pull/31736) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fixed `there are no such cluster here` error on execution of `ON CLUSTER` query if specified cluster name is name of `Replicated` database. [#31723](https://github.com/ClickHouse/ClickHouse/pull/31723) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Fix exception on some of the applications of `decrypt` function on Nullable columns. This closes [#31662](https://github.com/ClickHouse/ClickHouse/issues/31662). This closes [#31426](https://github.com/ClickHouse/ClickHouse/issues/31426). [#31707](https://github.com/ClickHouse/ClickHouse/pull/31707) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fixed the function `ngrams` when the string contains UTF-8 characters. [#31706](https://github.com/ClickHouse/ClickHouse/pull/31706) ([yandd](https://github.com/yandd)).
|
||||
* Settings `input_format_allow_errors_num` and `input_format_allow_errors_ratio` did not work for parsing of domain types, such as `IPv4`, it's fixed. Fixes [#31686](https://github.com/ClickHouse/ClickHouse/issues/31686). [#31697](https://github.com/ClickHouse/ClickHouse/pull/31697) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Fixed null pointer exception in `MATERIALIZE COLUMN`. [#31679](https://github.com/ClickHouse/ClickHouse/pull/31679) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* `RENAME TABLE` query worked incorrectly on an attempt to rename a DDL dictionary in an `Ordinary` database; it's fixed. [#31638](https://github.com/ClickHouse/ClickHouse/pull/31638) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Implement the `sparkbar` aggregate function as it was intended (see the example after this list); see [this comment](https://github.com/ClickHouse/ClickHouse/issues/26175#issuecomment-960353867) and [this comment](https://github.com/ClickHouse/ClickHouse/issues/26175#issuecomment-961155065). [#31624](https://github.com/ClickHouse/ClickHouse/pull/31624) ([小路](https://github.com/nicelulu)).
|
||||
* Fix invalid generated JSON when only column names contain invalid UTF-8 sequences. [#31534](https://github.com/ClickHouse/ClickHouse/pull/31534) ([Kevin Michel](https://github.com/kmichel-aiven)).
|
||||
* Disable `partial_merge_join_left_table_buffer_bytes` until the bug in this optimization is fixed. See [#31009](https://github.com/ClickHouse/ClickHouse/issues/31009). Remove the redundant option `partial_merge_join_optimizations`. [#31528](https://github.com/ClickHouse/ClickHouse/pull/31528) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix progress for short `INSERT SELECT` queries. [#31510](https://github.com/ClickHouse/ClickHouse/pull/31510) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix wrong behavior with `GROUP BY` and positional arguments (see the example after this list). Closes [#31280 (comment)](https://github.com/ClickHouse/ClickHouse/issues/31280#issuecomment-968696186). [#31420](https://github.com/ClickHouse/ClickHouse/pull/31420) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Resolve `nullptr` in STS credentials provider for S3. [#31409](https://github.com/ClickHouse/ClickHouse/pull/31409) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||
* Remove `notLike` function from index analysis, because it was wrong. [#31169](https://github.com/ClickHouse/ClickHouse/pull/31169) ([sundyli](https://github.com/sundy-li)).
|
||||
* Fix bug in Keeper which could lead to inability to start when some coordination logs were lost and there is a snapshot fresher than the latest log. [#31150](https://github.com/ClickHouse/ClickHouse/pull/31150) ([alesapin](https://github.com/alesapin)).
|
||||
* Rewrite the right distributed table as a local table in a local join. Solves [#25809](https://github.com/ClickHouse/ClickHouse/issues/25809). [#31105](https://github.com/ClickHouse/ClickHouse/pull/31105) ([abel-cheng](https://github.com/abel-cheng)).
|
||||
* Fix `Merge` table with aliases and a `WHERE` clause (it did not work at all before). Closes [#28802](https://github.com/ClickHouse/ClickHouse/issues/28802). [#31044](https://github.com/ClickHouse/ClickHouse/pull/31044) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix `JSON_VALUE`/`JSON_QUERY` with quoted identifiers. This allows spaces in the JSON path (see the example after this list). Closes [#30971](https://github.com/ClickHouse/ClickHouse/issues/30971). [#31003](https://github.com/ClickHouse/ClickHouse/pull/31003) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Using the `formatRow` function with non-row-oriented formats led to a segfault. Such formats are no longer allowed with this function, because it doesn't make sense (see the example after this list for correct usage). [#31001](https://github.com/ClickHouse/ClickHouse/pull/31001) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix bug which broke select queries if they happened after dropping materialized view. Found in [#30691](https://github.com/ClickHouse/ClickHouse/issues/30691). [#30997](https://github.com/ClickHouse/ClickHouse/pull/30997) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Skip the `max_partition_size_to_drop` check in case of `ATTACH PARTITION ... FROM` and `MOVE PARTITION ...`. [#30995](https://github.com/ClickHouse/ClickHouse/pull/30995) ([Amr Alaa](https://github.com/amralaa-MSFT)).
|
||||
* Fix some corner cases with `INTERSECT` and `EXCEPT` operators. Closes [#30803](https://github.com/ClickHouse/ClickHouse/issues/30803). [#30965](https://github.com/ClickHouse/ClickHouse/pull/30965) ([Kseniia Sumarokova](https://github.com/kssenii)).
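The examples below illustrate several of the fixes above. They are minimal sketches written for this changelog, not verbatim test cases from the pull requests; literal values and column names are made up for illustration.

For the `cast_keep_nullable` fix, the expression is the one quoted in the entry itself; the expected result type is `Nullable(UInt32)` rather than an error:

```sql
-- With cast_keep_nullable enabled, casting a Nullable value keeps it Nullable
-- instead of failing with PARAMETER_OUT_OF_BOUND as it did before the fix.
SET cast_keep_nullable = 1;
SELECT toUInt32OrDefault(toNullable(toUInt32(1))) AS value, toTypeName(value);
```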
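For the `empty`/`notEmpty` fix with `UUID` arguments, a sketch assuming the usual convention that the all-zero UUID counts as empty:

```sql
-- empty(uuid) is 1 only for the all-zero UUID; notEmpty is the opposite.
SELECT
    empty(toUUID('00000000-0000-0000-0000-000000000000')) AS zero_is_empty,
    notEmpty(generateUUIDv4()) AS random_is_not_empty;
```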
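For the `sparkbar` entry, an illustrative call assuming the `sparkbar(buckets)(x, y)` calling form; the result is a small inline bar chart string:

```sql
-- Draw a 9-bucket bar chart of y = number over x = number for 0..9.
SELECT sparkbar(9)(number, number) FROM numbers(10);
```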
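For the positional `GROUP BY` fix, a sketch assuming the `enable_positional_arguments` setting; both queries should produce the same grouping:

```sql
SET enable_positional_arguments = 1;
-- Group by explicit column names...
SELECT x2, x3, count() FROM (SELECT 1 AS x1, 2 AS x2, 3 AS x3) GROUP BY x2, x3;
-- ...or by the positions of the selected columns (1 = x2, 2 = x3).
SELECT x2, x3, count() FROM (SELECT 1 AS x1, 2 AS x2, 3 AS x3) GROUP BY 1, 2;
```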
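For the `JSON_VALUE` fix, a sketch using a quoted identifier so a key containing a space can appear in the JSON path (the JSON document here is made up):

```sql
-- Quoted identifiers in the path allow keys containing spaces.
SELECT JSON_VALUE('{"hello world": {"b": "value"}}', '$."hello world".b');
```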
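For the `formatRow` entry, correct usage with a row-oriented format (the column list is only an example):

```sql
-- Serialize each row with a row-oriented format; non-row-oriented formats are rejected.
SELECT formatRow('CSV', number, toString(number)) FROM numbers(3);
```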
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
|
||||
* Fix incorrect filtering result on non-x86 builds. This closes [#31417](https://github.com/ClickHouse/ClickHouse/issues/31417). This closes [#31524](https://github.com/ClickHouse/ClickHouse/issues/31524). [#31574](https://github.com/ClickHouse/ClickHouse/pull/31574) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Make ClickHouse build fully reproducible (byte identical on different machines). This closes [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#31899](https://github.com/ClickHouse/ClickHouse/pull/31899) ([alexey-milovidov](https://github.com/alexey-milovidov)). Remove filesystem path to the build directory from binaries to enable reproducible builds. This is needed for [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#31838](https://github.com/ClickHouse/ClickHouse/pull/31838) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Use our own CMakeLists for `zlib-ng`, `cassandra`, `mariadb-connector-c` and `xz`, `re2`, `sentry`, `gsasl`, `arrow`, `protobuf`. This is needed for [#20151](https://github.com/ClickHouse/ClickHouse/issues/20151). Part of [#9226](https://github.com/ClickHouse/ClickHouse/issues/9226). A small step towards removal of annoying trash from the build system. [#30599](https://github.com/ClickHouse/ClickHouse/pull/30599) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Hermetic builds: use a fixed version of libc and make sure that no source or binary files from the host OS are used during the build. This closes [#27133](https://github.com/ClickHouse/ClickHouse/issues/27133). This closes [#21435](https://github.com/ClickHouse/ClickHouse/issues/21435). This closes [#30462](https://github.com/ClickHouse/ClickHouse/issues/30462). [#30011](https://github.com/ClickHouse/ClickHouse/pull/30011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Adding function `getFuzzerData()` to easily fuzz particular functions. This closes [#23227](https://github.com/ClickHouse/ClickHouse/issues/23227). [#27526](https://github.com/ClickHouse/ClickHouse/pull/27526) ([Alexey Boykov](https://github.com/mathalex)).
|
||||
* More correct setup of capabilities inside Docker. [#31802](https://github.com/ClickHouse/ClickHouse/pull/31802) ([Constantine Peresypkin](https://github.com/pkit)).
|
||||
* Enable clang `-fstrict-vtable-pointers`, `-fwhole-program-vtables` compile options. [#20151](https://github.com/ClickHouse/ClickHouse/pull/20151) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Avoid downloading toolchain tarballs for cross-compiling for FreeBSD. [#31672](https://github.com/ClickHouse/ClickHouse/pull/31672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||
* Initial support for RISC-V. See development/build-cross-riscv for quirks and the build command that was tested. [#31309](https://github.com/ClickHouse/ClickHouse/pull/31309) ([Vladimir Smirnov](https://github.com/Civil)).
|
||||
* Support compilation on ARM machines with the parameter `-DENABLE_TESTS=OFF`. [#31007](https://github.com/ClickHouse/ClickHouse/pull/31007) ([zhanghuajie](https://github.com/zhanghuajieHIT)).
|
||||
|
||||
|
||||
### ClickHouse release v21.11, 2021-11-09
|
||||
|
||||
#### Backward Incompatible Change
|
||||
|
@ -149,6 +149,10 @@ if (ENABLE_FUZZING)
|
||||
set (ENABLE_JEMALLOC 0)
|
||||
set (ENABLE_CHECK_HEAVY_BUILDS 1)
|
||||
set (GLIBC_COMPATIBILITY OFF)
|
||||
|
||||
# For codegen_select_fuzzer
|
||||
set (ENABLE_PROTOBUF 1)
|
||||
set (USE_INTERNAL_PROTOBUF_LIBRARY 1)
|
||||
endif()
|
||||
|
||||
# Global libraries
|
||||
@ -443,7 +447,7 @@ if (MAKE_STATIC_LIBRARIES)
|
||||
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no-pie")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
|
||||
endif ()
|
||||
else ()
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
@ -504,6 +508,7 @@ include (cmake/find/hdfs3.cmake) # uses protobuf
|
||||
include (cmake/find/poco.cmake)
|
||||
include (cmake/find/curl.cmake)
|
||||
include (cmake/find/s3.cmake)
|
||||
include (cmake/find/blob_storage.cmake)
|
||||
include (cmake/find/base64.cmake)
|
||||
include (cmake/find/parquet.cmake)
|
||||
include (cmake/find/simdjson.cmake)
|
||||
|
@ -2,7 +2,13 @@
|
||||
|
||||
ClickHouse is an open project, and you can contribute to it in many ways. You can help with ideas, code, or documentation. We appreciate any efforts that help us to make the project better.
|
||||
|
||||
Thank you.
|
||||
Thank you!
|
||||
|
||||
## Legal Info
|
||||
|
||||
When you open your first pull-request to ClickHouse repo, a bot will invite you to accept ClickHouse Individual CLA (Contributor License Agreement). It is a simple few click process. For subsequent pull-requests the bot will check if you have already signed it and won't bother you again.
|
||||
|
||||
Optionally, to make contributions even more tight legally, your employer as a legal entity may want to sign a ClickHouse Corporate CLA with ClickHouse, Inc. If you're interested to do so, contact us at [legal@clickhouse.com](mailto:legal@clickhouse.com).
|
||||
|
||||
## Technical Info
|
||||
|
||||
|
@ -27,8 +27,7 @@ execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
|
||||
if (OS MATCHES "Linux"
|
||||
AND NOT DEFINED CMAKE_TOOLCHAIN_FILE
|
||||
AND NOT DISABLE_HERMETIC_BUILD
|
||||
AND ($ENV{CC} MATCHES ".*clang.*" OR CMAKE_C_COMPILER MATCHES ".*clang.*")
|
||||
AND (USE_STATIC_LIBRARIES OR NOT DEFINED USE_STATIC_LIBRARIES))
|
||||
AND ($ENV{CC} MATCHES ".*clang.*" OR CMAKE_C_COMPILER MATCHES ".*clang.*"))
|
||||
|
||||
if (ARCH MATCHES "amd64|x86_64")
|
||||
set (CMAKE_TOOLCHAIN_FILE "cmake/linux/toolchain-x86_64.cmake" CACHE INTERNAL "" FORCE)
|
||||
|
@ -24,8 +24,6 @@ set (SRCS
|
||||
|
||||
if (ENABLE_REPLXX)
|
||||
list (APPEND SRCS ReplxxLineReader.cpp)
|
||||
elseif (ENABLE_READLINE)
|
||||
list (APPEND SRCS ReadlineLineReader.cpp)
|
||||
endif ()
|
||||
|
||||
if (USE_DEBUG_HELPERS)
|
||||
@ -52,28 +50,6 @@ if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
|
||||
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
|
||||
endif()
|
||||
|
||||
# Allow explicit fallback to readline
|
||||
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
|
||||
message (STATUS "Attempt to fallback to readline explicitly")
|
||||
set (READLINE_PATHS "/usr/local/opt/readline/lib")
|
||||
# First try find custom lib for macos users (default lib without history support)
|
||||
find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS} NO_DEFAULT_PATH)
|
||||
if (NOT READLINE_LIB)
|
||||
find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS})
|
||||
endif ()
|
||||
|
||||
set(READLINE_INCLUDE_PATHS "/usr/local/opt/readline/include")
|
||||
find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS} NO_DEFAULT_PATH)
|
||||
if (NOT READLINE_INCLUDE_DIR)
|
||||
find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS})
|
||||
endif ()
|
||||
if (READLINE_INCLUDE_DIR AND READLINE_LIB)
|
||||
target_link_libraries(common PUBLIC ${READLINE_LIB})
|
||||
target_compile_definitions(common PUBLIC USE_READLINE=1)
|
||||
message (STATUS "Using readline: ${READLINE_INCLUDE_DIR} : ${READLINE_LIB}")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
target_link_libraries (common
|
||||
PUBLIC
|
||||
${CITYHASH_LIBRARIES}
|
||||
|
@ -10,16 +10,6 @@
|
||||
#include <sys/types.h>
|
||||
|
||||
|
||||
#ifdef OS_LINUX
|
||||
/// We can detect if code is linked with one or another readline variants or open the library dynamically.
|
||||
# include <dlfcn.h>
|
||||
extern "C"
|
||||
{
|
||||
char * readline(const char *) __attribute__((__weak__));
|
||||
char * (*readline_ptr)(const char *) = readline;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef HAS_RESERVED_IDENTIFIER
|
||||
#pragma clang diagnostic ignored "-Wreserved-identifier"
|
||||
#endif
|
||||
@ -152,33 +142,6 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt)
|
||||
{
|
||||
input.clear();
|
||||
|
||||
#ifdef OS_LINUX
|
||||
if (!readline_ptr)
|
||||
{
|
||||
for (const auto * name : {"libreadline.so", "libreadline.so.0", "libeditline.so", "libeditline.so.0"})
|
||||
{
|
||||
void * dl_handle = dlopen(name, RTLD_LAZY);
|
||||
if (dl_handle)
|
||||
{
|
||||
readline_ptr = reinterpret_cast<char * (*)(const char *)>(dlsym(dl_handle, "readline"));
|
||||
if (readline_ptr)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Minimal support for readline
|
||||
if (readline_ptr)
|
||||
{
|
||||
char * line_read = (*readline_ptr)(prompt.c_str());
|
||||
if (!line_read)
|
||||
return ABORT;
|
||||
input = line_read;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
std::cout << prompt;
|
||||
std::getline(std::cin, input);
|
||||
|
@ -53,7 +53,6 @@ protected:
|
||||
|
||||
String input;
|
||||
|
||||
private:
|
||||
bool multiline;
|
||||
|
||||
Patterns extenders;
|
||||
|
@ -1,187 +0,0 @@
|
||||
#include <base/ReadlineLineReader.h>
|
||||
#include <base/errnoToString.h>
|
||||
#include <base/scope_guard.h>
|
||||
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
/// Trim ending whitespace inplace
|
||||
void trim(String & s)
|
||||
{
|
||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static const LineReader::Suggest * suggest;
|
||||
|
||||
/// Points to current word to suggest.
|
||||
static LineReader::Suggest::Words::const_iterator pos;
|
||||
/// Points after the last possible match.
|
||||
static LineReader::Suggest::Words::const_iterator end;
|
||||
|
||||
/// Set iterators to the matched range of words if any.
|
||||
static void findRange(const char * prefix, size_t prefix_length)
|
||||
{
|
||||
std::string prefix_str(prefix);
|
||||
if (auto completions = suggest->getCompletions(prefix_str, prefix_length))
|
||||
std::tie(pos, end) = *completions;
|
||||
}
|
||||
|
||||
/// Iterates through matched range.
|
||||
static char * nextMatch()
|
||||
{
|
||||
if (pos >= end)
|
||||
return nullptr;
|
||||
|
||||
/// readline will free memory by itself.
|
||||
char * word = strdup(pos->c_str());
|
||||
++pos;
|
||||
return word;
|
||||
}
|
||||
|
||||
static char * generate(const char * text, int state)
|
||||
{
|
||||
if (!suggest->ready)
|
||||
return nullptr;
|
||||
if (state == 0)
|
||||
findRange(text, strlen(text));
|
||||
|
||||
/// Do not append whitespace after word. For unknown reason, rl_completion_append_character = '\0' does not work.
|
||||
rl_completion_suppress_append = 1;
|
||||
|
||||
return nextMatch();
|
||||
};
|
||||
|
||||
ReadlineLineReader::ReadlineLineReader(
|
||||
const Suggest & suggest_, const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)
|
||||
: LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_))
|
||||
{
|
||||
suggest = &suggest_;
|
||||
|
||||
if (!history_file_path.empty())
|
||||
{
|
||||
int res = read_history(history_file_path.c_str());
|
||||
if (res)
|
||||
std::cerr << "Cannot read history from file " + history_file_path + ": "+ errnoToString(errno) << std::endl;
|
||||
}
|
||||
|
||||
/// Added '.' to the default list. Because it is used to separate database and table.
|
||||
rl_basic_word_break_characters = word_break_characters;
|
||||
|
||||
/// Not append whitespace after single suggestion. Because whitespace after function name is meaningless.
|
||||
rl_completion_append_character = '\0';
|
||||
|
||||
rl_completion_entry_function = generate;
|
||||
|
||||
/// Install Ctrl+C signal handler that will be used in interactive mode.
|
||||
|
||||
if (rl_initialize())
|
||||
throw std::runtime_error("Cannot initialize readline");
|
||||
|
||||
auto clear_prompt_or_exit = [](int)
|
||||
{
|
||||
/// This is signal safe.
|
||||
ssize_t res = write(STDOUT_FILENO, "\n", 1);
|
||||
|
||||
/// Allow to quit client while query is in progress by pressing Ctrl+C twice.
|
||||
/// (First press to Ctrl+C will try to cancel query by InterruptListener).
|
||||
if (res == 1 && rl_line_buffer[0] && !RL_ISSTATE(RL_STATE_DONE))
|
||||
{
|
||||
rl_replace_line("", 0);
|
||||
if (rl_forced_update_display())
|
||||
_exit(0);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// A little dirty, but we struggle to find better way to correctly
|
||||
/// force readline to exit after returning from the signal handler.
|
||||
_exit(0);
|
||||
}
|
||||
};
|
||||
|
||||
if (signal(SIGINT, clear_prompt_or_exit) == SIG_ERR)
|
||||
throw std::runtime_error(std::string("Cannot set signal handler for readline: ") + errnoToString(errno));
|
||||
|
||||
rl_variable_bind("completion-ignore-case", "on");
|
||||
// TODO: it doesn't work
|
||||
// history_write_timestamps = 1;
|
||||
}
|
||||
|
||||
ReadlineLineReader::~ReadlineLineReader()
|
||||
{
|
||||
}
|
||||
|
||||
LineReader::InputStatus ReadlineLineReader::readOneLine(const String & prompt)
|
||||
{
|
||||
input.clear();
|
||||
|
||||
const char* cinput = readline(prompt.c_str());
|
||||
if (cinput == nullptr)
|
||||
return (errno != EAGAIN) ? ABORT : RESET_LINE;
|
||||
input = cinput;
|
||||
|
||||
trim(input);
|
||||
return INPUT_LINE;
|
||||
}
|
||||
|
||||
void ReadlineLineReader::addToHistory(const String & line)
|
||||
{
|
||||
add_history(line.c_str());
|
||||
|
||||
// Flush changes to the disk
|
||||
// NOTE readline builds a buffer of all the lines to write, and write them in one syscall.
|
||||
// Thus there is no need to lock the history file here.
|
||||
write_history(history_file_path.c_str());
|
||||
}
|
||||
|
||||
#if RL_VERSION_MAJOR >= 7
|
||||
|
||||
#define BRACK_PASTE_PREF "\033[200~"
|
||||
#define BRACK_PASTE_SUFF "\033[201~"
|
||||
|
||||
#define BRACK_PASTE_LAST '~'
|
||||
#define BRACK_PASTE_SLEN 6
|
||||
|
||||
/// This handler bypasses some unused macro/event checkings and remove trailing newlines before insertion.
|
||||
static int clickhouse_rl_bracketed_paste_begin(int /* count */, int /* key */)
|
||||
{
|
||||
std::string buf;
|
||||
buf.reserve(128);
|
||||
|
||||
RL_SETSTATE(RL_STATE_MOREINPUT);
|
||||
SCOPE_EXIT(RL_UNSETSTATE(RL_STATE_MOREINPUT));
|
||||
int c;
|
||||
while ((c = rl_read_key()) >= 0)
|
||||
{
|
||||
if (c == '\r')
|
||||
c = '\n';
|
||||
buf.push_back(c);
|
||||
if (buf.size() >= BRACK_PASTE_SLEN && c == BRACK_PASTE_LAST && buf.substr(buf.size() - BRACK_PASTE_SLEN) == BRACK_PASTE_SUFF)
|
||||
{
|
||||
buf.resize(buf.size() - BRACK_PASTE_SLEN);
|
||||
break;
|
||||
}
|
||||
}
|
||||
trim(buf);
|
||||
return static_cast<size_t>(rl_insert_text(buf.c_str())) == buf.size() ? 0 : 1;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void ReadlineLineReader::enableBracketedPaste()
|
||||
{
|
||||
#if RL_VERSION_MAJOR >= 7
|
||||
rl_variable_bind("enable-bracketed-paste", "on");
|
||||
|
||||
/// Use our bracketed paste handler to get better user experience. See comments above.
|
||||
rl_bind_keyseq(BRACK_PASTE_PREF, clickhouse_rl_bracketed_paste_begin);
|
||||
#endif
|
||||
};
|
@ -1,19 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "LineReader.h"
|
||||
|
||||
#include <readline/readline.h>
|
||||
#include <readline/history.h>
|
||||
|
||||
class ReadlineLineReader : public LineReader
|
||||
{
|
||||
public:
|
||||
ReadlineLineReader(const Suggest & suggest, const String & history_file_path, bool multiline, Patterns extenders_, Patterns delimiters_);
|
||||
~ReadlineLineReader() override;
|
||||
|
||||
void enableBracketedPaste() override;
|
||||
|
||||
private:
|
||||
InputStatus readOneLine(const String & prompt) override;
|
||||
void addToHistory(const String & line) override;
|
||||
};
|
@ -22,7 +22,14 @@ namespace
|
||||
/// Trim ending whitespace inplace
|
||||
void trim(String & s)
|
||||
{
|
||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
|
||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
|
||||
}
|
||||
|
||||
/// Check if string ends with given character after skipping whitespaces.
|
||||
bool ends_with(const std::string_view & s, const std::string_view & p)
|
||||
{
|
||||
auto ss = std::string_view(s.data(), s.rend() - std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }));
|
||||
return ss.ends_with(p);
|
||||
}
|
||||
|
||||
std::string getEditor()
|
||||
@ -189,8 +196,28 @@ ReplxxLineReader::ReplxxLineReader(
|
||||
rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
|
||||
rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
|
||||
|
||||
auto commit_action = [this](char32_t code)
|
||||
{
|
||||
std::string_view str = rx.get_state().text();
|
||||
|
||||
/// Always commit line when we see extender at the end. It will start a new prompt.
|
||||
for (const auto * extender : extenders)
|
||||
if (ends_with(str, extender))
|
||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||
|
||||
/// If we see an delimiter at the end, commit right away.
|
||||
for (const auto * delimiter : delimiters)
|
||||
if (ends_with(str, delimiter))
|
||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||
|
||||
/// If we allow multiline and there is already something in the input, start a newline.
|
||||
if (multiline && !input.empty())
|
||||
return rx.invoke(Replxx::ACTION::NEW_LINE, code);
|
||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||
};
|
||||
/// bind C-j to ENTER action.
|
||||
rx.bind_key(Replxx::KEY::control('J'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMMIT_LINE, code); });
|
||||
rx.bind_key(Replxx::KEY::control('J'), commit_action);
|
||||
rx.bind_key(Replxx::KEY::ENTER, commit_action);
|
||||
|
||||
/// By default COMPLETE_NEXT/COMPLETE_PREV was binded to C-p/C-n, re-bind
|
||||
/// to M-P/M-N (that was used for HISTORY_COMMON_PREFIX_SEARCH before, but
|
||||
|
@ -1,8 +1,11 @@
|
||||
#include <base/getPageSize.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
Int64 getPageSize()
|
||||
{
|
||||
return sysconf(_SC_PAGESIZE);
|
||||
Int64 page_size = sysconf(_SC_PAGESIZE);
|
||||
if (page_size < 0)
|
||||
abort();
|
||||
return page_size;
|
||||
}
|
||||
|
@ -104,7 +104,7 @@ static void writeSignalIDtoSignalPipe(int sig)
|
||||
errno = saved_errno;
|
||||
}
|
||||
|
||||
/** Signal handler for HUP / USR1 */
|
||||
/** Signal handler for HUP */
|
||||
static void closeLogsSignalHandler(int sig, siginfo_t *, void *)
|
||||
{
|
||||
DENY_ALLOCATIONS_IN_SCOPE;
|
||||
@ -161,7 +161,7 @@ __attribute__((__weak__)) void collectCrashLog(
|
||||
|
||||
|
||||
/** The thread that read info about signal or std::terminate from pipe.
|
||||
* On HUP / USR1, close log files (for new files to be opened later).
|
||||
* On HUP, close log files (for new files to be opened later).
|
||||
* On information about std::terminate, write it to log.
|
||||
* On other signals, write info to log.
|
||||
*/
|
||||
@ -201,7 +201,7 @@ public:
|
||||
LOG_INFO(log, "Stop SignalListener thread");
|
||||
break;
|
||||
}
|
||||
else if (sig == SIGHUP || sig == SIGUSR1)
|
||||
else if (sig == SIGHUP)
|
||||
{
|
||||
LOG_DEBUG(log, "Received signal to close logs.");
|
||||
BaseDaemon::instance().closeLogs(BaseDaemon::instance().logger());
|
||||
@ -832,7 +832,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
|
||||
/// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.
|
||||
|
||||
addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals);
|
||||
addSignalHandler({SIGHUP, SIGUSR1}, closeLogsSignalHandler, &handled_signals);
|
||||
addSignalHandler({SIGHUP}, closeLogsSignalHandler, &handled_signals);
|
||||
addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);
|
||||
|
||||
#if defined(SANITIZER)
|
||||
@ -1006,7 +1006,7 @@ void BaseDaemon::setupWatchdog()
|
||||
|
||||
/// Forward signals to the child process.
|
||||
addSignalHandler(
|
||||
{SIGHUP, SIGUSR1, SIGINT, SIGQUIT, SIGTERM},
|
||||
{SIGHUP, SIGINT, SIGQUIT, SIGTERM},
|
||||
[](int sig, siginfo_t *, void *)
|
||||
{
|
||||
/// Forward all signals except INT as it can be send by terminal to the process group when user press Ctrl+C,
|
||||
|
@ -182,7 +182,6 @@ TRAP(vlimit)
|
||||
TRAP(wcsnrtombs)
|
||||
TRAP(wcsrtombs)
|
||||
TRAP(wctomb)
|
||||
TRAP(wordexp)
|
||||
TRAP(basename)
|
||||
TRAP(catgets)
|
||||
TRAP(dbm_clearerr)
|
||||
@ -195,9 +194,8 @@ TRAP(dbm_nextkey)
|
||||
TRAP(dbm_open)
|
||||
TRAP(dbm_store)
|
||||
TRAP(dirname)
|
||||
#if !defined(SANITIZER)
|
||||
TRAP(dlerror) // Used by tsan
|
||||
#endif
|
||||
// TRAP(dlerror) // It is not thread-safe. But it is used by dynamic linker to load some name resolution plugins. Also used by TSan.
|
||||
/// Note: we should better get rid of glibc, dynamic linking and all that sort of annoying garbage altogether.
|
||||
TRAP(ftw)
|
||||
TRAP(getc_unlocked)
|
||||
//TRAP(getenv) // Ok at program startup
|
||||
@ -245,4 +243,21 @@ TRAP(lgammaf32x)
|
||||
TRAP(lgammaf64)
|
||||
TRAP(lgammaf64x)
|
||||
|
||||
/// These functions are unused by ClickHouse and we should be aware if they accidentally get used.
|
||||
/// Sometimes people report that these functions contain vulnerabilities (these reports are bogus for ClickHouse).
|
||||
TRAP(mq_close)
|
||||
TRAP(mq_getattr)
|
||||
TRAP(mq_setattr)
|
||||
TRAP(mq_notify)
|
||||
TRAP(mq_open)
|
||||
TRAP(mq_receive)
|
||||
TRAP(mq_send)
|
||||
TRAP(mq_unlink)
|
||||
TRAP(mq_timedsend)
|
||||
TRAP(mq_timedreceive)
|
||||
|
||||
/// These functions are also unused by ClickHouse.
|
||||
TRAP(wordexp)
|
||||
TRAP(wordfree)
|
||||
|
||||
#endif
|
||||
|
@ -82,7 +82,9 @@ PoolWithFailover::PoolWithFailover(
|
||||
unsigned default_connections_,
|
||||
unsigned max_connections_,
|
||||
size_t max_tries_,
|
||||
uint64_t wait_timeout_)
|
||||
uint64_t wait_timeout_,
|
||||
size_t connect_timeout_,
|
||||
size_t rw_timeout_)
|
||||
: max_tries(max_tries_)
|
||||
, shareable(false)
|
||||
, wait_timeout(wait_timeout_)
|
||||
@ -93,8 +95,8 @@ PoolWithFailover::PoolWithFailover(
|
||||
replicas_by_priority[0].emplace_back(std::make_shared<Pool>(database,
|
||||
host, user, password, port,
|
||||
/* socket_ = */ "",
|
||||
MYSQLXX_DEFAULT_TIMEOUT,
|
||||
MYSQLXX_DEFAULT_RW_TIMEOUT,
|
||||
connect_timeout_,
|
||||
rw_timeout_,
|
||||
default_connections_,
|
||||
max_connections_));
|
||||
}
|
||||
|
@ -6,6 +6,7 @@
|
||||
#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS 1
|
||||
#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS 16
|
||||
#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES 3
|
||||
#define MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_CONNECTION_WAIT_TIMEOUT 5 /// in seconds
|
||||
|
||||
|
||||
namespace mysqlxx
|
||||
@ -121,7 +122,9 @@ namespace mysqlxx
|
||||
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
|
||||
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
|
||||
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES,
|
||||
uint64_t wait_timeout_ = UINT64_MAX);
|
||||
uint64_t wait_timeout_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_CONNECTION_WAIT_TIMEOUT,
|
||||
size_t connect_timeout = MYSQLXX_DEFAULT_TIMEOUT,
|
||||
size_t rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT);
|
||||
|
||||
PoolWithFailover(const PoolWithFailover & other);
|
||||
|
||||
|
@ -21,4 +21,12 @@ Value Row::operator[] (const char * name) const
|
||||
throw Exception(std::string("Unknown column ") + name);
|
||||
}
|
||||
|
||||
enum enum_field_types Row::getFieldType(size_t i)
|
||||
{
|
||||
if (i >= res->getNumFields())
|
||||
throw Exception(std::string("Array Index Overflow"));
|
||||
MYSQL_FIELDS fields = res->getFields();
|
||||
return fields[i].type;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -79,6 +79,8 @@ public:
|
||||
*/
|
||||
operator private_bool_type() const { return row == nullptr ? nullptr : &Row::row; }
|
||||
|
||||
enum enum_field_types getFieldType(size_t i);
|
||||
|
||||
private:
|
||||
MYSQL_ROW row{};
|
||||
ResultBase * res{};
|
||||
|
@ -16,6 +16,8 @@ using MYSQL_ROW = char**;
|
||||
struct st_mysql_field;
|
||||
using MYSQL_FIELD = st_mysql_field;
|
||||
|
||||
enum struct enum_field_types;
|
||||
|
||||
#endif
|
||||
|
||||
namespace mysqlxx
|
||||
|
@ -2,11 +2,11 @@
|
||||
|
||||
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
||||
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
||||
SET(VERSION_REVISION 54457)
|
||||
SET(VERSION_REVISION 54458)
|
||||
SET(VERSION_MAJOR 21)
|
||||
SET(VERSION_MINOR 12)
|
||||
SET(VERSION_MINOR 13)
|
||||
SET(VERSION_PATCH 1)
|
||||
SET(VERSION_GITHASH 503a418dedf0011e9040c3a1b6913e0b5488be4c)
|
||||
SET(VERSION_DESCRIBE v21.12.1.1-prestable)
|
||||
SET(VERSION_STRING 21.12.1.1)
|
||||
SET(VERSION_GITHASH 4cc45c1e15912ee300bca7cc8b8da2b888a70e2a)
|
||||
SET(VERSION_DESCRIBE v21.13.1.1-prestable)
|
||||
SET(VERSION_STRING 21.13.1.1)
|
||||
# end of autochange
|
||||
|
30
cmake/find/blob_storage.cmake
Normal file
30
cmake/find/blob_storage.cmake
Normal file
@ -0,0 +1,30 @@
|
||||
option (ENABLE_AZURE_BLOB_STORAGE "Enable Azure blob storage" ${ENABLE_LIBRARIES})
|
||||
|
||||
option(USE_INTERNAL_AZURE_BLOB_STORAGE_LIBRARY
|
||||
"Set to FALSE to use system Azure SDK instead of bundled (OFF currently not implemented)"
|
||||
ON)
|
||||
|
||||
if (ENABLE_AZURE_BLOB_STORAGE)
|
||||
set(USE_AZURE_BLOB_STORAGE 1)
|
||||
set(AZURE_BLOB_STORAGE_LIBRARY azure_sdk)
|
||||
endif()
|
||||
|
||||
if ((NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/azure/sdk"
|
||||
OR NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/azure/cmake-modules")
|
||||
AND USE_INTERNAL_AZURE_BLOB_STORAGE_LIBRARY)
|
||||
message (WARNING "submodule contrib/azure is missing. to fix try run: \n git submodule update --init")
|
||||
set(USE_INTERNAL_AZURE_BLOB_STORAGE_LIBRARY OFF)
|
||||
set(USE_AZURE_BLOB_STORAGE 0)
|
||||
endif ()
|
||||
|
||||
if (NOT USE_INTERNAL_SSL_LIBRARY AND USE_INTERNAL_AZURE_BLOB_STORAGE_LIBRARY)
|
||||
message (FATAL_ERROR "Currently Blob Storage support can be built only with internal SSL library")
|
||||
endif()
|
||||
|
||||
if (NOT USE_INTERNAL_CURL AND USE_INTERNAL_AZURE_BLOB_STORAGE_LIBRARY)
|
||||
message (FATAL_ERROR "Currently Blob Storage support can be built only with internal curl library")
|
||||
endif()
|
||||
|
||||
if (USE_AZURE_BLOB_STORAGE)
|
||||
message (STATUS "Using Azure Blob Storage - ${USE_AZURE_BLOB_STORAGE}")
|
||||
endif()
|
@ -14,9 +14,12 @@ set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_6
|
||||
|
||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc")
|
||||
|
||||
set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||
|
||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
||||
|
@ -91,6 +91,9 @@ endif ()
|
||||
if (LINKER_NAME)
|
||||
if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
|
||||
find_program (LLD_PATH NAMES ${LINKER_NAME})
|
||||
if (NOT LLD_PATH)
|
||||
message (FATAL_ERROR "Using linker ${LINKER_NAME} but can't find its path.")
|
||||
endif ()
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}")
|
||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}")
|
||||
else ()
|
||||
|
4
contrib/CMakeLists.txt
vendored
4
contrib/CMakeLists.txt
vendored
@ -249,6 +249,10 @@ endif()
|
||||
# - sentry-native
|
||||
add_subdirectory (curl-cmake)
|
||||
|
||||
if (USE_INTERNAL_AZURE_BLOB_STORAGE_LIBRARY)
|
||||
add_subdirectory(azure-cmake)
|
||||
endif()
|
||||
|
||||
if (USE_SENTRY)
|
||||
add_subdirectory (sentry-native-cmake)
|
||||
endif()
|
||||
|
2
contrib/NuRaft
vendored
2
contrib/NuRaft
vendored
@ -1 +1 @@
|
||||
Subproject commit d10351f312c1ae1ca3fdda433693dfbef3acfece
|
||||
Subproject commit bb69d48e0ee35c87a0f19e509a09a914f71f0cff
|
1
contrib/azure
vendored
Submodule
1
contrib/azure
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit ac4b763d4ca40122275f1497cbdc5451337461d9
|
74
contrib/azure-cmake/CMakeLists.txt
Normal file
74
contrib/azure-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,74 @@
|
||||
set(AZURE_DIR "${ClickHouse_SOURCE_DIR}/contrib/azure")
|
||||
set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")
|
||||
|
||||
file(GLOB AZURE_SDK_CORE_SRC
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.hpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/winhttp/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/private/*.hpp"
|
||||
)
|
||||
|
||||
file(GLOB AZURE_SDK_IDENTITY_SRC
|
||||
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/private/*.hpp"
|
||||
)
|
||||
|
||||
file(GLOB AZURE_SDK_STORAGE_COMMON_SRC
|
||||
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/private/*.cpp"
|
||||
)
|
||||
|
||||
file(GLOB AZURE_SDK_STORAGE_BLOBS_SRC
|
||||
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.hpp"
|
||||
)
|
||||
|
||||
file(GLOB AZURE_SDK_UNIFIED_SRC
|
||||
${AZURE_SDK_CORE_SRC}
|
||||
${AZURE_SDK_IDENTITY_SRC}
|
||||
${AZURE_SDK_STORAGE_COMMON_SRC}
|
||||
${AZURE_SDK_STORAGE_BLOBS_SRC}
|
||||
)
|
||||
|
||||
set(AZURE_SDK_INCLUDES
|
||||
"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/inc/"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/inc/"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/inc/"
|
||||
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
|
||||
)
|
||||
|
||||
include("${AZURE_DIR}/cmake-modules/AzureTransportAdapters.cmake")
|
||||
|
||||
add_library(azure_sdk ${AZURE_SDK_UNIFIED_SRC})
|
||||
|
||||
if (COMPILER_CLANG)
|
||||
target_compile_options(azure_sdk PRIVATE
|
||||
-Wno-deprecated-copy-dtor
|
||||
-Wno-extra-semi
|
||||
-Wno-suggest-destructor-override
|
||||
-Wno-inconsistent-missing-destructor-override
|
||||
-Wno-error=unknown-warning-option
|
||||
)
|
||||
|
||||
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13)
|
||||
target_compile_options(azure_sdk PRIVATE -Wno-reserved-identifier)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Originally, on Windows azure-core is built with bcrypt and crypt32 by default
|
||||
if (OPENSSL_FOUND)
|
||||
target_link_libraries(azure_sdk PRIVATE ${OPENSSL_LIBRARIES})
|
||||
endif()
|
||||
|
||||
# Originally, on Windows azure-core is built with winhttp by default
|
||||
if (CURL_FOUND)
|
||||
target_link_libraries(azure_sdk PRIVATE ${CURL_LIBRARY})
|
||||
endif()
|
||||
|
||||
target_link_libraries(azure_sdk PRIVATE ${LIBXML2_LIBRARIES})
|
||||
|
||||
target_include_directories(azure_sdk SYSTEM PUBLIC ${AZURE_SDK_INCLUDES})
|
@ -639,6 +639,7 @@ add_library(
|
||||
|
||||
"${BORINGSSL_SOURCE_DIR}/decrepit/ssl/ssl_decrepit.c"
|
||||
"${BORINGSSL_SOURCE_DIR}/decrepit/cfb/cfb.c"
|
||||
"${BORINGSSL_SOURCE_DIR}/decrepit/bio/base64_bio.c"
|
||||
)
|
||||
|
||||
add_executable(
|
||||
|
2
contrib/cassandra
vendored
2
contrib/cassandra
vendored
@ -1 +1 @@
|
||||
Subproject commit eb9b68dadbb4417a2c132ad4a1c2fa76e65e6fc1
|
||||
Subproject commit f4a31e92a25c34c02c7291ff97c7813bc83b0e09
|
2
contrib/jemalloc
vendored
2
contrib/jemalloc
vendored
@ -1 +1 @@
|
||||
Subproject commit e6891d9746143bf2cf617493d880ba5a0b9a3efd
|
||||
Subproject commit a1404807211b1612539f840b3dcb1bf38d1a269e
|
@ -1,6 +1,6 @@
|
||||
if (SANITIZE OR NOT (
|
||||
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
|
||||
(OS_DARWIN AND (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" OR CMAKE_BUILD_TYPE STREQUAL "Debug"))
|
||||
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
|
||||
))
|
||||
if (ENABLE_JEMALLOC)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL}
|
||||
|
2
contrib/libpqxx
vendored
2
contrib/libpqxx
vendored
@ -1 +1 @@
|
||||
Subproject commit 357608d11b7a1961c3fb7db2ef9a5dbb2e87da77
|
||||
Subproject commit 63e20f9485b8cbeabf99008123248fc9f033e766
|
@ -268,7 +268,7 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
|
||||
*
|
||||
* Whether iconv support is available
|
||||
*/
|
||||
#if 1
|
||||
#if 0
|
||||
#define LIBXML_ICONV_ENABLED
|
||||
#endif
|
||||
|
||||
|
2
contrib/poco
vendored
2
contrib/poco
vendored
@ -1 +1 @@
|
||||
Subproject commit 258b9ba6cd245ff88e9346f75c43464c403f329d
|
||||
Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
|
@ -8,7 +8,7 @@ if (NOT ENABLE_REPLXX)
|
||||
add_library(replxx INTERFACE)
|
||||
target_compile_definitions(replxx INTERFACE USE_REPLXX=0)
|
||||
|
||||
message (STATUS "Not using replxx (Beware! Runtime fallback to readline is possible!)")
|
||||
message (STATUS "Not using replxx")
|
||||
return()
|
||||
endif()
|
||||
|
||||
|
2
contrib/sysroot
vendored
2
contrib/sysroot
vendored
@ -1 +1 @@
|
||||
Subproject commit 410845187f582c5e6692b53dddbe43efbb728734
|
||||
Subproject commit bbcac834526d90d1e764164b861be426891d1743
|
@ -202,10 +202,10 @@
|
||||
#define HAVE_READDIR 1
|
||||
|
||||
/* Add readline support */
|
||||
#define HAVE_READLINE 1
|
||||
/* #undef HAVE_READLINE */
|
||||
|
||||
/* Define to 1 if you have the <readline/history.h> header file. */
|
||||
#define HAVE_READLINE_HISTORY_H 1
|
||||
/* #undef HAVE_READLINE_HISTORY_H */
|
||||
|
||||
/* Use the scandir lib */
|
||||
/* #undef HAVE_SCANDIR */
|
||||
|
4
debian/changelog
vendored
4
debian/changelog
vendored
@ -1,5 +1,5 @@
|
||||
clickhouse (21.12.1.1) unstable; urgency=low
|
||||
clickhouse (21.13.1.1) unstable; urgency=low
|
||||
|
||||
* Modified source code
|
||||
|
||||
-- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 02 Nov 2021 00:56:42 +0300
|
||||
-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Dec 2021 00:32:58 +0300
|
||||
|
14
debian/rules
vendored
14
debian/rules
vendored
@ -45,6 +45,10 @@ ifdef DEB_CXX
|
||||
ifeq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE))
|
||||
CC := $(DEB_CC)
|
||||
CXX := $(DEB_CXX)
|
||||
else ifeq (clang,$(findstring clang,$(DEB_CXX)))
|
||||
# If we crosscompile with clang, it knows what to do
|
||||
CC := $(DEB_CC)
|
||||
CXX := $(DEB_CXX)
|
||||
else
|
||||
CC := $(DEB_HOST_GNU_TYPE)-$(DEB_CC)
|
||||
CXX := $(DEB_HOST_GNU_TYPE)-$(DEB_CXX)
|
||||
@ -77,10 +81,6 @@ else
|
||||
THREADS_COUNT = 1
|
||||
endif
|
||||
|
||||
ifneq ($(THREADS_COUNT),)
|
||||
THREADS_COUNT:=-j$(THREADS_COUNT)
|
||||
endif
|
||||
|
||||
%:
|
||||
dh $@ $(DH_FLAGS) --buildsystem=cmake
|
||||
|
||||
@ -89,11 +89,11 @@ override_dh_auto_configure:
|
||||
|
||||
override_dh_auto_build:
|
||||
# Fix for ninja. Do not add -O.
|
||||
$(MAKE) $(THREADS_COUNT) -C $(BUILDDIR) $(MAKE_TARGET)
|
||||
$(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) $(MAKE_TARGET)
|
||||
|
||||
override_dh_auto_test:
|
||||
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
|
||||
cd $(BUILDDIR) && ctest $(THREADS_COUNT) -V
|
||||
cd $(BUILDDIR) && ctest -j$(THREADS_COUNT) -V
|
||||
endif
|
||||
|
||||
override_dh_clean:
|
||||
@ -120,7 +120,7 @@ override_dh_install:
|
||||
dh_install --list-missing --sourcedir=$(DESTDIR)
|
||||
|
||||
override_dh_auto_install:
|
||||
env DESTDIR=$(DESTDIR) $(MAKE) $(THREADS_COUNT) -C $(BUILDDIR) install
|
||||
env DESTDIR=$(DESTDIR) $(MAKE) -j$(THREADS_COUNT) -C $(BUILDDIR) install
|
||||
|
||||
override_dh_shlibdeps:
|
||||
true # We depend only on libc and dh_shlibdeps gives us wrong (too strict) dependency.
|
||||
|
@ -5,7 +5,7 @@ ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG version=21.12.1.*
|
||||
ARG version=21.13.1.*
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install --yes --no-install-recommends \
|
||||
|
@ -46,7 +46,6 @@
|
||||
"name": "clickhouse/stateless-test",
|
||||
"dependent": [
|
||||
"docker/test/stateful",
|
||||
"docker/test/coverage",
|
||||
"docker/test/unit"
|
||||
]
|
||||
},
|
||||
@ -56,10 +55,6 @@
|
||||
"docker/test/stress"
|
||||
]
|
||||
},
|
||||
"docker/test/coverage": {
|
||||
"name": "clickhouse/test-coverage",
|
||||
"dependent": []
|
||||
},
|
||||
"docker/test/unit": {
|
||||
"name": "clickhouse/unit-test",
|
||||
"dependent": []
|
||||
|
@ -24,41 +24,34 @@ RUN apt-get update \
|
||||
&& apt-key add /tmp/llvm-snapshot.gpg.key \
|
||||
&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
||||
&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
|
||||
/etc/apt/sources.list
|
||||
/etc/apt/sources.list \
|
||||
&& apt-get clean
|
||||
|
||||
# initial packages
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
bash \
|
||||
fakeroot \
|
||||
ccache \
|
||||
curl \
|
||||
software-properties-common \
|
||||
--yes --no-install-recommends
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
bash \
|
||||
build-essential \
|
||||
ccache \
|
||||
clang-${LLVM_VERSION} \
|
||||
clang-tidy-${LLVM_VERSION} \
|
||||
cmake \
|
||||
curl \
|
||||
fakeroot \
|
||||
gdb \
|
||||
git \
|
||||
gperf \
|
||||
clang-${LLVM_VERSION} \
|
||||
clang-tidy-${LLVM_VERSION} \
|
||||
lld-${LLVM_VERSION} \
|
||||
llvm-${LLVM_VERSION} \
|
||||
llvm-${LLVM_VERSION}-dev \
|
||||
libicu-dev \
|
||||
libreadline-dev \
|
||||
moreutils \
|
||||
ninja-build \
|
||||
pigz \
|
||||
rename \
|
||||
software-properties-common \
|
||||
tzdata \
|
||||
--yes --no-install-recommends
|
||||
--yes --no-install-recommends \
|
||||
&& apt-get clean
|
||||
|
||||
# This symlink required by gcc to find lld compiler
|
||||
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
|
||||
@ -67,7 +60,7 @@ ENV CC=clang-${LLVM_VERSION}
|
||||
ENV CXX=clang++-${LLVM_VERSION}
|
||||
|
||||
# libtapi is required to support .tbh format from recent MacOS SDKs
|
||||
RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
|
||||
RUN git clone --depth 1 https://github.com/tpoechtrager/apple-libtapi.git \
|
||||
&& cd apple-libtapi \
|
||||
&& INSTALLPREFIX=/cctools ./build.sh \
|
||||
&& ./install.sh \
|
||||
@ -75,7 +68,7 @@ RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
|
||||
&& rm -rf apple-libtapi
|
||||
|
||||
# Build and install tools for cross-linking to Darwin (x86-64)
|
||||
RUN git clone https://github.com/tpoechtrager/cctools-port.git \
|
||||
RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \
|
||||
&& cd cctools-port/cctools \
|
||||
&& ./configure --prefix=/cctools --with-libtapi=/cctools \
|
||||
--target=x86_64-apple-darwin \
|
||||
@ -84,7 +77,7 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
|
||||
&& rm -rf cctools-port
|
||||
|
||||
# Build and install tools for cross-linking to Darwin (aarch64)
|
||||
RUN git clone https://github.com/tpoechtrager/cctools-port.git \
|
||||
RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \
|
||||
&& cd cctools-port/cctools \
|
||||
&& ./configure --prefix=/cctools --with-libtapi=/cctools \
|
||||
--target=aarch64-apple-darwin \
|
||||
@ -98,7 +91,8 @@ RUN wget -nv https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacO
|
||||
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
|
||||
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
|
||||
&& apt-get update \
|
||||
&& apt-get install gcc-11 g++-11 --yes
|
||||
&& apt-get install gcc-11 g++-11 --yes \
|
||||
&& apt-get clean
|
||||
|
||||
|
||||
COPY build.sh /
|
||||
|
@ -64,8 +64,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
|
||||
&& apt-get install gcc-11 g++-11 --yes
|
||||
|
||||
|
||||
# This symlink required by gcc to find lld compiler
|
||||
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
|
||||
# These symlinks are required:
|
||||
# /usr/bin/ld.lld: by gcc to find lld compiler
|
||||
# /usr/bin/aarch64-linux-gnu-obj*: for debug symbols stripping
|
||||
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld \
|
||||
&& ln -sf /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-objcopy /usr/bin/aarch64-linux-gnu-strip \
|
||||
&& ln -sf /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-objcopy /usr/bin/aarch64-linux-gnu-objcopy \
|
||||
&& ln -sf /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-objdump /usr/bin/aarch64-linux-gnu-objdump
|
||||
|
||||
|
||||
COPY build.sh /
|
||||
|
||||
|
@ -31,5 +31,6 @@ do
|
||||
mv "$FUZZER_PATH" /output/fuzzers
|
||||
done
|
||||
|
||||
|
||||
tar -zcvf /output/fuzzers.tar.gz /output/fuzzers
|
||||
rm -rf /output/fuzzers
|
||||
|
@ -29,7 +29,13 @@ def pull_image(image_name):
|
||||
return False
|
||||
|
||||
def build_image(image_name, filepath):
|
||||
subprocess.check_call("docker build --network=host -t {} -f {} .".format(image_name, filepath), shell=True)
|
||||
context = os.path.dirname(filepath)
|
||||
subprocess.check_call(
|
||||
"docker build --network=host -t {} -f {} {}".format(
|
||||
image_name, filepath, context
|
||||
),
|
||||
shell=True,
|
||||
)
|
||||
|
||||
def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache_dir, docker_image_version):
|
||||
env_part = " -e ".join(env_variables)
|
||||
@ -90,6 +96,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
|
||||
elif is_cross_arm:
|
||||
cc = compiler[:-len(ARM_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake")
|
||||
result.append("DEB_ARCH_FLAG=-aarm64")
|
||||
elif is_cross_freebsd:
|
||||
cc = compiler[:-len(FREEBSD_SUFFIX)]
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake")
|
||||
@ -98,6 +105,7 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
|
||||
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake")
|
||||
else:
|
||||
cc = compiler
|
||||
result.append("DEB_ARCH_FLAG=-aamd64")
|
||||
|
||||
cxx = cc.replace('gcc', 'g++').replace('clang', 'clang++')
|
||||
|
||||
|
@ -5,7 +5,7 @@ ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG version=21.12.1.*
|
||||
ARG version=21.13.1.*
|
||||
ARG gosu_ver=1.10
|
||||
|
||||
# set non-empty deb_location_url url to create a docker image
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG version=21.12.1.*
|
||||
ARG version=21.13.1.*
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y apt-transport-https dirmngr && \
|
||||
|
@ -6,7 +6,7 @@ FROM clickhouse/binary-builder
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-9 libllvm9 libclang-9-dev
|
||||
RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-13 libllvm13 libclang-13-dev
|
||||
|
||||
# repo versions doesn't work correctly with C++17
|
||||
# also we push reports to s3, so we add index.html to subfolder urls
|
||||
@ -23,12 +23,12 @@ ENV SOURCE_DIRECTORY=/repo_folder
|
||||
ENV BUILD_DIRECTORY=/build
|
||||
ENV HTML_RESULT_DIRECTORY=$BUILD_DIRECTORY/html_report
|
||||
ENV SHA=nosha
|
||||
ENV DATA="data"
|
||||
ENV DATA="https://s3.amazonaws.com/clickhouse-test-reports/codebrowser/data"
|
||||
|
||||
CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \
|
||||
cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-13 -DCMAKE_C_COMPILER=/usr/bin/clang-13 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \
|
||||
mkdir -p $HTML_RESULT_DIRECTORY && \
|
||||
$CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \
|
||||
cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\
|
||||
$CODEINDEX $HTML_RESULT_DIRECTORY -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \
|
||||
$CODEINDEX $HTML_RESULT_DIRECTORY -d "$DATA" | ts '%Y-%m-%d %H:%M:%S' && \
|
||||
mv $HTML_RESULT_DIRECTORY /test_output
|
||||
|
@ -1,18 +0,0 @@
|
||||
# docker build -t clickhouse/test-coverage .
|
||||
FROM clickhouse/stateless-test
|
||||
|
||||
RUN apt-get update -y \
|
||||
&& env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get install --yes --no-install-recommends \
|
||||
cmake
|
||||
|
||||
COPY s3downloader /s3downloader
|
||||
COPY run.sh /run.sh
|
||||
|
||||
ENV DATASETS="hits visits"
|
||||
ENV COVERAGE_DIR=/coverage_reports
|
||||
ENV SOURCE_DIR=/build
|
||||
ENV OUTPUT_DIR=/output
|
||||
ENV IGNORE='.*contrib.*'
|
||||
|
||||
CMD ["/bin/bash", "/run.sh"]
|
@ -1,112 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
kill_clickhouse () {
|
||||
echo "clickhouse pids $(pgrep -u clickhouse)" | ts '%Y-%m-%d %H:%M:%S'
|
||||
pkill -f "clickhouse-server" 2>/dev/null
|
||||
|
||||
|
||||
for _ in {1..120}
|
||||
do
|
||||
if ! pkill -0 -f "clickhouse-server" ; then break ; fi
|
||||
echo "ClickHouse still alive" | ts '%Y-%m-%d %H:%M:%S'
|
||||
sleep 1
|
||||
done
|
||||
|
||||
if pkill -0 -f "clickhouse-server"
|
||||
then
|
||||
pstree -apgT
|
||||
jobs
|
||||
echo "Failed to kill the ClickHouse server" | ts '%Y-%m-%d %H:%M:%S'
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
start_clickhouse () {
|
||||
LLVM_PROFILE_FILE='server_%h_%p_%m.profraw' sudo -Eu clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml &
|
||||
counter=0
|
||||
until clickhouse-client --query "SELECT 1"
|
||||
do
|
||||
if [ "$counter" -gt 120 ]
|
||||
then
|
||||
echo "Cannot start clickhouse-server"
|
||||
cat /var/log/clickhouse-server/stdout.log
|
||||
tail -n1000 /var/log/clickhouse-server/stderr.log
|
||||
tail -n1000 /var/log/clickhouse-server/clickhouse-server.log
|
||||
break
|
||||
fi
|
||||
sleep 0.5
|
||||
counter=$((counter + 1))
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
chmod 777 /
|
||||
|
||||
dpkg -i package_folder/clickhouse-common-static_*.deb; \
|
||||
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \
|
||||
dpkg -i package_folder/clickhouse-server_*.deb; \
|
||||
dpkg -i package_folder/clickhouse-client_*.deb; \
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /var/lib/clickhouse
|
||||
mkdir -p /var/log/clickhouse-server
|
||||
chmod 777 -R /var/log/clickhouse-server/
|
||||
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
start_clickhouse
|
||||
|
||||
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
||||
if ! /s3downloader --dataset-names $DATASETS; then
|
||||
echo "Cannot download datatsets"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
|
||||
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "SHOW DATABASES"
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "CREATE DATABASE test"
|
||||
|
||||
kill_clickhouse
|
||||
start_clickhouse
|
||||
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "SHOW TABLES FROM datasets"
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "SHOW TABLES FROM test"
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "SHOW TABLES FROM test"
|
||||
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test -j 8 --testname --shard --zookeeper --print-time 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_result.txt
|
||||
|
||||
readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "/test_result.txt")
|
||||
|
||||
kill_clickhouse
|
||||
|
||||
sleep 3
|
||||
|
||||
if [[ -n "${FAILED_TESTS[*]}" ]]
|
||||
then
|
||||
# Clean the data so that there is no interference from the previous test run.
|
||||
rm -rf /var/lib/clickhouse/{{meta,}data,user_files} ||:
|
||||
|
||||
start_clickhouse
|
||||
|
||||
echo "Going to run again: ${FAILED_TESTS[*]}"
|
||||
|
||||
LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test --order=random --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_result.txt
|
||||
else
|
||||
echo "No failed tests"
|
||||
fi
|
||||
|
||||
mkdir -p "$COVERAGE_DIR"
|
||||
mv /*.profraw "$COVERAGE_DIR"
|
||||
|
||||
mkdir -p "$SOURCE_DIR"/obj-x86_64-linux-gnu
|
||||
cd "$SOURCE_DIR"/obj-x86_64-linux-gnu && CC=clang-11 CXX=clang++-11 cmake .. && cd /
|
||||
llvm-profdata-11 merge -sparse "${COVERAGE_DIR}"/* -o clickhouse.profdata
|
||||
llvm-cov-11 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex "$IGNORE" > output.lcov
|
||||
genhtml output.lcov --ignore-errors source --output-directory "${OUTPUT_DIR}"
|
@ -1,101 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import tarfile
|
||||
import logging
|
||||
import argparse
|
||||
import requests
|
||||
import tempfile
|
||||
|
||||
|
||||
DEFAULT_URL = 'https://clickhouse-datasets.s3.yandex.net'
|
||||
|
||||
AVAILABLE_DATASETS = {
|
||||
'hits': 'hits_v1.tar',
|
||||
'visits': 'visits_v1.tar',
|
||||
}
|
||||
|
||||
RETRIES_COUNT = 5
|
||||
|
||||
def _get_temp_file_name():
|
||||
return os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
|
||||
|
||||
def build_url(base_url, dataset):
|
||||
return os.path.join(base_url, dataset, 'partitions', AVAILABLE_DATASETS[dataset])
|
||||
|
||||
def dowload_with_progress(url, path):
|
||||
logging.info("Downloading from %s to temp path %s", url, path)
|
||||
for i in range(RETRIES_COUNT):
|
||||
try:
|
||||
with open(path, 'wb') as f:
|
||||
response = requests.get(url, stream=True)
|
||||
response.raise_for_status()
|
||||
total_length = response.headers.get('content-length')
|
||||
if total_length is None or int(total_length) == 0:
|
||||
logging.info("No content-length, will download file without progress")
|
||||
f.write(response.content)
|
||||
else:
|
||||
dl = 0
|
||||
total_length = int(total_length)
|
||||
logging.info("Content length is %ld bytes", total_length)
|
||||
for data in response.iter_content(chunk_size=4096):
|
||||
dl += len(data)
|
||||
f.write(data)
|
||||
if sys.stdout.isatty():
|
||||
done = int(50 * dl / total_length)
|
||||
percent = int(100 * float(dl) / total_length)
|
||||
sys.stdout.write("\r[{}{}] {}%".format('=' * done, ' ' * (50-done), percent))
|
||||
sys.stdout.flush()
|
||||
break
|
||||
except Exception as ex:
|
||||
sys.stdout.write("\n")
|
||||
time.sleep(3)
|
||||
logging.info("Exception while downloading %s, retry %s", ex, i + 1)
|
||||
if os.path.exists(path):
|
||||
os.remove(path)
|
||||
else:
|
||||
raise Exception("Cannot download dataset from {}, all retries exceeded".format(url))
|
||||
|
||||
sys.stdout.write("\n")
|
||||
logging.info("Downloading finished")
|
||||
|
||||
def unpack_to_clickhouse_directory(tar_path, clickhouse_path):
|
||||
logging.info("Will unpack data from temp path %s to clickhouse db %s", tar_path, clickhouse_path)
|
||||
with tarfile.open(tar_path, 'r') as comp_file:
|
||||
comp_file.extractall(path=clickhouse_path)
|
||||
logging.info("Unpack finished")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Simple tool for dowloading datasets for clickhouse from S3")
|
||||
|
||||
parser.add_argument('--dataset-names', required=True, nargs='+', choices=list(AVAILABLE_DATASETS.keys()))
|
||||
parser.add_argument('--url-prefix', default=DEFAULT_URL)
|
||||
parser.add_argument('--clickhouse-data-path', default='/var/lib/clickhouse/')
|
||||
|
||||
args = parser.parse_args()
|
||||
datasets = args.dataset_names
|
||||
logging.info("Will fetch following datasets: %s", ', '.join(datasets))
|
||||
for dataset in datasets:
|
||||
logging.info("Processing %s", dataset)
|
||||
temp_archive_path = _get_temp_file_name()
|
||||
try:
|
||||
download_url_for_dataset = build_url(args.url_prefix, dataset)
|
||||
dowload_with_progress(download_url_for_dataset, temp_archive_path)
|
||||
unpack_to_clickhouse_directory(temp_archive_path, args.clickhouse_data_path)
|
||||
except Exception as ex:
|
||||
logging.info("Some exception occured %s", str(ex))
|
||||
raise
|
||||
finally:
|
||||
logging.info("Will remove downloaded file %s from filesystem if it exists", temp_archive_path)
|
||||
if os.path.exists(temp_archive_path):
|
||||
os.remove(temp_archive_path)
|
||||
logging.info("Processing of %s finished", dataset)
|
||||
logging.info("Fetch finished, enjoy your tables!")
|
||||
|
||||
|
@ -111,19 +111,6 @@ function start_server
|
||||
fi
|
||||
|
||||
echo "ClickHouse server pid '$server_pid' started and responded"
|
||||
|
||||
echo "
|
||||
set follow-fork-mode child
|
||||
handle all noprint
|
||||
handle SIGSEGV stop print
|
||||
handle SIGBUS stop print
|
||||
handle SIGABRT stop print
|
||||
continue
|
||||
thread apply all backtrace
|
||||
continue
|
||||
" > script.gdb
|
||||
|
||||
gdb -batch -command script.gdb -p "$server_pid" &
|
||||
}
|
||||
|
||||
function clone_root
|
||||
@ -186,6 +173,8 @@ function clone_submodules
|
||||
contrib/dragonbox
|
||||
contrib/fast_float
|
||||
contrib/NuRaft
|
||||
contrib/jemalloc
|
||||
contrib/replxx
|
||||
)
|
||||
|
||||
git submodule sync
|
||||
@ -206,6 +195,8 @@ function run_cmake
|
||||
"-DENABLE_THINLTO=0"
|
||||
"-DUSE_UNWIND=1"
|
||||
"-DENABLE_NURAFT=1"
|
||||
"-DENABLE_JEMALLOC=1"
|
||||
"-DENABLE_REPLXX=1"
|
||||
)
|
||||
|
||||
# TODO remove this? we don't use ccache anyway. An option would be to download it
|
||||
@ -266,7 +257,13 @@ function run_tests
|
||||
start_server
|
||||
|
||||
set +e
|
||||
time clickhouse-test --hung-check -j 8 --order=random \
|
||||
local NPROC
|
||||
NPROC=$(nproc)
|
||||
NPROC=$((NPROC / 2))
|
||||
if [[ $NPROC == 0 ]]; then
|
||||
NPROC=1
|
||||
fi
|
||||
time clickhouse-test --hung-check -j "${NPROC}" --order=random \
|
||||
--fast-tests-only --no-long --testname --shard --zookeeper --check-zookeeper-session \
|
||||
-- "$FASTTEST_FOCUS" 2>&1 \
|
||||
| ts '%Y-%m-%d %H:%M:%S' \
|
||||
|
@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
# shellcheck disable=SC2086,SC2001,SC2046
|
||||
# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031
|
||||
|
||||
set -eux
|
||||
set -o pipefail
|
||||
@ -35,7 +35,7 @@ function clone
|
||||
fi
|
||||
git diff --name-only master HEAD | tee ci-changed-files.txt
|
||||
else
|
||||
if [ -v COMMIT_SHA ]; then
|
||||
if [ -v SHA_TO_TEST ]; then
|
||||
git fetch --depth 2 origin "$SHA_TO_TEST"
|
||||
git checkout "$SHA_TO_TEST"
|
||||
echo "Checked out nominal SHA $SHA_TO_TEST for master"
|
||||
@ -155,17 +155,43 @@ function fuzz
|
||||
|
||||
kill -0 $server_pid
|
||||
|
||||
# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
|
||||
# and clickhouse-server can do fork-exec, for example, to run some bridge.
|
||||
# Do not set nostop noprint for all signals, because for some of them it may cause gdb to hang,
|
||||
# explicitly ignore non-fatal signals that are used by the server.
|
||||
# The number of SIGRTMIN can be determined only at runtime.
|
||||
RTMIN=$(kill -l SIGRTMIN)
|
||||
echo "
|
||||
set follow-fork-mode child
|
||||
handle all noprint
|
||||
handle SIGSEGV stop print
|
||||
handle SIGBUS stop print
|
||||
continue
|
||||
thread apply all backtrace
|
||||
set follow-fork-mode parent
|
||||
handle SIGHUP nostop noprint pass
|
||||
handle SIGINT nostop noprint pass
|
||||
handle SIGQUIT nostop noprint pass
|
||||
handle SIGPIPE nostop noprint pass
|
||||
handle SIGTERM nostop noprint pass
|
||||
handle SIGUSR1 nostop noprint pass
|
||||
handle SIGUSR2 nostop noprint pass
|
||||
handle SIG$RTMIN nostop noprint pass
|
||||
info signals
|
||||
continue
|
||||
backtrace full
|
||||
info locals
|
||||
info registers
|
||||
disassemble /s
|
||||
up
|
||||
info locals
|
||||
disassemble /s
|
||||
up
|
||||
info locals
|
||||
disassemble /s
|
||||
p \"done\"
|
||||
detach
|
||||
quit
|
||||
" > script.gdb
|
||||
|
||||
gdb -batch -command script.gdb -p $server_pid &
|
||||
|
||||
sleep 5
|
||||
# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
|
||||
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
|
||||
|
||||
# Check connectivity after we attach gdb, because it might cause the server
|
||||
# to freeze and the fuzzer will fail.
|
||||
@ -189,6 +215,7 @@ continue
|
||||
--receive_data_timeout_ms=10000 \
|
||||
--stacktrace \
|
||||
--query-fuzzer-runs=1000 \
|
||||
--testmode \
|
||||
--queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
|
||||
$NEW_TESTS_OPT \
|
||||
> >(tail -n 100000 > fuzzer.log) \
|
||||
|
@ -7,7 +7,6 @@ RUN apt-get update \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \
|
||||
tzdata \
|
||||
python3 \
|
||||
libreadline-dev \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
gdb \
|
||||
|
@ -21,7 +21,6 @@ RUN apt-get update \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
libreadline-dev \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
curl \
|
||||
@ -73,11 +72,13 @@ RUN python3 -m pip install \
|
||||
grpcio-tools \
|
||||
kafka-python \
|
||||
kazoo \
|
||||
lz4 \
|
||||
minio \
|
||||
protobuf \
|
||||
psycopg2-binary==2.8.6 \
|
||||
pymongo \
|
||||
pymongo==3.11.0 \
|
||||
pytest \
|
||||
pytest-order==1.0.0 \
|
||||
pytest-timeout \
|
||||
pytest-xdist \
|
||||
pytest-repeat \
|
||||
@ -86,7 +87,8 @@ RUN python3 -m pip install \
|
||||
tzlocal==2.1 \
|
||||
urllib3 \
|
||||
requests-kerberos \
|
||||
pyhdfs
|
||||
pyhdfs \
|
||||
azure-storage-blob
|
||||
|
||||
COPY modprobe.sh /usr/local/bin/modprobe
|
||||
COPY dockerd-entrypoint.sh /usr/local/bin/
|
||||
|
@ -0,0 +1,13 @@
|
||||
version: '2.3'
|
||||
|
||||
services:
|
||||
azurite1:
|
||||
image: mcr.microsoft.com/azure-storage/azurite
|
||||
ports:
|
||||
- "10000:10000"
|
||||
volumes:
|
||||
- data1-1:/data1
|
||||
command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log
|
||||
|
||||
volumes:
|
||||
data1-1:
|
@ -1,7 +1,7 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
mongo1:
|
||||
image: mongo:3.6
|
||||
image: mongo:5.0
|
||||
restart: always
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: root
|
||||
@ -9,3 +9,9 @@ services:
|
||||
ports:
|
||||
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT}
|
||||
command: --profile=2 --verbose
|
||||
|
||||
mongo2:
|
||||
image: mongo:5.0
|
||||
restart: always
|
||||
ports:
|
||||
- "27018:27017"
|
||||
|
@ -8,8 +8,8 @@ echo '{
|
||||
"ip-forward": true,
|
||||
"log-level": "debug",
|
||||
"storage-driver": "overlay2",
|
||||
"insecure-registries" : ["dockerhub-proxy.sas.yp-c.yandex.net:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.sas.yp-c.yandex.net:5000"]
|
||||
"insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
|
||||
}' | dd of=/etc/docker/daemon.json 2>/dev/null
|
||||
|
||||
dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --default-address-pool base=172.17.0.0/12,size=24 &>/ClickHouse/tests/integration/dockerd.log &
|
||||
|
@ -61,7 +61,7 @@ function configure
|
||||
cp -rv right/config left ||:
|
||||
|
||||
# Start a temporary server to rename the tables
|
||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
||||
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||
echo all killed
|
||||
|
||||
set -m # Spawn temporary in its own process groups
|
||||
@ -88,7 +88,7 @@ function configure
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:
|
||||
|
||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
||||
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||
echo all killed
|
||||
|
||||
# Make copies of the original db for both servers. Use hardlinks instead
|
||||
@ -106,7 +106,7 @@ function configure
|
||||
|
||||
function restart
|
||||
{
|
||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
||||
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||
echo all killed
|
||||
|
||||
# Change the jemalloc settings here.
|
||||
@ -193,7 +193,7 @@ function run_tests
|
||||
then
|
||||
# Run only explicitly specified tests, if any.
|
||||
# shellcheck disable=SC2010
|
||||
test_files=$(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}")
|
||||
test_files=($(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}"))
|
||||
elif [ "$PR_TO_TEST" -ne 0 ] \
|
||||
&& [ "$(wc -l < changed-test-definitions.txt)" -gt 0 ] \
|
||||
&& [ "$(wc -l < other-changed-files.txt)" -eq 0 ]
|
||||
@ -201,10 +201,26 @@ function run_tests
|
||||
# If only the perf tests were changed in the PR, we will run only these
|
||||
# tests. The lists of changed files are prepared in entrypoint.sh because
|
||||
# it has the repository.
|
||||
test_files=$(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-test-definitions.txt)
|
||||
test_files=($(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-test-definitions.txt))
|
||||
else
|
||||
# The default -- run all tests found in the test dir.
|
||||
test_files=$(ls "$test_prefix"/*.xml)
|
||||
test_files=($(ls "$test_prefix"/*.xml))
|
||||
fi
|
||||
|
||||
# We split perf tests into multiple checks to make them faster
|
||||
if [ -v CHPC_TEST_RUN_BY_HASH_TOTAL ]; then
|
||||
# filter tests array in bash https://stackoverflow.com/a/40375567
|
||||
for index in "${!test_files[@]}"; do
|
||||
# sorry for this, just calculating hash(test_name) % total_tests_group == my_test_group_num
|
||||
test_hash_result=$(echo "${test_files[$index]}" | perl -ne 'use Digest::MD5 qw(md5); print unpack('Q', md5($_)) % $ENV{CHPC_TEST_RUN_BY_HASH_TOTAL} == $ENV{CHPC_TEST_RUN_BY_HASH_NUM};')
|
||||
# BTW, for some reason when hash(test_name) % total_tests_group != my_test_group_num perl outputs nothing, not zero
|
||||
if [ "$test_hash_result" != "1" ]; then
|
||||
# deleting element from array
|
||||
unset -v 'test_files[$index]'
|
||||
fi
|
||||
done
|
||||
# to have sequential indexes...
|
||||
test_files=("${test_files[@]}")
|
||||
fi
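For readability, here is a small self-contained sketch of the same bucketing idea; it is only an illustration under stated assumptions (an md5sum-based bucket key instead of the Perl md5/unpack mapping, and a made-up test list), so the actual bucket assignment differs from the script above.

```bash
#!/bin/bash
# Illustrative only: keep a test when hash(test_name) % TOTAL == NUM.
# The md5sum-based bucket key is an assumption for this sketch and does not
# reproduce the exact Perl mapping used by the script above.
CHPC_TEST_RUN_BY_HASH_NUM=${CHPC_TEST_RUN_BY_HASH_NUM:-0}
CHPC_TEST_RUN_BY_HASH_TOTAL=${CHPC_TEST_RUN_BY_HASH_TOTAL:-3}

# Hypothetical test list; the real script collects tests/performance/*.xml.
test_files=(query_parser.xml group_by.xml joins.xml website.xml)

selected=()
for f in "${test_files[@]}"; do
    # First 8 hex digits of the md5 used as an integer bucket key.
    bucket=$(( 0x$(printf '%s' "$f" | md5sum | cut -c1-8) % CHPC_TEST_RUN_BY_HASH_TOTAL ))
    if [ "$bucket" -eq "$CHPC_TEST_RUN_BY_HASH_NUM" ]; then
        selected+=("$f")
    fi
done
test_files=("${selected[@]}")

printf 'this runner keeps: %s\n' "${test_files[@]}"
```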
|
||||
|
||||
# For PRs w/o changes in test definitions, test only a subset of queries,
|
||||
@ -212,21 +228,26 @@ function run_tests
|
||||
# already set, keep those values.
|
||||
#
|
||||
# NOTE: too high CHPC_RUNS/CHPC_MAX_QUERIES may hit internal CI timeout.
|
||||
if [ "$PR_TO_TEST" -ne 0 ] && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ]
|
||||
then
|
||||
CHPC_RUNS=${CHPC_RUNS:-7}
|
||||
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-10}
|
||||
else
|
||||
CHPC_RUNS=${CHPC_RUNS:-13}
|
||||
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0}
|
||||
fi
|
||||
# NOTE: Currently we disabled complete run even for master branch
|
||||
#if [ "$PR_TO_TEST" -ne 0 ] && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ]
|
||||
#then
|
||||
# CHPC_RUNS=${CHPC_RUNS:-7}
|
||||
# CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-10}
|
||||
#else
|
||||
# CHPC_RUNS=${CHPC_RUNS:-13}
|
||||
# CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0}
|
||||
#fi
|
||||
|
||||
CHPC_RUNS=${CHPC_RUNS:-7}
|
||||
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-10}
|
||||
|
||||
export CHPC_RUNS
|
||||
export CHPC_MAX_QUERIES
|
||||
|
||||
# Determine which concurrent benchmarks to run. For now, the only test
|
||||
# we run as a concurrent benchmark is 'website'. Run it as benchmark if we
|
||||
# are also going to run it as a normal test.
|
||||
for test in $test_files; do echo "$test"; done | sed -n '/website/p' > benchmarks-to-run.txt
|
||||
for test in ${test_files[@]}; do echo "$test"; done | sed -n '/website/p' > benchmarks-to-run.txt
|
||||
|
||||
# Delete old report files.
|
||||
for x in {test-times,wall-clock-times}.tsv
|
||||
@ -235,8 +256,8 @@ function run_tests
|
||||
touch "$x"
|
||||
done
|
||||
|
||||
# Randomize test order.
|
||||
test_files=$(for f in $test_files; do echo "$f"; done | sort -R)
|
||||
# Randomize test order. BTW, it's not an array any more.
|
||||
test_files=$(for f in ${test_files[@]}; do echo "$f"; done | sort -R)
|
||||
|
||||
# Limit profiling time to 10 minutes, not to run for too long.
|
||||
profile_seconds_left=600
|
||||
@ -261,16 +282,24 @@ function run_tests
|
||||
# Use awk because bash doesn't support floating point arithmetic.
|
||||
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
|
||||
|
||||
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
||||
# The grep is to filter out set -x output and keep only time output.
|
||||
# The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
|
||||
{ \
|
||||
time "$script_dir/perf.py" --host localhost localhost --port $LEFT_SERVER_PORT $RIGHT_SERVER_PORT \
|
||||
--runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
|
||||
--profile-seconds "$profile_seconds" \
|
||||
-- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
|
||||
} 2>&1 >/dev/null | tee >(grep -v ^+ >> "wall-clock-times.tsv") \
|
||||
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
|
||||
(
|
||||
set +x
|
||||
argv=(
|
||||
--host localhost localhost
|
||||
--port "$LEFT_SERVER_PORT" "$RIGHT_SERVER_PORT"
|
||||
--runs "$CHPC_RUNS"
|
||||
--max-queries "$CHPC_MAX_QUERIES"
|
||||
--profile-seconds "$profile_seconds"
|
||||
|
||||
"$test"
|
||||
)
|
||||
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
||||
# one more subshell to suppress trace output for "set +x"
|
||||
(
|
||||
time "$script_dir/perf.py" "${argv[@]}" > "$test_name-raw.tsv" 2> "$test_name-err.log"
|
||||
) 2>>wall-clock-times.tsv >/dev/null \
|
||||
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
|
||||
) 2>/dev/null
|
||||
|
||||
profile_seconds_left=$(awk -F' ' \
|
||||
'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \
|
||||
@ -278,8 +307,6 @@ function run_tests
|
||||
current_test=$((current_test + 1))
|
||||
done
|
||||
|
||||
unset TIMEFORMAT
|
||||
|
||||
wait
|
||||
}
|
||||
|
||||
@ -291,7 +318,7 @@ function get_profiles_watchdog
|
||||
|
||||
for pid in $(pgrep -f clickhouse)
|
||||
do
|
||||
gdb -p "$pid" --batch --ex "info proc all" --ex "thread apply all bt" --ex quit &> "$pid.gdb.log" &
|
||||
sudo gdb -p "$pid" --batch --ex "info proc all" --ex "thread apply all bt" --ex quit &> "$pid.gdb.log" &
|
||||
done
|
||||
wait
|
||||
|
||||
@ -518,7 +545,9 @@ unset IFS
|
||||
# all nodes.
|
||||
numactl --show
|
||||
numactl --cpunodebind=all --membind=all numactl --show
|
||||
numactl --cpunodebind=all --membind=all parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log
|
||||
# Use fewer jobs to avoid OOM. Some queries can consume 8+ GB of memory.
|
||||
jobs_count=$(($(grep -c ^processor /proc/cpuinfo) / 3))
|
||||
numactl --cpunodebind=all --membind=all parallel --jobs $jobs_count --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log
|
||||
|
||||
clickhouse-local --query "
|
||||
-- Join the metric names back to the metric statistics we've calculated, and make
|
||||
@ -1409,7 +1438,7 @@ case "$stage" in
|
||||
while env kill -- -$watchdog_pid ; do sleep 1; done
|
||||
|
||||
# Stop the servers to free memory for the subsequent query analysis.
|
||||
while killall clickhouse; do echo . ; sleep 1 ; done
|
||||
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||
echo Servers stopped.
|
||||
;&
|
||||
"analyze_queries")
|
||||
|
@ -16,16 +16,28 @@ right_sha=$4
|
||||
datasets=${CHPC_DATASETS-"hits1 hits10 hits100 values"}
|
||||
|
||||
declare -A dataset_paths
|
||||
dataset_paths["hits10"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_10m_single/partitions/hits_10m_single.tar"
|
||||
dataset_paths["hits100"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_100m_single/partitions/hits_100m_single.tar"
|
||||
dataset_paths["hits1"]="https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar"
|
||||
dataset_paths["values"]="https://clickhouse-datasets.s3.yandex.net/values_with_expressions/partitions/test_values.tar"
|
||||
if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then
|
||||
dataset_paths["hits10"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_10m_single/partitions/hits_10m_single.tar"
|
||||
dataset_paths["hits100"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_100m_single/partitions/hits_100m_single.tar"
|
||||
dataset_paths["hits1"]="https://clickhouse-datasets.s3.amazonaws.com/hits/partitions/hits_v1.tar"
|
||||
dataset_paths["values"]="https://clickhouse-datasets.s3.amazonaws.com/values_with_expressions/partitions/test_values.tar"
|
||||
else
|
||||
dataset_paths["hits10"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_10m_single/partitions/hits_10m_single.tar"
|
||||
dataset_paths["hits100"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_100m_single/partitions/hits_100m_single.tar"
|
||||
dataset_paths["hits1"]="https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar"
|
||||
dataset_paths["values"]="https://clickhouse-datasets.s3.yandex.net/values_with_expressions/partitions/test_values.tar"
|
||||
fi
|
||||
|
||||
|
||||
function download
|
||||
{
|
||||
# Historically there were various paths for the performance test package.
|
||||
# Test all of them.
|
||||
for path in "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/"{,clickhouse_build_check/}"performance/performance.tgz"
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz"
|
||||
"https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/clickhouse_build_check/performance/performance.tgz"
|
||||
)
|
||||
|
||||
for path in "${urls_to_try[@]}"
|
||||
do
|
||||
if curl --fail --head "$path"
|
||||
then
|
||||
|
@ -4,6 +4,27 @@ set -ex
|
||||
CHPC_CHECK_START_TIMESTAMP="$(date +%s)"
|
||||
export CHPC_CHECK_START_TIMESTAMP
|
||||
|
||||
S3_URL=${S3_URL:="https://clickhouse-builds.s3.yandex.net"}
|
||||
|
||||
COMMON_BUILD_PREFIX="/clickhouse_build_check"
|
||||
if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then
|
||||
COMMON_BUILD_PREFIX=""
|
||||
fi
|
||||
|
||||
# Sometimes AWS responds with a DNS error and it's impossible to retry it with
|
||||
# the current curl version's options.
|
||||
function curl_with_retry
|
||||
{
|
||||
for _ in 1 2 3 4; do
|
||||
if curl --fail --head "$1"; then
|
||||
return 0
|
||||
else
|
||||
sleep 0.5
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Use the packaged repository to find the revision we will compare to.
|
||||
function find_reference_sha
|
||||
{
|
||||
@ -43,9 +64,12 @@ function find_reference_sha
|
||||
# Historically there were various paths for the performance test package,
|
||||
# test all of them.
|
||||
unset found
|
||||
for path in "https://clickhouse-builds.s3.yandex.net/0/$REF_SHA/"{,clickhouse_build_check/}"performance/performance.tgz"
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz"
|
||||
"https://clickhouse-builds.s3.yandex.net/0/$REF_SHA/clickhouse_build_check/performance/performance.tgz"
|
||||
)
|
||||
for path in "${urls_to_try[@]}"
|
||||
do
|
||||
if curl --fail --head "$path"
|
||||
if curl_with_retry "$path"
|
||||
then
|
||||
found="$path"
|
||||
break
|
||||
@ -65,14 +89,11 @@ chmod 777 workspace output
|
||||
|
||||
cd workspace
|
||||
|
||||
# Download the package for the version we are going to test
|
||||
for path in "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/"{,clickhouse_build_check/}"performance/performance.tgz"
|
||||
do
|
||||
if curl --fail --head "$path"
|
||||
then
|
||||
right_path="$path"
|
||||
fi
|
||||
done
|
||||
# Download the package for the version we are going to test.
|
||||
if curl_with_retry "$S3_URL/$PR_TO_TEST/$SHA_TO_TEST$COMMON_BUILD_PREFIX/performance/performance.tgz"
|
||||
then
|
||||
right_path="$S3_URL/$PR_TO_TEST/$SHA_TO_TEST$COMMON_BUILD_PREFIX/performance/performance.tgz"
|
||||
fi
|
||||
|
||||
mkdir right
|
||||
wget -nv -nd -c "$right_path" -O- | tar -C right --strip-components=1 -zxv
|
||||
|
@ -45,6 +45,7 @@ parser.add_argument('--runs', type=int, default=1, help='Number of query runs pe
|
||||
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
|
||||
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
|
||||
parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
|
||||
parser.add_argument('--prewarm-max-query-seconds', type=int, default=180, help='For how many seconds at most a prewarm (cold storage) query is allowed to run. The script finishes with error if this time is exceeded.')
|
||||
parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
|
||||
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
|
||||
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
|
||||
@ -284,7 +285,7 @@ for query_index in queries_to_run:
|
||||
# it makes the results unstable.
|
||||
res = c.execute(q, query_id = prewarm_id,
|
||||
settings = {
|
||||
'max_execution_time': args.max_query_seconds,
|
||||
'max_execution_time': args.prewarm_max_query_seconds,
|
||||
'query_profiler_real_time_period_ns': 10000000,
|
||||
'memory_profiler_step': '4Mi',
|
||||
})
|
||||
@ -354,11 +355,9 @@ for query_index in queries_to_run:
|
||||
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
|
||||
|
||||
if elapsed > args.max_query_seconds:
|
||||
# Stop processing pathologically slow queries, to avoid timing out
|
||||
# the entire test task. This shouldn't really happen, so we don't
|
||||
# need much handling for this case and can just exit.
|
||||
# Do not stop processing pathologically slow queries,
|
||||
# since this may hide errors in other queries.
|
||||
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
|
||||
exit(2)
|
||||
|
||||
# Be careful with the counter, after this line it's the next iteration
|
||||
# already.
|
||||
|
@ -42,7 +42,7 @@ ENV CCACHE_DIR=/test_output/ccache
|
||||
CMD echo "Running PVS version $PKG_VERSION" && mkdir -p $CCACHE_DIR && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \
|
||||
&& cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"DISABLE_HERMETIC_BUILD"=ON -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang\+\+-13 \
|
||||
&& ninja re2_st clickhouse_grpc_protos \
|
||||
&& pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \
|
||||
&& pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j "$(nproc)" -l ./licence.lic; \
|
||||
cp /repo_folder/pvs-studio.log /test_output; \
|
||||
plog-converter -a GA:1,2 -t fullhtml -o /test_output/pvs-studio-html-report pvs-studio.log; \
|
||||
plog-converter -a GA:1,2 -t tasklist -o /test_output/pvs-studio-task-report.txt pvs-studio.log
|
||||
|
@ -61,6 +61,7 @@ chmod 777 -R /var/lib/clickhouse
|
||||
clickhouse-client --query "SHOW DATABASES"
|
||||
|
||||
clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
|
||||
|
||||
service clickhouse-server restart
|
||||
|
||||
# Wait for server to start accepting connections
|
||||
@ -109,15 +110,25 @@ function run_tests()
|
||||
fi
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
|
||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \
|
||||
--skip 00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" \
|
||||
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
||||
|
||||
clickhouse-test --timeout 1200 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \
|
||||
00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
|
||||
|
||||
set -e
|
||||
}
|
||||
|
||||
export -f run_tests
|
||||
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
||||
|
||||
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
echo "Files in current directory"
|
||||
ls -la ./
|
||||
echo "Files in root directory"
|
||||
ls -la /
|
||||
|
||||
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
|
||||
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
||||
|
||||
|
@ -49,7 +49,6 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
ENV NUM_TRIES=1
|
||||
ENV MAX_RUN_TIME=0
|
||||
|
||||
|
||||
# Download Minio-related binaries
|
||||
RUN wget 'https://dl.min.io/server/minio/release/linux-amd64/minio' \
|
||||
&& chmod +x ./minio \
|
||||
|
@ -96,6 +96,13 @@ function run_tests()
|
||||
ADDITIONAL_OPTIONS+=('8')
|
||||
fi
|
||||
|
||||
if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
|
||||
ADDITIONAL_OPTIONS+=('--run-by-hash-num')
|
||||
ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM")
|
||||
ADDITIONAL_OPTIONS+=('--run-by-hash-total')
|
||||
ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL")
|
||||
fi
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
||||
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
||||
@ -108,7 +115,12 @@ export -f run_tests
|
||||
|
||||
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
||||
|
||||
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
echo "Files in current directory"
|
||||
ls -la ./
|
||||
echo "Files in root directory"
|
||||
ls -la /
|
||||
|
||||
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
|
||||
clickhouse-client -q "system flush logs" ||:
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
#!/bin/bash
|
||||
# shellcheck disable=SC2094
|
||||
# shellcheck disable=SC2086
|
||||
# shellcheck disable=SC2024
|
||||
|
||||
set -x
|
||||
|
||||
@ -59,9 +60,41 @@ function configure()
|
||||
echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
|
||||
> /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml
|
||||
|
||||
local total_mem
|
||||
total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
|
||||
total_mem=$(( total_mem*1024 )) # bytes
|
||||
# Set maximum memory usage as half of total memory (less chance of OOM).
|
||||
echo "<clickhouse><max_server_memory_usage_to_ram_ratio>0.5</max_server_memory_usage_to_ram_ratio></clickhouse>" \
|
||||
> /etc/clickhouse-server/config.d/max_server_memory_usage_to_ram_ratio.xml
|
||||
#
|
||||
# But not via max_server_memory_usage but via max_memory_usage_for_user,
|
||||
# so that we can override this setting and execute service queries, like:
|
||||
# - hung check
|
||||
# - show/drop database
|
||||
# - ...
|
||||
#
|
||||
# So max_memory_usage_for_user will be a soft limit, and
|
||||
# max_server_memory_usage will be a hard limit, and queries that should be
|
||||
# executed regardless of memory limits will use max_memory_usage_for_user=0,
|
||||
# instead of relying on max_untracked_memory
|
||||
local max_server_mem
|
||||
max_server_mem=$((total_mem*75/100)) # 75%
|
||||
echo "Setting max_server_memory_usage=$max_server_mem"
|
||||
cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
|
||||
<clickhouse>
|
||||
<max_server_memory_usage>${max_server_mem}</max_server_memory_usage>
|
||||
</clickhouse>
|
||||
EOL
|
||||
local max_users_mem
|
||||
max_users_mem=$((total_mem*50/100)) # 50%
|
||||
echo "Setting max_memory_usage_for_user=$max_users_mem"
|
||||
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
|
||||
<clickhouse>
|
||||
<profiles>
|
||||
<default>
|
||||
<max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
|
||||
</default>
|
||||
</profiles>
|
||||
</clickhouse>
|
||||
EOL
|
||||
}
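A minimal, hedged sketch of how the two limits interact in practice, assuming a server configured by the function above; the queries themselves are illustrative, while the client options mirror the ones the hung check uses later in this change.

```bash
# Regular test queries are bounded by the soft, per-user limit
# (max_memory_usage_for_user = 50% of RAM set above):
clickhouse-client --query "SELECT count() FROM system.numbers LIMIT 1000000"

# Service queries (hung check, SHOW/DROP DATABASE, ...) lift the soft limit for
# themselves; only the server-wide hard limit (max_server_memory_usage = 75% of
# RAM) still applies:
clickhouse-client --max_memory_usage_for_user=0 --max_untracked_memory=1Gi \
    --query "SHOW DATABASES"
```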
|
||||
|
||||
function stop()
|
||||
@ -102,14 +135,35 @@ function start()
|
||||
counter=$((counter + 1))
|
||||
done
|
||||
|
||||
# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
|
||||
# and clickhouse-server can do fork-exec, for example, to run some bridge.
|
||||
# Do not set nostop noprint for all signals, because for some of them it may cause gdb to hang,
|
||||
# explicitly ignore non-fatal signals that are used by the server.
|
||||
# The number of SIGRTMIN can be determined only at runtime.
|
||||
RTMIN=$(kill -l SIGRTMIN)
|
||||
echo "
|
||||
set follow-fork-mode child
|
||||
handle all noprint
|
||||
handle SIGSEGV stop print
|
||||
handle SIGBUS stop print
|
||||
handle SIGABRT stop print
|
||||
set follow-fork-mode parent
|
||||
handle SIGHUP nostop noprint pass
|
||||
handle SIGINT nostop noprint pass
|
||||
handle SIGQUIT nostop noprint pass
|
||||
handle SIGPIPE nostop noprint pass
|
||||
handle SIGTERM nostop noprint pass
|
||||
handle SIGUSR1 nostop noprint pass
|
||||
handle SIGUSR2 nostop noprint pass
|
||||
handle SIG$RTMIN nostop noprint pass
|
||||
info signals
|
||||
continue
|
||||
thread apply all backtrace
|
||||
backtrace full
|
||||
info locals
|
||||
info registers
|
||||
disassemble /s
|
||||
up
|
||||
info locals
|
||||
disassemble /s
|
||||
up
|
||||
info locals
|
||||
disassemble /s
|
||||
p \"done\"
|
||||
detach
|
||||
quit
|
||||
" > script.gdb
|
||||
@ -117,7 +171,10 @@ quit
|
||||
# FIXME Hung check may work incorrectly because of attached gdb
|
||||
# 1. False positives are possible
|
||||
# 2. We cannot attach another gdb to get stacktraces if some queries hung
|
||||
gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" >> /test_output/gdb.log &
|
||||
gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
|
||||
sleep 5
|
||||
# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
|
||||
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
|
||||
}
|
||||
|
||||
install_packages package_folder
|
||||
@ -190,6 +247,9 @@ zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.log* > /dev/n
|
||||
zgrep -Fa "########################################" /test_output/* > /dev/null \
|
||||
&& echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv
|
||||
|
||||
zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \
|
||||
&& echo -e 'Found signal in gdb.log\tFAIL' >> /test_output/test_results.tsv
|
||||
|
||||
echo -e "Backward compatibility check\n"
|
||||
|
||||
echo "Download previous release server"
|
||||
|
@ -76,6 +76,9 @@ def call_with_retry(query, timeout=30, retry_count=5):
|
||||
else:
|
||||
break
|
||||
|
||||
def make_query_command(query):
|
||||
return f"""clickhouse client -q "{query}" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0"""
|
||||
|
||||
|
||||
def prepare_for_hung_check(drop_databases):
|
||||
# FIXME this function should not exist, but...
|
||||
@ -89,40 +92,41 @@ def prepare_for_hung_check(drop_databases):
|
||||
logging.info("Will terminate gdb (if any)")
|
||||
call_with_retry("kill -TERM $(pidof gdb)")
|
||||
|
||||
# Some tests set too low a memory limit for the default user and forget to reset it back.
|
||||
# It may cause SYSTEM queries to fail, let's disable memory limit.
|
||||
call_with_retry("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'")
|
||||
call_with_retry(make_query_command('SELECT 1 FORMAT Null'))
|
||||
|
||||
# Some tests execute SYSTEM STOP MERGES or similar queries.
|
||||
# It may cause some ALTERs to hang.
|
||||
# Possibly we should fix the tests and forbid using such queries without specifying a table.
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START MERGES'")
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START DISTRIBUTED SENDS'")
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START TTL MERGES'")
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START MOVES'")
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START FETCHES'")
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START REPLICATED SENDS'")
|
||||
call_with_retry("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'")
|
||||
call_with_retry(make_query_command('SYSTEM START MERGES'))
|
||||
call_with_retry(make_query_command('SYSTEM START DISTRIBUTED SENDS'))
|
||||
call_with_retry(make_query_command('SYSTEM START TTL MERGES'))
|
||||
call_with_retry(make_query_command('SYSTEM START MOVES'))
|
||||
call_with_retry(make_query_command('SYSTEM START FETCHES'))
|
||||
call_with_retry(make_query_command('SYSTEM START REPLICATED SENDS'))
|
||||
call_with_retry(make_query_command('SYSTEM START REPLICATION QUEUES'))
|
||||
call_with_retry(make_query_command('SYSTEM DROP MARK CACHE'))
|
||||
|
||||
# Issue #21004, live views are experimental, so let's just suppress it
|
||||
call_with_retry("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """)
|
||||
call_with_retry(make_query_command("KILL QUERY WHERE upper(query) LIKE 'WATCH %'"))
|
||||
|
||||
# Kill other queries which known to be slow
|
||||
# It's query from 01232_preparing_sets_race_condition_long, it may take up to 1000 seconds in slow builds
|
||||
call_with_retry("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """)
|
||||
call_with_retry(make_query_command("KILL QUERY WHERE query LIKE 'insert into tableB select %'"))
|
||||
# Long query from 00084_external_agregation
|
||||
call_with_retry("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """)
|
||||
call_with_retry(make_query_command("KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'"))
|
||||
|
||||
if drop_databases:
|
||||
for i in range(5):
|
||||
try:
|
||||
# Here we try to drop all databases in async mode. If some queries really hung, then the drop will hang too.
|
||||
# Otherwise we will get rid of queries which wait for background pool. It can take a long time on slow builds (more than 900 seconds).
|
||||
databases = check_output('clickhouse client -q "SHOW DATABASES"', shell=True, timeout=30).decode('utf-8').strip().split()
|
||||
#
|
||||
# Also specify max_untracked_memory to allow 1GiB of memory to overcommit.
|
||||
databases = check_output(make_query_command('SHOW DATABASES'), shell=True, timeout=30).decode('utf-8').strip().split()
|
||||
for db in databases:
|
||||
if db == "system":
|
||||
continue
|
||||
command = f'clickhouse client -q "DROP DATABASE {db}"'
|
||||
command = make_query_command(f'DROP DATABASE {db}')
|
||||
# we don't wait for drop
|
||||
Popen(command, shell=True)
|
||||
break
|
||||
@ -134,9 +138,15 @@ def prepare_for_hung_check(drop_databases):
|
||||
|
||||
|
||||
# Wait for last queries to finish if any, not longer than 300 seconds
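# (A note on the query below: each of the 300 rows sleeps for 1/300 of the
# estimated time left, so the whole statement sleeps for roughly
# "301 - max(elapsed)" seconds, i.e. until the longest currently running query
# reaches the 300-second mark; the 330-second timeout caps the wait.)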
|
||||
call("""clickhouse client -q "select sleepEachRow((
|
||||
select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300
|
||||
) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT, timeout=330)
|
||||
call(make_query_command("""
|
||||
select sleepEachRow((
|
||||
select maxOrDefault(300 - elapsed) + 1
|
||||
from system.processes
|
||||
where query not like '%from system.processes%' and elapsed < 300
|
||||
) / 300)
|
||||
from numbers(300)
|
||||
format Null
|
||||
"""), shell=True, stderr=STDOUT, timeout=330)
|
||||
|
||||
# Even if all clickhouse-test processes are finished, there are probably some sh scripts,
|
||||
# which still run some new queries. Let's ignore them.
|
||||
@ -190,7 +200,24 @@ if __name__ == "__main__":
|
||||
if args.hung_check:
|
||||
have_long_running_queries = prepare_for_hung_check(args.drop_databases)
|
||||
logging.info("Checking if some queries hung")
|
||||
cmd = "{} {} {}".format(args.test_cmd, "--hung-check", "00001_select_1")
|
||||
cmd = ' '.join([args.test_cmd,
|
||||
# Do not track memory allocations up to 1Gi,
|
||||
# this will allow to ignore server memory limit (max_server_memory_usage) for this query.
|
||||
#
|
||||
# NOTE: memory_profiler_step should be also adjusted, because:
|
||||
#
|
||||
# untracked_memory_limit = min(settings.max_untracked_memory, settings.memory_profiler_step)
|
||||
#
|
||||
# NOTE: if there are queries with GROUP BY, this trick
|
||||
# will not work due to CurrentMemoryTracker::check() from
|
||||
# Aggregator code.
|
||||
# But right now it should work, since neither the hung check nor 00001_select_1 has GROUP BY.
|
||||
"--client-option", "max_untracked_memory=1Gi",
|
||||
"--client-option", "max_memory_usage_for_user=0",
|
||||
"--client-option", "memory_profiler_step=1Gi",
|
||||
"--hung-check",
|
||||
"00001_select_1"
|
||||
])
|
||||
res = call(cmd, shell=True, stderr=STDOUT)
|
||||
hung_check_status = "No queries hung\tOK\n"
|
||||
if res != 0 and have_long_running_queries:
|
||||
|
@ -21,7 +21,6 @@ RUN apt-get update \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
libreadline-dev \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
curl \
|
||||
|
@ -5,8 +5,8 @@ echo "Configure to use Yandex dockerhub-proxy"
|
||||
mkdir -p /etc/docker/
|
||||
cat > /etc/docker/daemon.json << EOF
|
||||
{
|
||||
"insecure-registries" : ["dockerhub-proxy.sas.yp-c.yandex.net:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.sas.yp-c.yandex.net:5000"]
|
||||
"insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
|
||||
}
|
||||
EOF
|
||||
|
||||
|
@ -106,20 +106,20 @@ Build ClickHouse. Run ClickHouse from the terminal: change directory to `program
|
||||
|
||||
Note that all clickhouse tools (server, client, etc) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.
|
||||
|
||||
Alternatively you can install ClickHouse package: either stable release from Yandex repository or you can build package for yourself with `./release` in ClickHouse sources root. Then start the server with `sudo service clickhouse-server start` (or stop to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.
|
||||
Alternatively, you can install a ClickHouse package: either a stable release from the ClickHouse repository, or a package you build yourself with `./release` in the ClickHouse sources root. Then start the server with `sudo clickhouse start` (or `sudo clickhouse stop` to stop it). Look for logs at `/var/log/clickhouse-server/clickhouse-server.log`.
|
||||
|
||||
When ClickHouse is already installed on your system, you can build a new `clickhouse` binary and replace the existing binary:
|
||||
|
||||
``` bash
|
||||
$ sudo service clickhouse-server stop
|
||||
$ sudo clickhouse stop
|
||||
$ sudo cp ./clickhouse /usr/bin/
|
||||
$ sudo service clickhouse-server start
|
||||
$ sudo clickhouse start
|
||||
```
|
||||
|
||||
Also you can stop system clickhouse-server and run your own with the same configuration but with logging to terminal:
|
||||
|
||||
``` bash
|
||||
$ sudo service clickhouse-server stop
|
||||
$ sudo clickhouse stop
|
||||
$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
|
||||
```
|
||||
|
||||
@ -257,9 +257,9 @@ There are five variants (Debug, ASan, TSan, MSan, UBSan).
|
||||
|
||||
Thread Fuzzer (not to be confused with Thread Sanitizer) is another kind of fuzzing that randomizes the order of thread execution. It helps to find even more special cases.
|
||||
|
||||
## Security Audit {#security-audit}
|
||||
## Security Audit
|
||||
|
||||
People from Yandex Security Team do some basic overview of ClickHouse capabilities from the security standpoint.
|
||||
People from Yandex Security Team did some basic overview of ClickHouse capabilities from the security standpoint.
|
||||
|
||||
## Static Analyzers {#static-analyzers}
|
||||
|
||||
@ -326,15 +326,11 @@ There is automated check for flaky tests. It runs all new tests 100 times (for f
|
||||
|
||||
## Testflows
|
||||
|
||||
[Testflows](https://testflows.com/) is an enterprise-grade testing framework. It is used by Altinity for some of the tests and we run these tests in our CI.
|
||||
|
||||
## Yandex Checks (only for Yandex employees)
|
||||
|
||||
These checks are importing ClickHouse code into Yandex internal monorepository, so ClickHouse codebase can be used as a library by other products at Yandex (YT and YDB). Note that clickhouse-server itself is not being build from internal repo and unmodified open-source build is used for Yandex applications.
|
||||
[Testflows](https://testflows.com/) is an enterprise-grade open-source testing framework, which is used to test a subset of ClickHouse.
|
||||
|
||||
## Test Automation {#test-automation}
|
||||
|
||||
We run tests with Yandex internal CI and job automation system named “Sandbox”.
|
||||
We run tests with [GitHub Actions](https://github.com/features/actions).
|
||||
|
||||
Build jobs and tests are run in Sandbox on a per-commit basis. Resulting packages and test results are published on GitHub and can be downloaded via direct links. Artifacts are stored for several months. When you send a pull request on GitHub, we tag it as “can be tested” and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc.) for you.
|
||||
|
||||
|
@ -17,6 +17,7 @@ ClickHouse server works as MySQL replica. It reads binlog and performs DDL and D
|
||||
``` sql
|
||||
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
|
||||
ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
|
||||
[TABLE OVERRIDE table1 (...), TABLE OVERRIDE table2 (...)]
|
||||
```
|
||||
|
||||
**Engine Parameters**
|
||||
@ -82,6 +83,7 @@ When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](
|
||||
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
|
||||
| BLOB | [String](../../sql-reference/data-types/string.md) |
|
||||
| BINARY | [FixedString](../../sql-reference/data-types/fixedstring.md) |
|
||||
| BIT | [UInt64](../../sql-reference/data-types/int-uint.md) |
|
||||
|
||||
[Nullable](../../sql-reference/data-types/nullable.md) is supported.
|
||||
|
||||
@ -109,15 +111,19 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
|
||||
|
||||
- MySQL `DELETE` query is converted into `INSERT` with `_sign=-1`.
|
||||
|
||||
- MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1`.
|
||||
- MySQL `UPDATE` query is converted into `INSERT` with `_sign=-1` and `INSERT` with `_sign=1` if the primary key has been changed, or
|
||||
`INSERT` with `_sign=1` if not.
|
||||
|
||||
### Selecting from MaterializedMySQL Tables {#select}
|
||||
|
||||
`SELECT` query from `MaterializedMySQL` tables has some specifics:
|
||||
|
||||
- If `_version` is not specified in the `SELECT` query, [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used. So only rows with `MAX(_version)` are selected.
|
||||
- If `_version` is not specified in the `SELECT` query, the
|
||||
[FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used, so only rows with
|
||||
`MAX(_version)` are returned for each primary key value.
|
||||
|
||||
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not included into the result set.
|
||||
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not
|
||||
included into the result set.
|
||||
|
||||
- The result includes column comments if they exist in the MySQL database tables.
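A minimal sketch of the behavior described in the list above, assuming a `MaterializedMySQL` database named `mysql_db` with a replicated table `t` (both names are illustrative):

```bash
# Default SELECT: FINAL and "WHERE _sign = 1" are applied implicitly, so only
# the latest non-deleted version of each row comes back.
clickhouse-client --query "SELECT * FROM mysql_db.t"

# Referencing the virtual columns switches those defaults off and exposes every
# stored row version, including rows deleted in MySQL (_sign = -1).
clickhouse-client --query "SELECT *, _version, _sign FROM mysql_db.t ORDER BY _version"
```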
|
||||
|
||||
@ -125,15 +131,95 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
|
||||
|
||||
MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables.
|
||||
|
||||
ClickHouse has only one physical order, which is determined by `ORDER BY` clause. To create a new physical order, use [materialized views](../../sql-reference/statements/create/view.md#materialized).
|
||||
ClickHouse has only one physical order, which is determined by `ORDER BY` clause. To create a new physical order, use
|
||||
[materialized views](../../sql-reference/statements/create/view.md#materialized).
|
||||
|
||||
**Notes**
|
||||
|
||||
- Rows with `_sign=-1` are not deleted physically from the tables.
|
||||
- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializedMySQL` engine.
|
||||
- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializedMySQL` engine, as they are not visible in the
|
||||
MySQL binlog.
|
||||
- Replication can be easily broken.
|
||||
- Manual operations on database and tables are forbidden.
|
||||
- `MaterializedMySQL` is influenced by [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged in the corresponding table in the `MaterializedMySQL` database when a table in the MySQL server changes.
|
||||
- `MaterializedMySQL` is affected by the [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert)
|
||||
setting. Data is merged in the corresponding table in the `MaterializedMySQL` database when a table in the MySQL
|
||||
server changes.
|
||||
|
||||
### Table Overrides {#table-overrides}
|
||||
|
||||
Table overrides can be used to customize the ClickHouse DDL queries, allowing you to make schema optimizations for your
|
||||
application. This is especially useful for controlling partitioning, which is important for the overall performance of
|
||||
MaterializedMySQL.
|
||||
|
||||
These are the schema conversion manipulations you can do with table overrides for MaterializedMySQL:
|
||||
|
||||
* Modify column type. Must be compatible with the original type, or replication will fail. For example,
|
||||
you can modify a UInt32 column to UInt64, but you can not modify a String column to Array(String).
|
||||
* Modify [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl).
|
||||
* Modify [column compression codec](../../sql-reference/statements/create/table/#codecs).
|
||||
* Add [ALIAS columns](../../sql-reference/statements/create/table/#alias).
|
||||
* Add [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes)
|
||||
* Add [projections](../table-engines/mergetree-family/mergetree/#projections). Note that projection optimizations are
|
||||
disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here.
|
||||
`INDEX ... TYPE hypothesis` as [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)
|
||||
may be more useful in this case.
|
||||
* Modify [PARTITION BY](../table-engines/mergetree-family/custom-partitioning-key/)
|
||||
* Modify [ORDER BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
|
||||
* Modify [PRIMARY KEY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
|
||||
* Add [SAMPLE BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
|
||||
* Add [table TTL](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
|
||||
|
||||
```sql
|
||||
CREATE DATABASE db_name ENGINE = MaterializedMySQL(...)
|
||||
[SETTINGS ...]
|
||||
[TABLE OVERRIDE table_name (
|
||||
[COLUMNS (
|
||||
[col_name [datatype] [ALIAS expr] [CODEC(...)] [TTL expr], ...]
|
||||
[INDEX index_name expr TYPE indextype[(...)] GRANULARITY val, ...]
|
||||
[PROJECTION projection_name (SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]), ...]
|
||||
)]
|
||||
[ORDER BY expr]
|
||||
[PRIMARY KEY expr]
|
||||
[PARTITION BY expr]
|
||||
[SAMPLE BY expr]
|
||||
[TTL expr]
|
||||
), ...]
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```sql
|
||||
CREATE DATABASE db_name ENGINE = MaterializedMySQL(...)
|
||||
TABLE OVERRIDE table1 (
|
||||
COLUMNS (
|
||||
userid UUID,
|
||||
category LowCardinality(String),
|
||||
timestamp DateTime CODEC(Delta, Default)
|
||||
)
|
||||
PARTITION BY toYear(timestamp)
|
||||
),
|
||||
TABLE OVERRIDE table2 (
|
||||
COLUMNS (
|
||||
client_ip String TTL created + INTERVAL 72 HOUR
|
||||
)
|
||||
SAMPLE BY ip_hash
|
||||
)
|
||||
```
|
||||
|
||||
The `COLUMNS` list is sparse; existing columns are modified as specified, extra ALIAS columns are added. It is not
|
||||
possible to add ordinary or MATERIALIZED columns. Modified columns with a different type must be assignable from the
|
||||
original type. There is currently no validation of this or similar issues when the `CREATE DATABASE` query executes, so
|
||||
extra care needs to be taken.
|
||||
|
||||
You may specify overrides for tables that do not exist yet.
|
||||
|
||||
!!! warning "Warning"
|
||||
It is easy to break replication with table overrides if not used with care. For example:
|
||||
|
||||
* If an ALIAS column is added with a table override, and a column with the same name is later added to the source
|
||||
MySQL table, the converted ALTER TABLE query in ClickHouse will fail and replication stops.
|
||||
* It is currently possible to add overrides that reference nullable columns where not-nullable are required, such as in
|
||||
`ORDER BY` or `PARTITION BY`. This will cause CREATE TABLE queries to fail, which also causes replication to stop.
|
||||
|
||||
## Examples of Use {#examples-of-use}
|
||||
|
||||
@ -150,11 +236,9 @@ mysql> SELECT * FROM test;
|
||||
```
|
||||
|
||||
```text
|
||||
+---+------+------+
|
||||
| a | b | c |
|
||||
+---+------+------+
|
||||
| 2 | 222 | Wow! |
|
||||
+---+------+------+
|
||||
┌─a─┬───b─┬─c────┐
|
||||
│ 2 │ 222 │ Wow! │
|
||||
└───┴─────┴──────┘
|
||||
```
|
||||
|
||||
Database in ClickHouse, exchanging data with the MySQL server:
|
||||
|
@ -70,7 +70,7 @@ ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_bloc
|
||||
|
||||
## PostgreSQL schema {#schema}
|
||||
|
||||
PostgreSQL [schema](https://www.postgresql.org/docs/9.1/ddl-schemas.html) can be used in two ways.
|
||||
PostgreSQL [schema](https://www.postgresql.org/docs/9.1/ddl-schemas.html) can be configured in 3 ways (starting from version 21.12).
|
||||
|
||||
1. One schema for one `MaterializedPostgreSQL` database engine. Requires using the setting `materialized_postgresql_schema`.
|
||||
Tables are accessed via table name only:
|
||||
|
@ -5,8 +5,7 @@ toc_title: HDFS
|
||||
|
||||
# HDFS {#table_engines-hdfs}
|
||||
|
||||
This engine provides integration with [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar
|
||||
to the [File](../../../engines/table-engines/special/file.md#table_engines-file) and [URL](../../../engines/table-engines/special/url.md#table_engines-url) engines, but provides Hadoop-specific features.
|
||||
This engine provides integration with the [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by letting you manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar to the [File](../../../engines/table-engines/special/file.md#table_engines-file) and [URL](../../../engines/table-engines/special/url.md#table_engines-url) engines, but provides Hadoop-specific features.
|
||||
|
||||
## Usage {#usage}
|
||||
|
||||
@ -14,12 +13,13 @@ to the [File](../../../engines/table-engines/special/file.md#table_engines-file)
|
||||
ENGINE = HDFS(URI, format)
|
||||
```
|
||||
|
||||
The `URI` parameter is the whole file URI in HDFS.
|
||||
The `format` parameter specifies one of the available file formats. To perform
|
||||
**Engine Parameters**
|
||||
|
||||
- `URI` - whole file URI in HDFS. The path part of `URI` may contain globs. In this case the table would be readonly.
|
||||
- `format` - specifies one of the available file formats. To perform
|
||||
`SELECT` queries, the format must be supported for input, and to perform
|
||||
`INSERT` queries – for output. The available formats are listed in the
|
||||
[Formats](../../../interfaces/formats.md#formats) section.
|
||||
The path part of `URI` may contain globs. In this case the table would be readonly.
|
||||
|
||||
**Example:**
|
||||
|
||||
@ -71,12 +71,12 @@ Constructions with `{}` are similar to the [remote](../../../sql-reference/table
|
||||
|
||||
1. Suppose we have several files in TSV format with the following URIs on HDFS:
|
||||
|
||||
- 'hdfs://hdfs1:9000/some_dir/some_file_1'
|
||||
- 'hdfs://hdfs1:9000/some_dir/some_file_2'
|
||||
- 'hdfs://hdfs1:9000/some_dir/some_file_3'
|
||||
- 'hdfs://hdfs1:9000/another_dir/some_file_1'
|
||||
- 'hdfs://hdfs1:9000/another_dir/some_file_2'
|
||||
- 'hdfs://hdfs1:9000/another_dir/some_file_3'
|
||||
- 'hdfs://hdfs1:9000/some_dir/some_file_1'
|
||||
- 'hdfs://hdfs1:9000/some_dir/some_file_2'
|
||||
- 'hdfs://hdfs1:9000/some_dir/some_file_3'
|
||||
- 'hdfs://hdfs1:9000/another_dir/some_file_1'
|
||||
- 'hdfs://hdfs1:9000/another_dir/some_file_2'
|
||||
- 'hdfs://hdfs1:9000/another_dir/some_file_3'
|
||||
|
||||
1. There are several ways to make a table consisting of all six files:
|
||||
|
||||
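One such way, shown below as a hedged sketch (the table name and column definitions are placeholder assumptions; the glob pattern matches the URIs listed above), is to address all six files with a single glob:

``` sql
-- {some,another} expands to both directories, {1..3} to the three file suffixes.
CREATE TABLE hdfs_glob_example (name String, value UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/{some,another}_dir/some_file_{1..3}', 'TSV');
```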
@ -132,6 +132,7 @@ Similar to GraphiteMergeTree, the HDFS engine supports extended configuration us
|
||||
|
||||
|
||||
| **parameter** | **default value** |
|
||||
| - | - |
|
||||
| rpc\_client\_connect\_tcpnodelay | true |
|
||||
| dfs\_client\_read\_shortcircuit | true |
|
||||
| output\_replace-datanode-on-failure | true |
|
||||
@ -181,25 +182,26 @@ Similar to GraphiteMergeTree, the HDFS engine supports extended configuration us
|
||||
#### ClickHouse extras {#clickhouse-extras}
|
||||
|
||||
| **parameter** | **default value** |
|
||||
| - | - |
|
||||
|hadoop\_kerberos\_keytab | "" |
|
||||
|hadoop\_kerberos\_principal | "" |
|
||||
|hadoop\_kerberos\_kinit\_command | kinit |
|
||||
|libhdfs3\_conf | "" |
|
||||
|
||||
### Limitations {#limitations}
|
||||
* hadoop\_security\_kerberos\_ticket\_cache\_path and libhdfs3\_conf can be global only, not user specific
|
||||
* `hadoop_security_kerberos_ticket_cache_path` and `libhdfs3_conf` can be global only, not user specific
|
||||
|
||||
## Kerberos support {#kerberos-support}
|
||||
|
||||
If hadoop\_security\_authentication parameter has value 'kerberos', ClickHouse authentifies via Kerberos facility.
|
||||
Parameters [here](#clickhouse-extras) and hadoop\_security\_kerberos\_ticket\_cache\_path may be of help.
|
||||
If the `hadoop_security_authentication` parameter has the value `kerberos`, ClickHouse authenticates via Kerberos.
|
||||
The parameters listed [here](#clickhouse-extras) and `hadoop_security_kerberos_ticket_cache_path` may be of help.
|
||||
Note that due to libhdfs3 limitations only the old-fashioned approach is supported,
|
||||
datanode communications are not secured by SASL (HADOOP\_SECURE\_DN\_USER is a reliable indicator of such
|
||||
security approach). Use tests/integration/test\_storage\_kerberized\_hdfs/hdfs_configs/bootstrap.sh for reference.
|
||||
datanode communications are not secured by SASL (`HADOOP_SECURE_DN_USER` is a reliable indicator of such
|
||||
security approach). Use `tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh` for reference.
|
||||
|
||||
If hadoop\_kerberos\_keytab, hadoop\_kerberos\_principal or hadoop\_kerberos\_kinit\_command is specified, kinit will be invoked. hadoop\_kerberos\_keytab and hadoop\_kerberos\_principal are mandatory in this case. kinit tool and krb5 configuration files are required.
|
||||
If `hadoop_kerberos_keytab`, `hadoop_kerberos_principal` or `hadoop_kerberos_kinit_command` is specified, `kinit` will be invoked. `hadoop_kerberos_keytab` and `hadoop_kerberos_principal` are mandatory in this case. `kinit` tool and krb5 configuration files are required.
|
||||
|
||||
## HDFS Namenode HA support{#namenode-ha}
|
||||
## HDFS Namenode HA support {#namenode-ha}
|
||||
|
||||
libhdfs3 support HDFS namenode HA.
|
||||
|
||||
|
@ -37,6 +37,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
[rabbitmq_skip_broken_messages = N,]
|
||||
[rabbitmq_max_block_size = N,]
|
||||
[rabbitmq_flush_interval_ms = N]
|
||||
[rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish']
|
||||
```
|
||||
|
||||
Required parameters:
|
||||
@ -59,6 +60,7 @@ Optional parameters:
|
||||
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. Default: `0`. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
|
||||
- `rabbitmq_max_block_size`
|
||||
- `rabbitmq_flush_interval_ms`
|
||||
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
|
||||
|
||||
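A hedged sketch of a table that passes queue settings when the queue is created (the host, exchange name, and column schema below are placeholder assumptions):

``` sql
CREATE TABLE rabbitmq_queue (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow',
         -- the queue is created as durable, with a dead-letter exchange and a length cap
         rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10';
```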
SSL connection:
|
||||
|
||||
|
@ -66,9 +66,9 @@ WHERE table = 'visits'
|
||||
└───────────┴────────────────┴────────┘
|
||||
```
|
||||
|
||||
The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in [ALTER … PARTITION](#alter_manipulations-with-partitions) queries.
|
||||
The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in [ALTER … PARTITION](../../../sql-reference/statements/alter/partition.md) queries.
|
||||
|
||||
The `name` column contains the names of the partition data parts. You can use this column to specify the name of the part in the [ALTER ATTACH PART](#alter_attach-partition) query.
|
||||
The `name` column contains the names of the partition data parts. You can use this column to specify the name of the part in the [ALTER ATTACH PART](../../../sql-reference/statements/alter/partition.md#alter_attach-partition) query.
|
||||
|
||||
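As a brief, hedged illustration (reusing the `visits` table and the values shown above), these column values can be plugged directly into such queries:

``` sql
-- Detach a whole partition using a value from the `partition` column.
ALTER TABLE visits DETACH PARTITION 201901;
-- Attach a single part back using a value from the `name` column.
ALTER TABLE visits ATTACH PART '201901_1_3_1';
```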
Let’s break down the name of the first part: `201901_1_3_1`:
|
||||
|
||||
|
@ -262,7 +262,7 @@ In the example below, the index can’t be used.
|
||||
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
|
||||
```
|
||||
|
||||
To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md).
|
||||
To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md#force-primary-key).
|
||||
|
||||
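A hedged sketch of how `force_index_by_date` surfaces the problem for the query above (the exact exception text depends on the server version):

``` sql
-- Throw an exception instead of running queries that cannot use the date (partition) index.
SET force_index_by_date = 1;
-- Fails: the query has no restrictive condition on the date key.
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%';
```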
The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date.
|
||||
|
||||
|
@ -8,24 +8,43 @@ toc_title: Distributed
|
||||
Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers.
|
||||
Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.
|
||||
|
||||
The Distributed engine accepts parameters:
|
||||
## Creating a Table {#distributed-creating-a-table}
|
||||
|
||||
- the cluster name in the server’s config file
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
(
|
||||
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
|
||||
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
|
||||
...
|
||||
) ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]])
|
||||
[SETTINGS name=value, ...]
|
||||
```
|
||||
|
||||
- the name of a remote database
|
||||
### From a Table {#distributed-from-a-table}
|
||||
When the `Distributed` table is pointing to a table on the current server you can adopt that table's schema:
|
||||
|
||||
- the name of a remote table
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2 ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]]) [SETTINGS name=value, ...]
|
||||
```
|
||||
|
||||
- (optionally) sharding key
|
||||
**Distributed Parameters**
|
||||
|
||||
- (optionally) policy name, it will be used to store temporary files for async send
|
||||
- `cluster` - the cluster name in the server’s config file
|
||||
|
||||
See also:
|
||||
- `database` - the name of a remote database
|
||||
|
||||
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
|
||||
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
|
||||
- `table` - the name of a remote table
|
||||
|
||||
Also, it accepts the following settings:
|
||||
- `sharding_key` - (optionally) sharding key
|
||||
|
||||
- `policy_name` - (optionally) policy name, it will be used to store temporary files for async send
|
||||
|
||||
**See Also**
|
||||
|
||||
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
|
||||
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
|
||||
|
||||
**Distributed Settings**
|
||||
|
||||
- `fsync_after_insert` - do the `fsync` for the file data after asynchronous insert to Distributed. Guarantees that the OS flushed the whole inserted data to a file **on the initiator node** disk.
|
||||
|
||||
@ -59,24 +78,25 @@ Also, it accepts the following settings:
|
||||
- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting
|
||||
- `bytes_to_throw_insert` is handled before `bytes_to_delay_insert`, so you should not set it to a value less than `bytes_to_delay_insert`
|
||||
|
||||
Example:
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
Distributed(logs, default, hits[, sharding_key[, policy_name]])
|
||||
CREATE TABLE hits_all AS hits
|
||||
ENGINE = Distributed(logs, default, hits[, sharding_key[, policy_name]])
|
||||
SETTINGS
|
||||
fsync_after_insert=0,
|
||||
fsync_directories=0;
|
||||
```
|
||||
|
||||
Data will be read from all servers in the `logs` cluster, from the default.hits table located on every server in the cluster.
|
||||
Data will be read from all servers in the `logs` cluster, from the `default.hits` table located on every server in the cluster.
|
||||
Data is not only read but is partially processed on the remote servers (to the extent that this is possible).
|
||||
For example, for a query with GROUP BY, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
|
||||
For example, for a query with `GROUP BY`, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
|
||||
|
||||
Instead of the database name, you can use a constant expression that returns a string. For example: currentDatabase().
|
||||
Instead of the database name, you can use a constant expression that returns a string. For example: `currentDatabase()`.
|
||||
|
||||
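A hedged sketch (the table name `hits_all_current` is a placeholder; the cluster and remote table reuse the earlier example):

``` sql
-- currentDatabase() stands in for an explicit database name.
CREATE TABLE hits_all_current AS hits
ENGINE = Distributed(logs, currentDatabase(), hits);
```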
logs – The cluster name in the server’s config file.
|
||||
## Clusters {#distributed-clusters}
|
||||
|
||||
Clusters are set like this:
|
||||
Clusters are configured in the [server configuration file](../../../operations/configuration-files.md):
|
||||
|
||||
``` xml
|
||||
<remote_servers>
|
||||
@ -132,12 +152,13 @@ Replicas are duplicating servers (in order to read all the data, you can access
|
||||
Cluster names must not contain dots.
|
||||
|
||||
The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `compression` are specified for each server:
|
||||
|
||||
- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server does not start. If you change the DNS record, restart the server.
|
||||
- `port` – The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Do not confuse it with http_port.
|
||||
- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Access rights](../../../operations/access-rights.md).
|
||||
- `port` – The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Not to be confused with `http_port`.
|
||||
- `user` – Name of the user for connecting to a remote server. Default value is the `default` user. This user must have access to connect to the specified server. Access is configured in the `users.xml` file. For more information, see the section [Access rights](../../../operations/access-rights.md).
|
||||
- `password` – The password for connecting to a remote server (not masked). Default value: empty string.
|
||||
- `secure` - Use ssl for connection, usually you also should define `port` = 9440. Server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and have correct certificates.
|
||||
- `compression` - Use data compression. Default value: true.
|
||||
- `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and be configured with correct certificates.
|
||||
- `compression` - Use data compression. Default value: `true`.
|
||||
|
||||
When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting.
|
||||
If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times.
|
||||
@ -149,40 +170,42 @@ You can specify as many clusters as you wish in the configuration.
|
||||
|
||||
To view your clusters, use the `system.clusters` table.
|
||||
|
||||
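For instance, a hedged sketch of such a query (the exact output depends on your configuration):

``` sql
SELECT cluster, shard_num, shard_weight, replica_num, host_name, port
FROM system.clusters;
```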
The Distributed engine allows working with a cluster like a local server. However, the cluster is inextensible: you must write its configuration in the server config file (even better, for all the cluster’s servers).
|
||||
The `Distributed` engine allows working with a cluster like a local server. However, the cluster's configuration cannot be specified dynamically; it has to be configured in the server config file. Usually, all servers in a cluster will have the same cluster config (though this is not required). Clusters from the config file are updated on the fly, without restarting the server.
|
||||
|
||||
The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you do not need to create a Distributed table – use the `remote` table function instead. See the section [Table functions](../../../sql-reference/table-functions/index.md).
|
||||
If you need to send a query to an unknown set of shards and replicas each time, you do not need to create a `Distributed` table – use the `remote` table function instead. See the section [Table functions](../../../sql-reference/table-functions/index.md).
|
||||
|
||||
## Writing data {#distributed-writing-data}
|
||||
|
||||
There are two methods for writing data to a cluster:
|
||||
|
||||
First, you can define which servers to write which data to and perform the write directly on each shard. In other words, perform INSERT in the tables that the distributed table “looks at”. This is the most flexible solution as you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area. This is also the most optimal solution since data can be written to different shards completely independently.
|
||||
First, you can define which servers to write which data to and perform the write directly on each shard. In other words, perform direct `INSERT` statements on the remote tables in the cluster that the `Distributed` table is pointing to. This is the most flexible solution as you can use any sharding scheme, even one that is non-trivial due to the requirements of the subject area. This is also the most optimal solution since data can be written to different shards completely independently.
|
||||
|
||||
Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it does not mean anything in this case.
|
||||
Second, you can perform `INSERT` statements on a `Distributed` table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a `Distributed` table, it must have the `sharding_key` parameter configured (except if there is only one shard).
|
||||
|
||||
Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19.
|
||||
Each shard can have a `<weight>` defined in the config file. By default, the weight is `1`. Data is distributed across shards in the amount proportional to the shard weight. All shard weights are summed up, then each shard's weight is divided by the total to determine each shard's proportion. For example, if there are two shards and the first has a weight of 1 while the second has a weight of 2, the first will be sent one third (1 / 3) of inserted rows and the second will be sent two thirds (2 / 3).
|
||||
|
||||
Each shard can have the `internal_replication` parameter defined in the config file.
|
||||
Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this if the tables underlying the `Distributed` table are replicated tables (e.g. any of the `Replicated*MergeTree` table engines). One of the table replicas will receive the write and it will be replicated to the other replicas automatically.
|
||||
|
||||
If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this alternative if the Distributed table “looks at” replicated tables. In other words, if the table where data will be written is going to replicate them itself.
|
||||
|
||||
If it is set to `false` (the default), data is written to all replicas. In essence, this means that the Distributed table replicates data itself. This is worse than using replicated tables, because the consistency of replicas is not checked, and over time they will contain slightly different data.
|
||||
If `internal_replication` is set to `false` (the default), data is written to all replicas. In this case, the `Distributed` table replicates data itself. This is worse than using replicated tables because the consistency of replicas is not checked and, over time, they will contain slightly different data.
|
||||
|
||||
To select the shard that a row of data is sent to, the sharding expression is analyzed, and its remainder is taken from dividing it by the total weight of the shards. The row is sent to the shard that corresponds to the half-interval of the remainders from `prev_weights` to `prev_weights + weight`, where `prev_weights` is the total weight of the shards with the smallest number, and `weight` is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for the remainders from the range \[0, 9), and to the second for the remainders from the range \[9, 19).
|
||||
|
||||
The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running IN and JOIN by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function: intHash64(UserID).
|
||||
The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running `IN` and `JOIN` by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function e.g. `intHash64(UserID)`.
|
||||
|
||||
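A hedged sketch combining the pieces above (the table name `hits_sharded` is a placeholder; the cluster, database, and remote table reuse the earlier example):

``` sql
-- Rows are routed to shards by the remainder of intHash64(UserID)
-- divided by the total shard weight.
CREATE TABLE hits_sharded AS hits
ENGINE = Distributed(logs, default, hits, intHash64(UserID));
```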
A simple remainder from the division is a limited solution for sharding and isn’t always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables.
|
||||
|
||||
SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
|
||||
A simple remainder from the division is a limited solution for sharding and isn’t always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area rather than using entries in `Distributed` tables.
|
||||
|
||||
You should be concerned about the sharding scheme in the following cases:
|
||||
|
||||
- Queries are used that require joining data (IN or JOIN) by a specific key. If data is sharded by this key, you can use local IN or JOIN instead of GLOBAL IN or GLOBAL JOIN, which is much more efficient.
|
||||
- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we’ve done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries.
|
||||
- Queries are used that require joining data (`IN` or `JOIN`) by a specific key. If data is sharded by this key, you can use local `IN` or `JOIN` instead of `GLOBAL IN` or `GLOBAL JOIN`, which is much more efficient.
|
||||
- A large number of servers is used (hundreds or more) with a large number of small queries, for example, queries for data of individual clients (e.g. websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we’ve done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. `Distributed` tables are created for each layer, and a single shared distributed table is created for global queries.
|
||||
|
||||
Data is written asynchronously. When inserted in the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The periodicity for sending data is managed by the [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance by better utilizing local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`. The number of threads performing background tasks can be set by [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting.
|
||||
|
||||
If the server ceased to exist or had a rough restart (for example, after a device failure) after an INSERT to a Distributed table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the `broken` subdirectory and no longer used.
|
||||
If the server ceased to exist or had a rough restart (for example, due to a hardware failure) after an `INSERT` to a `Distributed` table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the `broken` subdirectory and no longer used.
|
||||
|
||||
## Reading data {#distributed-reading-data}
|
||||
|
||||
When querying a `Distributed` table, `SELECT` queries are sent to all shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
|
||||
|
||||
When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
|
||||
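A hedged sketch (it reuses the `hits_all` table from the example above and assumes the underlying replicated tables define a sampling key, which this setting relies on to split the work):

``` sql
-- Read from up to 3 replicas of each shard in parallel.
SET max_parallel_replicas = 3;
SELECT count() FROM hits_all;
```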
|
||||
|
@ -204,7 +204,7 @@ When parsing with this format, tabs or linefeeds are not allowed in each field.
|
||||
|
||||
This format is also available under the name `TSVRawWithNames`.
|
||||
|
||||
## TabSeparatedWithNamesAndTypes {#tabseparatedrawwithnamesandtypes}
|
||||
## TabSeparatedRawWithNamesAndTypes {#tabseparatedrawwithnamesandtypes}
|
||||
|
||||
Differs from `TabSeparatedWithNamesAndTypes` format in that the rows are written without escaping.
|
||||
When parsing with this format, tabs or linefeeds are not allowed in each field.
|
||||
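A minimal, hedged sketch of requesting this output format (the table name is a placeholder):

``` sql
SELECT * FROM some_table FORMAT TabSeparatedRawWithNamesAndTypes;
```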
|
99
docs/en/interfaces/grpc.md
Normal file
99
docs/en/interfaces/grpc.md
Normal file
@ -0,0 +1,99 @@
|
||||
---
|
||||
toc_priority: 19
|
||||
toc_title: gRPC Interface
|
||||
---
|
||||
|
||||
# gRPC Interface {#grpc-interface}
|
||||
|
||||
## Introduction {#grpc-interface-introduction}
|
||||
|
||||
ClickHouse supports the [gRPC](https://grpc.io/) interface. It is an open-source remote procedure call system that uses HTTP/2 and [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers). The implementation of gRPC in ClickHouse supports:
|
||||
|
||||
- SSL;
|
||||
- authentication;
|
||||
- sessions;
|
||||
- compression;
|
||||
- parallel queries through the same channel;
|
||||
- cancellation of queries;
|
||||
- getting progress and logs;
|
||||
- external tables.
|
||||
|
||||
The specification of the interface is described in [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||
|
||||
## gRPC Configuration {#grpc-interface-configuration}
|
||||
|
||||
To use the gRPC interface, set `grpc_port` in the main [server configuration](../operations/configuration-files.md). Other configuration options are shown in the following example:
|
||||
|
||||
```xml
|
||||
<grpc_port>9100</grpc_port>
|
||||
<grpc>
|
||||
<enable_ssl>false</enable_ssl>
|
||||
|
||||
<!-- The following two files are used only if SSL is enabled -->
|
||||
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
|
||||
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
|
||||
|
||||
<!-- Whether server requests client for a certificate -->
|
||||
<ssl_require_client_auth>false</ssl_require_client_auth>
|
||||
|
||||
<!-- The following file is used only if ssl_require_client_auth=true -->
|
||||
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
|
||||
|
||||
<!-- Default compression algorithm (applied if client doesn't specify another algorithm, see result_compression in QueryInfo).
|
||||
Supported algorithms: none, deflate, gzip, stream_gzip -->
|
||||
<compression>deflate</compression>
|
||||
|
||||
<!-- Default compression level (applied if client doesn't specify another level, see result_compression in QueryInfo).
|
||||
Supported levels: none, low, medium, high -->
|
||||
<compression_level>medium</compression_level>
|
||||
|
||||
<!-- Send/receive message size limits in bytes. -1 means unlimited -->
|
||||
<max_send_message_size>-1</max_send_message_size>
|
||||
<max_receive_message_size>-1</max_receive_message_size>
|
||||
|
||||
<!-- Enable if you want to get detailed logs -->
|
||||
<verbose_logs>false</verbose_logs>
|
||||
</grpc>
|
||||
```
|
||||
|
||||
## Built-in Client {#grpc-client}
|
||||
|
||||
You can write a client in any of the programming languages supported by gRPC using the provided [specification](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||
Alternatively, you can use the built-in Python client located at [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) in the repository. The built-in client requires the [grpcio and grpcio-tools](https://grpc.io/docs/languages/python/quickstart) Python modules.
|
||||
|
||||
The client supports the following arguments:
|
||||
|
||||
- `--help` – Shows a help message and exits.
|
||||
- `--host HOST, -h HOST` – A server name. Default value: `localhost`. You can also use an IPv4 or IPv6 address.
|
||||
- `--port PORT` – A port to connect to. This port should be enabled in the ClickHouse server configuration (see `grpc_port`). Default value: `9100`.
|
||||
- `--user USER_NAME, -u USER_NAME` – A user name. Default value: `default`.
|
||||
- `--password PASSWORD` – A password. Default value: empty string.
|
||||
- `--query QUERY, -q QUERY` – A query to process when using non-interactive mode.
|
||||
- `--database DATABASE, -d DATABASE` – A default database. If not specified, the current database set in the server settings is used (`default` by default).
|
||||
- `--format OUTPUT_FORMAT, -f OUTPUT_FORMAT` – A result output [format](formats.md). Default value for interactive mode: `PrettyCompact`.
|
||||
- `--debug` – Enables showing debug information.
|
||||
|
||||
To run the client in interactive mode, call it without the `--query` argument.
|
||||
|
||||
In batch mode, query data can be passed via `stdin`.
|
||||
|
||||
**Client Usage Example**
|
||||
|
||||
In the following example a table is created and loaded with data from a CSV file. Then the content of the table is queried.
|
||||
|
||||
``` bash
|
||||
./clickhouse-grpc-client.py -q "CREATE TABLE grpc_example_table (id UInt32, text String) ENGINE = MergeTree() ORDER BY id;"
|
||||
echo "0,Input data for" > a.txt ; echo "1,gRPC protocol example" >> a.txt
|
||||
cat a.txt | ./clickhouse-grpc-client.py -q "INSERT INTO grpc_example_table FORMAT CSV"
|
||||
|
||||
./clickhouse-grpc-client.py --format PrettyCompact -q "SELECT * FROM grpc_example_table;"
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─id─┬─text──────────────────┐
|
||||
│ 0 │ Input data for │
|
||||
│ 1 │ gRPC protocol example │
|
||||
└────┴───────────────────────┘
|
||||
```
|
@ -426,6 +426,9 @@ Next are the configuration methods for different `type`.
|
||||
|
||||
The following example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.
|
||||
|
||||
!!! note "Warning"
|
||||
To keep the default `handlers` such as `query`, `play`, `ping`, use the `<defaults/>` rule.
|
||||
|
||||
Example:
|
||||
|
||||
``` xml
|
||||
@ -443,6 +446,7 @@ Example:
|
||||
<query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
|
||||
</handler>
|
||||
</rule>
|
||||
<defaults/>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
@ -475,6 +479,7 @@ Example:
|
||||
<query_param_name>query_param</query_param_name>
|
||||
</handler>
|
||||
</rule>
|
||||
<defaults/>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
@ -505,6 +510,7 @@ Return a message.
|
||||
<response_content>Say Hi!</response_content>
|
||||
</handler>
|
||||
</rule>
|
||||
<defaults/>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
|
@ -6,10 +6,11 @@ toc_title: Introduction
|
||||
|
||||
# Interfaces {#interfaces}
|
||||
|
||||
ClickHouse provides two network interfaces (both can be optionally wrapped in TLS for additional security):
|
||||
ClickHouse provides three network interfaces (they can be optionally wrapped in TLS for additional security):
|
||||
|
||||
- [HTTP](http.md), which is documented and easy to use directly.
|
||||
- [Native TCP](../interfaces/tcp.md), which has less overhead.
|
||||
- [gRPC](grpc.md).
|
||||
|
||||
In most cases it is recommended to use an appropriate tool or library instead of interacting with these interfaces directly. The following are officially supported by Yandex:
|
||||
|
||||
@ -24,4 +25,3 @@ There are also a wide range of third-party libraries for working with ClickHouse
|
||||
- [Integrations](../interfaces/third-party/integrations.md)
|
||||
- [Visual interfaces](../interfaces/third-party/gui.md)
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/interfaces/) <!--hide-->
|
||||
|
20
docs/en/interfaces/third-party/gui.md
vendored
20
docs/en/interfaces/third-party/gui.md
vendored
@ -220,4 +220,24 @@ SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/i
|
||||
|
||||
[Chadmin](https://github.com/bun4uk/chadmin) is a simple UI that lets you visualize the queries currently running on your ClickHouse cluster, see information about them, and kill them if needed.
|
||||
|
||||
### TABLUM.IO {#tablum_io}
|
||||
|
||||
[TABLUM.IO](https://tablum.io/) — an online query and analytics tool for ETL and visualization. It allows connecting to ClickHouse, querying data via a versatile SQL console, and loading data from static files and third-party services. TABLUM.IO can visualize query results as charts and tables.
|
||||
|
||||
Features:
|
||||
- ETL: data loading from popular databases, local and remote files, API invocations.
|
||||
- Versatile SQL console with syntax highlighting and a visual query builder.
|
||||
- Data visualization as charts and tables.
|
||||
- Data materialization and sub-queries.
|
||||
- Data reporting to Slack, Telegram or email.
|
||||
- Data pipelining via proprietary API.
|
||||
- Data export in JSON, CSV, SQL, HTML formats.
|
||||
- Web-based interface.
|
||||
|
||||
TABLUM.IO can be run as a self-hosted solution (as a docker image) or in the cloud.
|
||||
License: [commercial](https://tablum.io/pricing) product with 3-month free period.
|
||||
|
||||
Try it out for free [in the cloud](https://tablum.io/try).
|
||||
Learn more about the product at [TABLUM.IO](https://tablum.io/)
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/interfaces/third-party/gui/) <!--hide-->
|
||||
|
@ -87,7 +87,7 @@ toc_title: Adopters
|
||||
| <a href="https://kontur.ru" class="favicon">Kontur</a> | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
|
||||
| <a href="https://www.kuaishou.com/" class="favicon">Kuaishou</a> | Video | — | — | — | [ClickHouse Meetup, October 2018](https://clickhouse.com/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/) |
|
||||
| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
|
||||
| <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
|
||||
| <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 5 servers | 55 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
|
||||
| <a href="https://lifestreet.com/" class="favicon">LifeStreet</a> | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
|
||||
| <a href="https://mcs.mail.ru/" class="favicon">Mail.ru Cloud Solutions</a> | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
|
||||
| <a href="https://maxilect.com/" class="favicon">MAXILECT</a> | Ad Tech, Blockchain, ML, AI | — | — | — | [Job advertisement, 2021](https://www.linkedin.com/feed/update/urn:li:activity:6780842017229430784/) |
|
||||
@ -178,5 +178,9 @@ toc_title: Adopters
|
||||
| <a href="https://promo.croc.ru/digitalworker" class="favicon">Цифровой Рабочий</a> | Industrial IoT, Analytics | — | — | — | [Blog post in Russian, March 2021](https://habr.com/en/company/croc/blog/548018/) |
|
||||
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
|
||||
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
|
||||
| <a href="https://futurragroup.com/" class="favicon">Futurra Group</a> | Analytics | — | — | — | [Article in Russian, December 2021](https://dou.ua/forums/topic/35587/) |
|
||||
| <a href="https://usetech.com/" class="favicon">UseTech</a> | Software Development | — | — | — | [Job Posting, December 2021](https://vk.com/wall136266658_2418) |
|
||||
| <a href="https://lookforsale.ru/" class="favicon">Lookforsale</a> | E-Commerce | — | — | — | [Job Posting, December 2021](https://telegram.me/javascript_jobs/587318) |
|
||||
| <a href="https://rvision.pro/en/" class="favicon">R-Vision</a> | Information Security | — | — | — | [Article in Russian, December 2021](https://www.anti-malware.ru/reviews/R-Vision-SENSE-15) |
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->
|
||||
|
@ -16,6 +16,11 @@ ZooKeeper is one of the first well-known open-source coordination systems. It's
|
||||
|
||||
By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with ClickHouse Keeper. Snapshots and logs have a format incompatible with ZooKeeper, but the `clickhouse-keeper-converter` tool allows converting ZooKeeper data to a ClickHouse Keeper snapshot. The interserver protocol in ClickHouse Keeper is also incompatible with ZooKeeper, so a mixed ZooKeeper / ClickHouse Keeper cluster is impossible.
|
||||
|
||||
ClickHouse Keeper supports Access Control Lists (ACLs) the same way as [ZooKeeper](https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) does. ClickHouse Keeper supports the same set of permissions and has identical built-in schemes: `world`, `auth`, `digest`, `host` and `ip`. The digest authentication scheme uses the pair `username:password`. The password is encoded in Base64.
|
||||
|
||||
!!! info "Note"
|
||||
External integrations are not supported.
|
||||
|
||||
## Configuration
|
||||
|
||||
ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of the ClickHouse server, but in both cases the configuration is almost the same `.xml` file. The main ClickHouse Keeper configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:
|
||||
@ -105,7 +110,7 @@ ClickHouse Keeper is bundled into the ClickHouse server package, just add config
|
||||
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
|
||||
```
|
||||
|
||||
## Four Latter Word Commands
|
||||
## Four Letter Word Commands
|
||||
|
||||
ClickHouse Keeper also provides 4lw commands which are almost the same as in ZooKeeper. Each command is composed of four letters, such as `mntr`, `stat`, etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on the server and connections respectively.
|
||||
|
||||
@ -118,13 +123,13 @@ echo mntr | nc localhost 9181
|
||||
|
||||
Below are the detailed 4lw commands:
|
||||
|
||||
- ruok : Tests if server is running in a non-error state. The server will respond with imok if it is running. Otherwise it will not respond at all. A response of "imok" does not necessarily indicate that the server has joined the quorum, just that the server process is active and bound to the specified client port. Use "stat" for details on state wrt quorum and client connection information.
|
||||
- `ruok`: Tests if server is running in a non-error state. The server will respond with imok if it is running. Otherwise it will not respond at all. A response of "imok" does not necessarily indicate that the server has joined the quorum, just that the server process is active and bound to the specified client port. Use "stat" for details on state wrt quorum and client connection information.
|
||||
|
||||
```
|
||||
imok
|
||||
```
|
||||
|
||||
- mntr : Outputs a list of variables that could be used for monitoring the health of the cluster.
|
||||
- `mntr`: Outputs a list of variables that could be used for monitoring the health of the cluster.
|
||||
|
||||
```
|
||||
zk_version v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7
|
||||
@ -146,12 +151,11 @@ zk_followers 0
|
||||
zk_synced_followers 0
|
||||
```
|
||||
|
||||
- srvr : Lists full details for the server.
|
||||
- `srvr`: Lists full details for the server.
|
||||
|
||||
```
|
||||
ClickHouse Keeper version: v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7
|
||||
Latency min/avg/max: 0/0/0
|
||||
|
||||
Received: 2
|
||||
Sent : 2
|
||||
Connections: 1
|
||||
@ -161,16 +165,14 @@ Mode: leader
|
||||
Node count: 4
|
||||
```
|
||||
|
||||
- stat : Lists brief details for the server and connected clients.
|
||||
- `stat`: Lists brief details for the server and connected clients.
|
||||
|
||||
```
|
||||
ClickHouse Keeper version: v21.11.1.1-prestable-7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7
|
||||
Clients:
|
||||
192.168.1.1:52852(recved=0,sent=0)
|
||||
192.168.1.1:52042(recved=24,sent=48)
|
||||
|
||||
Latency min/avg/max: 0/0/0
|
||||
|
||||
Received: 4
|
||||
Sent : 4
|
||||
Connections: 1
|
||||
@ -178,16 +180,15 @@ Outstanding: 0
|
||||
Zxid: 36
|
||||
Mode: leader
|
||||
Node count: 4
|
||||
|
||||
```
|
||||
|
||||
- srst : Reset server statistics. The command will affect the result of `srvr`, `mntr` and `stat`.
|
||||
- `srst`: Reset server statistics. The command will affect the result of `srvr`, `mntr` and `stat`.
|
||||
|
||||
```
|
||||
Server stats reset.
|
||||
```
|
||||
|
||||
- conf : Print details about serving configuration.
|
||||
- `conf`: Print details about serving configuration.
|
||||
|
||||
```
|
||||
server_id=1
|
||||
@ -220,20 +221,20 @@ compress_snapshots_with_zstd_format=true
|
||||
configuration_change_tries_count=20
|
||||
```
|
||||
|
||||
- cons : List full connection/session details for all clients connected to this server. Includes information on numbers of packets received/sent, session id, operation latencies, last operation performed, etc...
|
||||
- `cons`: List full connection/session details for all clients connected to this server. Includes information on numbers of packets received/sent, session id, operation latencies, last operation performed, etc...
|
||||
|
||||
```
|
||||
192.168.1.1:52163(recved=0,sent=0,sid=0xffffffffffffffff,lop=NA,est=1636454787393,to=30000,lzxid=0xffffffffffffffff,lresp=0,llat=0,minlat=0,avglat=0,maxlat=0)
|
||||
192.168.1.1:52042(recved=9,sent=18,sid=0x0000000000000001,lop=List,est=1636454739887,to=30000,lcxid=0x0000000000000005,lzxid=0x0000000000000005,lresp=1636454739892,llat=0,minlat=0,avglat=0,maxlat=0)
|
||||
```
|
||||
|
||||
- crst : Reset connection/session statistics for all connections.
|
||||
- `crst`: Reset connection/session statistics for all connections.
|
||||
|
||||
```
|
||||
Connection stats reset.
|
||||
```
|
||||
|
||||
- envi : Print details about serving environment
|
||||
- `envi`: Print details about serving environment
|
||||
|
||||
```
|
||||
Environment:
|
||||
@ -250,41 +251,41 @@ user.tmp=/var/folders/b4/smbq5mfj7578f2jzwn602tt40000gn/T/
|
||||
```
|
||||
|
||||
|
||||
- dirs : Shows the total size of snapshot and log files in bytes
|
||||
- `dirs`: Shows the total size of snapshot and log files in bytes
|
||||
|
||||
```
|
||||
snapshot_dir_size: 0
|
||||
log_dir_size: 3875
|
||||
```
|
||||
|
||||
- isro: Tests if server is running in read-only mode. The server will respond with "ro" if in read-only mode or "rw" if not in read-only mode.
|
||||
- `isro`: Tests if server is running in read-only mode. The server will respond with "ro" if in read-only mode or "rw" if not in read-only mode.
|
||||
|
||||
```
|
||||
rw
|
||||
```
|
||||
|
||||
- wchs : Lists brief information on watches for the server.
|
||||
- `wchs`: Lists brief information on watches for the server.
|
||||
|
||||
```
|
||||
1 connections watching 1 paths
|
||||
Total watches:1
|
||||
```
|
||||
|
||||
- wchc : Lists detailed information on watches for the server, by session. This outputs a list of sessions(connections) with associated watches (paths). Note, depending on the number of watches this operation may be expensive (ie impact server performance), use it carefully.
|
||||
- `wchc`: Lists detailed information on watches for the server, by session. This outputs a list of sessions (connections) with associated watches (paths). Note, depending on the number of watches this operation may be expensive (ie impact server performance), use it carefully.
|
||||
|
||||
```
|
||||
0x0000000000000001
|
||||
/clickhouse/task_queue/ddl
|
||||
```
|
||||
|
||||
- wchp : Lists detailed information on watches for the server, by path. This outputs a list of paths (znodes) with associated sessions. Note, depending on the number of watches this operation may be expensive (ie impact server performance), use it carefully.
|
||||
- `wchp`: Lists detailed information on watches for the server, by path. This outputs a list of paths (znodes) with associated sessions. Note, depending on the number of watches this operation may be expensive (i. e. impact server performance), use it carefully.
|
||||
|
||||
```
|
||||
/clickhouse/task_queue/ddl
|
||||
0x0000000000000001
|
||||
```
|
||||
|
||||
- dump : Lists the outstanding sessions and ephemeral nodes. This only works on the leader.
|
||||
- `dump`: Lists the outstanding sessions and ephemeral nodes. This only works on the leader.
|
||||
|
||||
```
|
||||
Sessions dump (2):
|
||||
|
@ -14,11 +14,11 @@ To enable Kerberos, one should include `kerberos` section in `config.xml`. This
|
||||
#### Parameters:
|
||||
|
||||
- `principal` - canonical service principal name that will be acquired and used when accepting security contexts.
|
||||
- This parameter is optional, if omitted, the default principal will be used.
|
||||
- This parameter is optional, if omitted, the default principal will be used.
|
||||
|
||||
|
||||
- `realm` - a realm, that will be used to restrict authentication to only those requests whose initiator's realm matches it.
|
||||
- This parameter is optional, if omitted, no additional filtering by realm will be applied.
|
||||
- This parameter is optional, if omitted, no additional filtering by realm will be applied.
|
||||
|
||||
Example (goes into `config.xml`):
|
||||
|
||||
@ -75,7 +75,7 @@ In order to enable Kerberos authentication for the user, specify `kerberos` sect
|
||||
Parameters:
|
||||
|
||||
- `realm` - a realm that will be used to restrict authentication to only those requests whose initiator's realm matches it.
|
||||
- This parameter is optional, if omitted, no additional filtering by realm will be applied.
|
||||
- This parameter is optional, if omitted, no additional filtering by realm will be applied.
|
||||
|
||||
Example (goes into `users.xml`):
|
||||
|
||||
|
@ -505,7 +505,7 @@ Keys:
|
||||
- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
|
||||
- `log` – The log file. Contains all the entries according to `level`.
|
||||
- `errorlog` – Error log file.
|
||||
- `size` – Size of the file. Applies to `log`and`errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
|
||||
- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
|
||||
- `count` – The number of archived log files that ClickHouse stores.
|
||||
|
||||
**Example**
|
||||
@ -750,9 +750,13 @@ The value 0 means that you can delete all tables without any restrictions.
|
||||
|
||||
## max_thread_pool_size {#max-thread-pool-size}
|
||||
|
||||
The maximum number of threads in the Global Thread pool.
|
||||
ClickHouse uses threads from the Global Thread pool to process queries. If there is no idle thread to process a query, then a new thread is created in the pool. `max_thread_pool_size` limits the maximum number of threads in the pool.
|
||||
|
||||
Default value: 10000.
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
|
||||
Default value: `10000`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -762,9 +766,13 @@ Default value: 10000.
|
||||
|
||||
## max_thread_pool_free_size {#max-thread-pool-free-size}
|
||||
|
||||
The number of threads that are always held in the Global Thread pool.
|
||||
If the number of **idle** threads in the Global Thread pool is greater than `max_thread_pool_free_size`, then ClickHouse releases resources occupied by some threads and the pool size is decreased. Threads can be created again if necessary.
|
||||
|
||||
Default value: 1000.
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
|
||||
Default value: `1000`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -774,9 +782,13 @@ Default value: 1000.
|
||||
|
||||
## thread_pool_queue_size {#thread-pool-queue-size}
|
||||
|
||||
The limit to the number of jobs that can be scheduled on the Global Thread pool. Increasing queue size leads to larger memory usage. It is recommended to keep this value equal to the `max_thread_pool_size`.
|
||||
The maximum number of jobs that can be scheduled on the Global Thread pool. Increasing queue size leads to larger memory usage. It is recommended to keep this value equal to [max_thread_pool_size](#max-thread-pool-size).
|
||||
|
||||
Default value: 10000.
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
|
||||
Default value: `10000`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1443,7 +1455,7 @@ You can also define sections `memory` — means storing information only in memo
|
||||
|
||||
To add an LDAP server as a remote user directory of users that are not defined locally, define a single `ldap` section with the following parameters:
|
||||
- `server` — one of the LDAP server names defined in the `ldap_servers` config section. This parameter is mandatory and cannot be empty.
|
||||
- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, user will not be able to perform any actions after authentication. If any of the listed roles is not defined locally at the time of authentication, the authenthication attept will fail as if the provided password was incorrect.
|
||||
- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, user will not be able to perform any actions after authentication. If any of the listed roles is not defined locally at the time of authentication, the authentication attempt will fail as if the provided password was incorrect.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1507,3 +1519,4 @@ Possible values:

- Positive integer.

Default value: `10000`.

@ -356,3 +356,24 @@ Possible values:

- 1 — Parts are detached.

Default value: `0`.

## merge_tree_clear_old_temporary_directories_interval_seconds {#setting-merge-tree-clear-old-temporary-directories-interval-seconds}

Sets the interval in seconds for ClickHouse to execute the cleanup of old temporary directories.

Possible values:

- Any positive integer.

Default value: `60` seconds.

## merge_tree_clear_old_parts_interval_seconds {#setting-merge-tree-clear-old-parts-interval-seconds}

Sets the interval in seconds for ClickHouse to execute the cleanup of old parts, WALs, and mutations.

Possible values:

- Any positive integer.

Default value: `1` second.
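
Both cleanup intervals can be tuned per table through the `SETTINGS` clause; a minimal sketch, assuming these are applied as MergeTree-level settings as their placement in this section suggests (table name, columns, and values are illustrative):

```sql
-- Illustrative MergeTree table with custom cleanup intervals
CREATE TABLE example_events
(
    id UInt64,
    ts DateTime
)
ENGINE = MergeTree
ORDER BY id
SETTINGS
    merge_tree_clear_old_temporary_directories_interval_seconds = 120,
    merge_tree_clear_old_parts_interval_seconds = 5;
```
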
@ -885,26 +885,6 @@ Possible values:

Default value: 2013265920.

## merge_tree_clear_old_temporary_directories_interval_seconds {#setting-merge-tree-clear-old-temporary-directories-interval-seconds}

Sets the interval in seconds for ClickHouse to execute the cleanup of old temporary directories.

Possible values:

- Any positive integer.

Default value: `60` seconds.

## merge_tree_clear_old_parts_interval_seconds {#setting-merge-tree-clear-old-parts-interval-seconds}

Sets the interval in seconds for ClickHouse to execute the cleanup of old parts, WALs, and mutations.

Possible values:

- Any positive integer.

Default value: `1` second.

## min_bytes_to_use_direct_io {#settings-min-bytes-to-use-direct-io}

The minimum data volume required for using direct I/O access to the storage disk.
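
For example, direct I/O can be required only for sufficiently large reads in the current session; the threshold below is an illustrative value:

```sql
-- Use direct I/O only when a query is expected to read at least 10 GiB from disk
SET min_bytes_to_use_direct_io = 10737418240;
```
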
@ -992,9 +972,16 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'

Setting up query threads logging.

Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.
Query threads log into [system.query_thread_log](../../operations/system-tables/query_thread_log.md) table. This setting has an effect only when [log_queries](#settings-log-queries) is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.

Example:
Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: `1`.

**Example**

``` text
log_query_threads=1
```
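
An end-to-end sketch (the query and filter are illustrative): enable both settings for the session, run a query, and then inspect its threads in [system.query_thread_log](../../operations/system-tables/query_thread_log.md). Entries appear in the table with a small flush delay.

```sql
SET log_queries = 1, log_query_threads = 1;

SELECT sum(number) FROM numbers(1000000);

-- Recently logged query threads
SELECT event_time, query_id, thread_id, query
FROM system.query_thread_log
ORDER BY event_time DESC
LIMIT 10;
```
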
@ -1700,18 +1687,17 @@ Quorum writes

`INSERT` succeeds only when ClickHouse manages to correctly write data to the `insert_quorum` of replicas during the `insert_quorum_timeout`. If for any reason the number of replicas with successful writes does not reach the `insert_quorum`, the write is considered failed and ClickHouse will delete the inserted block from all the replicas where data has already been written.

All the replicas in the quorum are consistent, i.e., they contain data from all previous `INSERT` queries. The `INSERT` sequence is linearized.
When `insert_quorum_parallel` is disabled, all replicas in the quorum are consistent, i.e. they contain data from all previous `INSERT` queries (the `INSERT` sequence is linearized). When reading data written using `insert_quorum` and `insert_quorum_parallel` is disabled, you can turn on sequential consistency for `SELECT` queries using [select_sequential_consistency](#settings-select_sequential_consistency).

When reading the data written from the `insert_quorum`, you can use the [select_sequential_consistency](#settings-select_sequential_consistency) option.

ClickHouse generates an exception
ClickHouse generates an exception:

- If the number of available replicas at the time of the query is less than the `insert_quorum`.
- At an attempt to write data when the previous block has not yet been inserted in the `insert_quorum` of replicas. This situation may occur if the user tries to perform an `INSERT` before the previous one with the `insert_quorum` is completed.
- When `insert_quorum_parallel` is disabled and an attempt to write data is made when the previous block has not yet been inserted in `insert_quorum` of replicas. This situation may occur if the user tries to perform another `INSERT` query to the same table before the previous one with `insert_quorum` is completed.

See also:

- [insert_quorum_timeout](#settings-insert_quorum_timeout)
- [insert_quorum_parallel](#settings-insert_quorum_parallel)
- [select_sequential_consistency](#settings-select_sequential_consistency)

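As a rough illustration (the table and values are assumptions, not part of this page), quorum writes can be enabled for a session like this:

```sql
-- Require acknowledgement from 2 replicas within 60 seconds (values are illustrative)
SET insert_quorum = 2, insert_quorum_timeout = 60000;

INSERT INTO replicated_table VALUES (1, 'example');
```
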
## insert_quorum_timeout {#settings-insert_quorum_timeout}

@ -1723,11 +1709,29 @@ Default value: 600 000 milliseconds (ten minutes).

See also:

- [insert_quorum](#settings-insert_quorum)
- [insert_quorum_parallel](#settings-insert_quorum_parallel)
- [select_sequential_consistency](#settings-select_sequential_consistency)

## insert_quorum_parallel {#settings-insert_quorum_parallel}

Enables or disables parallelism for quorum `INSERT` queries. If enabled, additional `INSERT` queries can be sent while previous queries have not yet finished. If disabled, additional writes to the same table will be rejected.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.

See also:

- [insert_quorum](#settings-insert_quorum)
- [insert_quorum_timeout](#settings-insert_quorum_timeout)
- [select_sequential_consistency](#settings-select_sequential_consistency)

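To get the linearized behaviour described above, the parallel mode has to be switched off explicitly; a minimal sketch (the table is an assumption):

```sql
-- Disable parallel quorum inserts so that the INSERT sequence is linearized
SET insert_quorum = 2, insert_quorum_parallel = 0;

INSERT INTO replicated_table VALUES (2, 'example');
```
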
## select_sequential_consistency {#settings-select_sequential_consistency}

Enables or disables sequential consistency for `SELECT` queries:
Enables or disables sequential consistency for `SELECT` queries. Requires `insert_quorum_parallel` to be disabled (enabled by default).

Possible values:

@ -1740,10 +1744,13 @@ Usage

When sequential consistency is enabled, ClickHouse allows the client to execute the `SELECT` query only for those replicas that contain data from all previous `INSERT` queries executed with `insert_quorum`. If the client refers to a partial replica, ClickHouse will generate an exception. The SELECT query will not include data that has not yet been written to the quorum of replicas.

When `insert_quorum_parallel` is enabled (the default), then `select_sequential_consistency` does not work. This is because parallel `INSERT` queries can be written to different sets of quorum replicas so there is no guarantee a single replica will have received all writes.

See also:

- [insert_quorum](#settings-insert_quorum)
- [insert_quorum_timeout](#settings-insert_quorum_timeout)
- [insert_quorum_parallel](#settings-insert_quorum_parallel)

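Putting the pieces together, a hedged usage sketch (the table is an assumption): write with a quorum and non-parallel inserts, then read with sequential consistency.

```sql
-- Writing session: quorum writes with parallelism disabled
SET insert_quorum = 2, insert_quorum_parallel = 0;
INSERT INTO replicated_table VALUES (3, 'example');

-- Reading session: only replicas that contain all quorum inserts may answer
SET select_sequential_consistency = 1;
SELECT * FROM replicated_table;
```
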
## insert_deduplicate {#settings-insert-deduplicate}

@ -4057,6 +4064,41 @@ Possible values:

Default value: `0`.

## alter_partition_verbose_result {#alter-partition-verbose-result}

Enables or disables the display of information about the parts to which the manipulation operations with partitions and parts have been successfully applied.
Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition).

Possible values:

- 0 — disable verbosity.
- 1 — enable verbosity.

Default value: `0`.

**Example**

```sql
CREATE TABLE test(a Int64, d Date, s String) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY a;
INSERT INTO test VALUES(1, '2021-01-01', '');
INSERT INTO test VALUES(1, '2021-01-01', '');
ALTER TABLE test DETACH PARTITION ID '202101';

ALTER TABLE test ATTACH PARTITION ID '202101' SETTINGS alter_partition_verbose_result = 1;

┌─command_type─────┬─partition_id─┬─part_name────┬─old_part_name─┐
│ ATTACH PARTITION │ 202101 │ 202101_7_7_0 │ 202101_5_5_0 │
│ ATTACH PARTITION │ 202101 │ 202101_8_8_0 │ 202101_6_6_0 │
└──────────────────┴──────────────┴──────────────┴───────────────┘

ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1;

┌─command_type─┬─partition_id─┬─part_name────┬─backup_name─┬─backup_path───────────────────┬─part_backup_path────────────────────────────────────────────┐
│ FREEZE ALL │ 202101 │ 202101_7_7_0 │ 8 │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_7_7_0 │
│ FREEZE ALL │ 202101 │ 202101_8_8_0 │ 8 │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_8_8_0 │
└──────────────┴──────────────┴──────────────┴─────────────┴───────────────────────────────┴─────────────────────────────────────────────────────────────┘
```

## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}

Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md#capnproto) `Enum` data type from schema.

Some files were not shown because too many files have changed in this diff