mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-29 02:52:13 +00:00
Merge branch 'master' into revert-48277-revert-48196-jit-randomization
This commit is contained in:
commit
9ccfa1ede8
261
.github/workflows/master.yml
vendored
261
.github/workflows/master.yml
vendored
@ -850,6 +850,48 @@ jobs:
|
|||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||||
|
BuilderBinRISCV64:
|
||||||
|
needs: [DockerHubPush]
|
||||||
|
runs-on: [self-hosted, builder]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/build_check
|
||||||
|
IMAGES_PATH=${{runner.temp}}/images_path
|
||||||
|
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||||
|
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||||
|
BUILD_NAME=binary_riscv64
|
||||||
|
EOF
|
||||||
|
- name: Download changed images
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
name: changed_images
|
||||||
|
path: ${{ env.IMAGES_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
submodules: true
|
||||||
|
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||||
|
- name: Build
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||||
|
- name: Upload build URLs to artifacts
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: ${{ env.BUILD_URLS }}
|
||||||
|
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||||
############################################################################################
|
############################################################################################
|
||||||
##################################### Docker images #######################################
|
##################################### Docker images #######################################
|
||||||
############################################################################################
|
############################################################################################
|
||||||
@ -932,6 +974,7 @@ jobs:
|
|||||||
- BuilderBinDarwinAarch64
|
- BuilderBinDarwinAarch64
|
||||||
- BuilderBinFreeBSD
|
- BuilderBinFreeBSD
|
||||||
- BuilderBinPPC64
|
- BuilderBinPPC64
|
||||||
|
- BuilderBinRISCV64
|
||||||
- BuilderBinAmd64Compat
|
- BuilderBinAmd64Compat
|
||||||
- BuilderBinAarch64V80Compat
|
- BuilderBinAarch64V80Compat
|
||||||
- BuilderBinClangTidy
|
- BuilderBinClangTidy
|
||||||
@ -2827,6 +2870,216 @@ jobs:
|
|||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan0:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=0
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan1:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=1
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan2:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=2
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan3:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=3
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan4:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=4
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan5:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=5
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
IntegrationTestsTsan0:
|
IntegrationTestsTsan0:
|
||||||
needs: [BuilderDebTsan]
|
needs: [BuilderDebTsan]
|
||||||
runs-on: [self-hosted, stress-tester]
|
runs-on: [self-hosted, stress-tester]
|
||||||
@ -3390,7 +3643,7 @@ jobs:
|
|||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
CHECK_NAME=Unit tests (release-clang)
|
CHECK_NAME=Unit tests (release)
|
||||||
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||||
EOF
|
EOF
|
||||||
- name: Download json reports
|
- name: Download json reports
|
||||||
@ -3920,6 +4173,12 @@ jobs:
|
|||||||
- IntegrationTestsAsan3
|
- IntegrationTestsAsan3
|
||||||
- IntegrationTestsAsan4
|
- IntegrationTestsAsan4
|
||||||
- IntegrationTestsAsan5
|
- IntegrationTestsAsan5
|
||||||
|
- IntegrationTestsAnalyzerAsan0
|
||||||
|
- IntegrationTestsAnalyzerAsan1
|
||||||
|
- IntegrationTestsAnalyzerAsan2
|
||||||
|
- IntegrationTestsAnalyzerAsan3
|
||||||
|
- IntegrationTestsAnalyzerAsan4
|
||||||
|
- IntegrationTestsAnalyzerAsan5
|
||||||
- IntegrationTestsRelease0
|
- IntegrationTestsRelease0
|
||||||
- IntegrationTestsRelease1
|
- IntegrationTestsRelease1
|
||||||
- IntegrationTestsRelease2
|
- IntegrationTestsRelease2
|
||||||
|
45
.github/workflows/nightly.yml
vendored
45
.github/workflows/nightly.yml
vendored
@ -75,51 +75,6 @@ jobs:
|
|||||||
Codebrowser:
|
Codebrowser:
|
||||||
needs: [DockerHubPush]
|
needs: [DockerHubPush]
|
||||||
uses: ./.github/workflows/woboq.yml
|
uses: ./.github/workflows/woboq.yml
|
||||||
BuilderCoverity:
|
|
||||||
needs: DockerHubPush
|
|
||||||
runs-on: [self-hosted, builder]
|
|
||||||
steps:
|
|
||||||
- name: Set envs
|
|
||||||
run: |
|
|
||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
|
||||||
BUILD_NAME=coverity
|
|
||||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
|
||||||
IMAGES_PATH=${{runner.temp}}/images_path
|
|
||||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
|
||||||
TEMP_PATH=${{runner.temp}}/build_check
|
|
||||||
EOF
|
|
||||||
echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
|
|
||||||
- name: Download changed images
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: changed_images
|
|
||||||
path: ${{ env.IMAGES_PATH }}
|
|
||||||
- name: Check out repository code
|
|
||||||
uses: ClickHouse/checkout@v1
|
|
||||||
with:
|
|
||||||
clear-repository: true
|
|
||||||
submodules: true
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
|
||||||
mkdir -p "$TEMP_PATH"
|
|
||||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
|
||||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
|
||||||
- name: Upload Coverity Analysis
|
|
||||||
if: ${{ success() || failure() }}
|
|
||||||
run: |
|
|
||||||
curl --form token="${COVERITY_TOKEN}" \
|
|
||||||
--form email='security+coverity@clickhouse.com' \
|
|
||||||
--form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tar.gz" \
|
|
||||||
--form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
|
|
||||||
--form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
|
|
||||||
https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
|
|
||||||
- name: Cleanup
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
|
||||||
SonarCloud:
|
SonarCloud:
|
||||||
runs-on: [self-hosted, builder]
|
runs-on: [self-hosted, builder]
|
||||||
env:
|
env:
|
||||||
|
260
.github/workflows/pull_request.yml
vendored
260
.github/workflows/pull_request.yml
vendored
@ -911,6 +911,47 @@ jobs:
|
|||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||||
|
BuilderBinRISCV64:
|
||||||
|
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||||
|
runs-on: [self-hosted, builder]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/build_check
|
||||||
|
IMAGES_PATH=${{runner.temp}}/images_path
|
||||||
|
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||||
|
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||||
|
BUILD_NAME=binary_riscv64
|
||||||
|
EOF
|
||||||
|
- name: Download changed images
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
name: changed_images
|
||||||
|
path: ${{ env.IMAGES_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
submodules: true
|
||||||
|
- name: Build
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||||
|
- name: Upload build URLs to artifacts
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: ${{ env.BUILD_URLS }}
|
||||||
|
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||||
############################################################################################
|
############################################################################################
|
||||||
##################################### Docker images #######################################
|
##################################### Docker images #######################################
|
||||||
############################################################################################
|
############################################################################################
|
||||||
@ -992,6 +1033,7 @@ jobs:
|
|||||||
- BuilderBinDarwinAarch64
|
- BuilderBinDarwinAarch64
|
||||||
- BuilderBinFreeBSD
|
- BuilderBinFreeBSD
|
||||||
- BuilderBinPPC64
|
- BuilderBinPPC64
|
||||||
|
- BuilderBinRISCV64
|
||||||
- BuilderBinAmd64Compat
|
- BuilderBinAmd64Compat
|
||||||
- BuilderBinAarch64V80Compat
|
- BuilderBinAarch64V80Compat
|
||||||
- BuilderBinClangTidy
|
- BuilderBinClangTidy
|
||||||
@ -3861,6 +3903,216 @@ jobs:
|
|||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
sudo rm -fr "$TEMP_PATH"
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan0:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=0
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan1:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=1
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan2:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=2
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan3:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=3
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan4:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=4
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
IntegrationTestsAnalyzerAsan5:
|
||||||
|
needs: [BuilderDebAsan]
|
||||||
|
runs-on: [self-hosted, stress-tester]
|
||||||
|
steps:
|
||||||
|
- name: Set envs
|
||||||
|
run: |
|
||||||
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
|
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||||
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
|
CHECK_NAME=Integration tests (asan, analyzer)
|
||||||
|
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||||
|
RUN_BY_HASH_NUM=5
|
||||||
|
RUN_BY_HASH_TOTAL=6
|
||||||
|
EOF
|
||||||
|
- name: Download json reports
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: ${{ env.REPORTS_PATH }}
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: ClickHouse/checkout@v1
|
||||||
|
with:
|
||||||
|
clear-repository: true
|
||||||
|
- name: Integration test
|
||||||
|
run: |
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
|
mkdir -p "$TEMP_PATH"
|
||||||
|
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||||
|
cd "$REPO_COPY/tests/ci"
|
||||||
|
python3 integration_test_check.py "$CHECK_NAME"
|
||||||
|
- name: Cleanup
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||||
|
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||||
|
sudo rm -fr "$TEMP_PATH"
|
||||||
IntegrationTestsTsan0:
|
IntegrationTestsTsan0:
|
||||||
needs: [BuilderDebTsan]
|
needs: [BuilderDebTsan]
|
||||||
runs-on: [self-hosted, stress-tester]
|
runs-on: [self-hosted, stress-tester]
|
||||||
@ -4289,7 +4541,7 @@ jobs:
|
|||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||||
CHECK_NAME=Unit tests (release-clang)
|
CHECK_NAME=Unit tests (release)
|
||||||
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||||
EOF
|
EOF
|
||||||
- name: Download json reports
|
- name: Download json reports
|
||||||
@ -4847,6 +5099,12 @@ jobs:
|
|||||||
- IntegrationTestsAsan3
|
- IntegrationTestsAsan3
|
||||||
- IntegrationTestsAsan4
|
- IntegrationTestsAsan4
|
||||||
- IntegrationTestsAsan5
|
- IntegrationTestsAsan5
|
||||||
|
- IntegrationTestsAnalyzerAsan0
|
||||||
|
- IntegrationTestsAnalyzerAsan1
|
||||||
|
- IntegrationTestsAnalyzerAsan2
|
||||||
|
- IntegrationTestsAnalyzerAsan3
|
||||||
|
- IntegrationTestsAnalyzerAsan4
|
||||||
|
- IntegrationTestsAnalyzerAsan5
|
||||||
- IntegrationTestsRelease0
|
- IntegrationTestsRelease0
|
||||||
- IntegrationTestsRelease1
|
- IntegrationTestsRelease1
|
||||||
- IntegrationTestsRelease2
|
- IntegrationTestsRelease2
|
||||||
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -69,6 +69,7 @@ cmake-build-*
|
|||||||
*.pyc
|
*.pyc
|
||||||
__pycache__
|
__pycache__
|
||||||
*.pytest_cache
|
*.pytest_cache
|
||||||
|
.mypy_cache
|
||||||
|
|
||||||
test.cpp
|
test.cpp
|
||||||
CPackConfig.cmake
|
CPackConfig.cmake
|
||||||
@ -161,8 +162,10 @@ tests/queries/0_stateless/test_*
|
|||||||
tests/queries/0_stateless/*.binary
|
tests/queries/0_stateless/*.binary
|
||||||
tests/queries/0_stateless/*.generated-expect
|
tests/queries/0_stateless/*.generated-expect
|
||||||
tests/queries/0_stateless/*.expect.history
|
tests/queries/0_stateless/*.expect.history
|
||||||
|
tests/integration/**/_gen
|
||||||
|
|
||||||
# rust
|
# rust
|
||||||
/rust/**/target
|
/rust/**/target
|
||||||
# It is autogenerated from *.in
|
# It is autogenerated from *.in
|
||||||
/rust/**/.cargo/config.toml
|
/rust/**/.cargo/config.toml
|
||||||
|
/rust/**/vendor
|
||||||
|
6
.gitmodules
vendored
6
.gitmodules
vendored
@ -258,9 +258,6 @@
|
|||||||
[submodule "contrib/wyhash"]
|
[submodule "contrib/wyhash"]
|
||||||
path = contrib/wyhash
|
path = contrib/wyhash
|
||||||
url = https://github.com/wangyi-fudan/wyhash
|
url = https://github.com/wangyi-fudan/wyhash
|
||||||
[submodule "contrib/hashidsxx"]
|
|
||||||
path = contrib/hashidsxx
|
|
||||||
url = https://github.com/schoentoon/hashidsxx
|
|
||||||
[submodule "contrib/nats-io"]
|
[submodule "contrib/nats-io"]
|
||||||
path = contrib/nats-io
|
path = contrib/nats-io
|
||||||
url = https://github.com/ClickHouse/nats.c
|
url = https://github.com/ClickHouse/nats.c
|
||||||
@ -343,3 +340,6 @@
|
|||||||
[submodule "contrib/c-ares"]
|
[submodule "contrib/c-ares"]
|
||||||
path = contrib/c-ares
|
path = contrib/c-ares
|
||||||
url = https://github.com/c-ares/c-ares.git
|
url = https://github.com/c-ares/c-ares.git
|
||||||
|
[submodule "contrib/incbin"]
|
||||||
|
path = contrib/incbin
|
||||||
|
url = https://github.com/graphitemaster/incbin.git
|
||||||
|
176
CHANGELOG.md
176
CHANGELOG.md
@ -1,4 +1,5 @@
|
|||||||
### Table of Contents
|
### Table of Contents
|
||||||
|
**[ClickHouse release v23.7, 2023-07-27](#237)**<br/>
|
||||||
**[ClickHouse release v23.6, 2023-06-30](#236)**<br/>
|
**[ClickHouse release v23.6, 2023-06-30](#236)**<br/>
|
||||||
**[ClickHouse release v23.5, 2023-06-08](#235)**<br/>
|
**[ClickHouse release v23.5, 2023-06-08](#235)**<br/>
|
||||||
**[ClickHouse release v23.4, 2023-04-26](#234)**<br/>
|
**[ClickHouse release v23.4, 2023-04-26](#234)**<br/>
|
||||||
@ -9,6 +10,181 @@
|
|||||||
|
|
||||||
# 2023 Changelog
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### <a id="237"></a> ClickHouse release 23.7, 2023-07-27
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Add `NAMED COLLECTION` access type (aliases `USE NAMED COLLECTION`, `NAMED COLLECTION USAGE`). This PR is backward incompatible because this access type is disabled by default (because a parent access type `NAMED COLLECTION ADMIN` is disabled by default as well). Proposed in [#50277](https://github.com/ClickHouse/ClickHouse/issues/50277). To grant use `GRANT NAMED COLLECTION ON collection_name TO user` or `GRANT NAMED COLLECTION ON * TO user`, to be able to give these grants `named_collection_admin` is required in config (previously it was named `named_collection_control`, so will remain as an alias). [#50625](https://github.com/ClickHouse/ClickHouse/pull/50625) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixing a typo in the `system.parts` column name `last_removal_attemp_time`. Now it is named `last_removal_attempt_time`. [#52104](https://github.com/ClickHouse/ClickHouse/pull/52104) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Bump version of the distributed_ddl_entry_format_version to 5 by default (enables opentelemetry and initial_query_idd pass through). This will not allow to process existing entries for distributed DDL after *downgrade* (but note, that usually there should be no such unprocessed entries). [#52128](https://github.com/ClickHouse/ClickHouse/pull/52128) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Check projection metadata the same way we check ordinary metadata. This change may prevent the server from starting in case there was a table with an invalid projection. An example is a projection that created positional columns in PK (e.g. `projection p (select * order by 1, 4)` which is not allowed in table PK and can cause a crash during insert/merge). Drop such projections before the update. Fixes [#52353](https://github.com/ClickHouse/ClickHouse/issues/52353). [#52361](https://github.com/ClickHouse/ClickHouse/pull/52361) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* The experimental feature `hashid` is removed due to a bug. The quality of implementation was questionable at the start, and it didn't get through the experimental status. This closes [#52406](https://github.com/ClickHouse/ClickHouse/issues/52406). [#52449](https://github.com/ClickHouse/ClickHouse/pull/52449) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Added `Overlay` database engine to combine multiple databases into one. Added `Filesystem` database engine to represent a directory in the filesystem as a set of implicitly available tables with auto-detected formats and structures. A new `S3` database engine allows to read-only interact with s3 storage by representing a prefix as a set of tables. A new `HDFS` database engine allows to interact with HDFS storage in the same way. [#48821](https://github.com/ClickHouse/ClickHouse/pull/48821) ([alekseygolub](https://github.com/alekseygolub)).
|
||||||
|
* Add support for external disks in Keeper for storing snapshots and logs. [#50098](https://github.com/ClickHouse/ClickHouse/pull/50098) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Add support for multi-directory selection (`{}`) globs. [#50559](https://github.com/ClickHouse/ClickHouse/pull/50559) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Kafka connector can fetch Avro schema from schema registry with basic authentication using url-encoded credentials. [#49664](https://github.com/ClickHouse/ClickHouse/pull/49664) ([Ilya Golshtein](https://github.com/ilejn)).
|
||||||
|
* Add function `arrayJaccardIndex` which computes the Jaccard similarity between two arrays. [#50076](https://github.com/ClickHouse/ClickHouse/pull/50076) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
|
||||||
|
* Add a column `is_obsolete` to `system.settings` and similar tables. Closes [#50819](https://github.com/ClickHouse/ClickHouse/issues/50819). [#50826](https://github.com/ClickHouse/ClickHouse/pull/50826) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Implement support of encrypted elements in configuration file. Added possibility to use encrypted text in leaf elements of configuration file. The text is encrypted using encryption codecs from `<encryption_codecs>` section. [#50986](https://github.com/ClickHouse/ClickHouse/pull/50986) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* Grace Hash Join algorithm is now applicable to FULL and RIGHT JOINs. [#49483](https://github.com/ClickHouse/ClickHouse/issues/49483). [#51013](https://github.com/ClickHouse/ClickHouse/pull/51013) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Add `SYSTEM STOP LISTEN` query for more graceful termination. Closes [#47972](https://github.com/ClickHouse/ClickHouse/issues/47972). [#51016](https://github.com/ClickHouse/ClickHouse/pull/51016) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Add `input_format_csv_allow_variable_number_of_columns` options. [#51273](https://github.com/ClickHouse/ClickHouse/pull/51273) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Another boring feature: add function `substring_index`, as in Spark or MySQL. [#51472](https://github.com/ClickHouse/ClickHouse/pull/51472) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* A system table `jemalloc_bins` to show stats for jemalloc bins. Example `SELECT *, size * (nmalloc - ndalloc) AS allocated_bytes FROM system.jemalloc_bins WHERE allocated_bytes > 0 ORDER BY allocated_bytes DESC LIMIT 10`. Enjoy. [#51674](https://github.com/ClickHouse/ClickHouse/pull/51674) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Add `RowBinaryWithDefaults` format with extra byte before each column as a flag for using the column's default value. Closes [#50854](https://github.com/ClickHouse/ClickHouse/issues/50854). [#51695](https://github.com/ClickHouse/ClickHouse/pull/51695) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Added `default_temporary_table_engine` setting. Same as `default_table_engine` but for temporary tables. [#51292](https://github.com/ClickHouse/ClickHouse/issues/51292). [#51708](https://github.com/ClickHouse/ClickHouse/pull/51708) ([velavokr](https://github.com/velavokr)).
|
||||||
|
* Added new `initcap` / `initcapUTF8` functions which convert the first letter of each word to upper case and the rest to lower case. [#51735](https://github.com/ClickHouse/ClickHouse/pull/51735) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Create table now supports `PRIMARY KEY` syntax in column definition. Columns are added to primary index in the same order columns are defined. [#51881](https://github.com/ClickHouse/ClickHouse/pull/51881) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Added the possibility to use date and time format specifiers in log and error log file names, either in config files (`log` and `errorlog` tags) or command line arguments (`--log-file` and `--errorlog-file`). [#51945](https://github.com/ClickHouse/ClickHouse/pull/51945) ([Victor Krasnov](https://github.com/sirvickr)).
|
||||||
|
* Added Peak Memory Usage statistic to HTTP headers. [#51946](https://github.com/ClickHouse/ClickHouse/pull/51946) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Added new `hasSubsequence` (+`CaseInsensitive` and `UTF8` versions) functions to match subsequences in strings. [#52050](https://github.com/ClickHouse/ClickHouse/pull/52050) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Add `array_agg` as alias of `groupArray` for PostgreSQL compatibility. Closes [#52100](https://github.com/ClickHouse/ClickHouse/issues/52100). ### Documentation entry for user-facing changes. [#52135](https://github.com/ClickHouse/ClickHouse/pull/52135) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add `any_value` as a compatibility alias for `any` aggregate function. Closes [#52140](https://github.com/ClickHouse/ClickHouse/issues/52140). [#52147](https://github.com/ClickHouse/ClickHouse/pull/52147) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add aggregate function `array_concat_agg` for compatibility with BigQuery, it's alias of `groupArrayArray`. Closes [#52139](https://github.com/ClickHouse/ClickHouse/issues/52139). [#52149](https://github.com/ClickHouse/ClickHouse/pull/52149) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add `OCTET_LENGTH` as an alias to `length`. Closes [#52153](https://github.com/ClickHouse/ClickHouse/issues/52153). [#52176](https://github.com/ClickHouse/ClickHouse/pull/52176) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
|
||||||
|
* Added `firstLine` function to extract the first line from the multi-line string. This closes [#51172](https://github.com/ClickHouse/ClickHouse/issues/51172). [#52209](https://github.com/ClickHouse/ClickHouse/pull/52209) ([Mikhail Koviazin](https://github.com/mkmkme)).
|
||||||
|
* Implement KQL-style formatting for the `Interval` data type. This is only needed for compatibility with the `Kusto` query language. [#45671](https://github.com/ClickHouse/ClickHouse/pull/45671) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Added query `SYSTEM FLUSH ASYNC INSERT QUEUE` which flushes all pending asynchronous inserts to the destination tables. Added a server-side setting `async_insert_queue_flush_on_shutdown` (`true` by default) which determines whether to flush queue of asynchronous inserts on graceful shutdown. Setting `async_insert_threads` is now a server-side setting. [#49160](https://github.com/ClickHouse/ClickHouse/pull/49160) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Aliases `current_database` and a new function `current_schemas` for compatibility with PostgreSQL. [#51076](https://github.com/ClickHouse/ClickHouse/pull/51076) ([Pedro Riera](https://github.com/priera)).
|
||||||
|
* Add alias for functions `today` (now available under the `curdate`/`current_date` names) and `now` (`current_timestamp`). [#52106](https://github.com/ClickHouse/ClickHouse/pull/52106) ([Lloyd-Pottiger](https://github.com/Lloyd-Pottiger)).
|
||||||
|
* Support `async_deduplication_token` for async insert. [#52136](https://github.com/ClickHouse/ClickHouse/pull/52136) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Add new setting `disable_url_encoding` that allows to disable decoding/encoding path in uri in URL engine. [#52337](https://github.com/ClickHouse/ClickHouse/pull/52337) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Enable automatic selection of the sparse serialization format by default. It improves performance. The format is supported since version 22.1. After this change, downgrading to versions older than 22.1 might not be possible. You can turn off the usage of the sparse serialization format by providing the `ratio_of_defaults_for_sparse_serialization = 1` setting for your MergeTree tables. [#49631](https://github.com/ClickHouse/ClickHouse/pull/49631) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Enable `move_all_conditions_to_prewhere` and `enable_multiple_prewhere_read_steps` settings by default. [#46365](https://github.com/ClickHouse/ClickHouse/pull/46365) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Improves performance of some queries by tuning allocator. [#46416](https://github.com/ClickHouse/ClickHouse/pull/46416) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Now we use fixed-size tasks in `MergeTreePrefetchedReadPool` as in `MergeTreeReadPool`. Also from now we use connection pool for S3 requests. [#49732](https://github.com/ClickHouse/ClickHouse/pull/49732) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* More pushdown to the right side of join. [#50532](https://github.com/ClickHouse/ClickHouse/pull/50532) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Improve grace_hash join by reserving hash table's size (resubmit). [#50875](https://github.com/ClickHouse/ClickHouse/pull/50875) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Waiting on lock in `OpenedFileCache` could be noticeable sometimes. We sharded it into multiple sub-maps (each with its own lock) to avoid contention. [#51341](https://github.com/ClickHouse/ClickHouse/pull/51341) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Move conditions with primary key columns to the end of PREWHERE chain. The idea is that conditions with PK columns are likely to be used in PK analysis and will not contribute much more to PREWHERE filtering. [#51958](https://github.com/ClickHouse/ClickHouse/pull/51958) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Speed up `COUNT(DISTINCT)` for String types by inlining SipHash. The performance experiments of *OnTime* on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring an improvement of *11.6%* to the QPS of the query *Q8* while having no impact on others. [#52036](https://github.com/ClickHouse/ClickHouse/pull/52036) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||||
|
* Enable `allow_vertical_merges_from_compact_to_wide_parts` by default. It will save memory usage during merges. [#52295](https://github.com/ClickHouse/ClickHouse/pull/52295) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1`. This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823). This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173). [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Reduce the number of syscalls in `FileCache::loadMetadata` - this speeds up server startup if the filesystem cache is configured. [#52435](https://github.com/ClickHouse/ClickHouse/pull/52435) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Allow to have strict lower boundary for file segment size by downloading remaining data in the background. Minimum size of file segment (if actual file size is bigger) is configured as cache configuration setting `boundary_alignment`, by default `4Mi`. Number of background threads are configured as cache configuration setting `background_download_threads`, by default `2`. Also `max_file_segment_size` was increased from `8Mi` to `32Mi` in this PR. [#51000](https://github.com/ClickHouse/ClickHouse/pull/51000) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Decreased default timeouts for S3 from 30 seconds to 3 seconds, and for other HTTP from 180 seconds to 30 seconds. [#51171](https://github.com/ClickHouse/ClickHouse/pull/51171) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* New setting `merge_tree_determine_task_size_by_prewhere_columns` added. If set to `true` only sizes of the columns from `PREWHERE` section will be considered to determine reading task size. Otherwise all the columns from query are considered. [#52606](https://github.com/ClickHouse/ClickHouse/pull/52606) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Use read_bytes/total_bytes_to_read for progress bar in s3/file/url/... table functions for better progress indication. [#51286](https://github.com/ClickHouse/ClickHouse/pull/51286) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Introduce a table setting `wait_for_unique_parts_send_before_shutdown_ms` which specify the amount of time replica will wait before closing interserver handler for replicated sends. Also fix inconsistency with shutdown of tables and interserver handlers: now server shutdown tables first and only after it shut down interserver handlers. [#51851](https://github.com/ClickHouse/ClickHouse/pull/51851) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Allow SQL standard `FETCH` without `OFFSET`. See https://antonz.org/sql-fetch/. [#51293](https://github.com/ClickHouse/ClickHouse/pull/51293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow filtering HTTP headers for the URL/S3 table functions with the new `http_forbid_headers` section in config. Both exact matching and regexp filters are available. [#51038](https://github.com/ClickHouse/ClickHouse/pull/51038) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Don't show messages about `16 EiB` free space in logs, as they don't make sense. This closes [#49320](https://github.com/ClickHouse/ClickHouse/issues/49320). [#49342](https://github.com/ClickHouse/ClickHouse/pull/49342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Properly check the limit for the `sleepEachRow` function. Add a setting `function_sleep_max_microseconds_per_block`. This is needed for generic query fuzzer. [#49343](https://github.com/ClickHouse/ClickHouse/pull/49343) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix two issues in `geoHash` functions. [#50066](https://github.com/ClickHouse/ClickHouse/pull/50066) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Log async insert flush queries into `system.query_log`. [#51160](https://github.com/ClickHouse/ClickHouse/pull/51160) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Functions `date_diff` and `age` now support millisecond/microsecond unit and work with microsecond precision. [#51291](https://github.com/ClickHouse/ClickHouse/pull/51291) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Improve parsing of path in clickhouse-keeper-client. [#51359](https://github.com/ClickHouse/ClickHouse/pull/51359) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* A third-party product depending on ClickHouse (Gluten: a Plugin to Double SparkSQL's Performance) had a bug. This fix avoids heap overflow in that third-party product while reading from HDFS. [#51386](https://github.com/ClickHouse/ClickHouse/pull/51386) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Add ability to disable native copy for S3 (setting for BACKUP/RESTORE `allow_s3_native_copy`, and `s3_allow_native_copy` for `s3`/`s3_plain` disks). [#51448](https://github.com/ClickHouse/ClickHouse/pull/51448) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add column `primary_key_size` to `system.parts` table to show compressed primary key size on disk. Closes [#51400](https://github.com/ClickHouse/ClickHouse/issues/51400). [#51496](https://github.com/ClickHouse/ClickHouse/pull/51496) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Allow running `clickhouse-local` without procfs, without home directory existing, and without name resolution plugins from glibc. [#51518](https://github.com/ClickHouse/ClickHouse/pull/51518) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add placeholder `%a` for rull filename in rename_files_after_processing setting. [#51603](https://github.com/ClickHouse/ClickHouse/pull/51603) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add column `modification_time` into `system.parts_columns`. [#51685](https://github.com/ClickHouse/ClickHouse/pull/51685) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add new setting `input_format_csv_use_default_on_bad_values` to CSV format that allows to insert default value when parsing of a single field failed. [#51716](https://github.com/ClickHouse/ClickHouse/pull/51716) ([KevinyhZou](https://github.com/KevinyhZou)).
|
||||||
|
* Added a crash log flush to the disk after the unexpected crash. [#51720](https://github.com/ClickHouse/ClickHouse/pull/51720) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Fix behavior in dashboard page where errors unrelated to authentication are not shown. Also fix 'overlapping' chart behavior. [#51744](https://github.com/ClickHouse/ClickHouse/pull/51744) ([Zach Naimon](https://github.com/ArctypeZach)).
|
||||||
|
* Allow UUID to UInt128 conversion. [#51765](https://github.com/ClickHouse/ClickHouse/pull/51765) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Added support for function `range` of Nullable arguments. [#51767](https://github.com/ClickHouse/ClickHouse/pull/51767) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Convert condition like `toyear(x) = c` to `c1 <= x < c2`. [#51795](https://github.com/ClickHouse/ClickHouse/pull/51795) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Improve MySQL compatibility of the statement `SHOW INDEX`. [#51796](https://github.com/ClickHouse/ClickHouse/pull/51796) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix `use_structure_from_insertion_table_in_table_functions` does not work with `MATERIALIZED` and `ALIAS` columns. Closes [#51817](https://github.com/ClickHouse/ClickHouse/issues/51817). Closes [#51019](https://github.com/ClickHouse/ClickHouse/issues/51019). [#51825](https://github.com/ClickHouse/ClickHouse/pull/51825) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Cache dictionary now requests only unique keys from source. Closes [#51762](https://github.com/ClickHouse/ClickHouse/issues/51762). [#51853](https://github.com/ClickHouse/ClickHouse/pull/51853) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fixed the case when settings were not applied for EXPLAIN query when FORMAT was provided. [#51859](https://github.com/ClickHouse/ClickHouse/pull/51859) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Allow SETTINGS before FORMAT in DESCRIBE TABLE query for compatibility with SELECT query. Closes [#51544](https://github.com/ClickHouse/ClickHouse/issues/51544). [#51899](https://github.com/ClickHouse/ClickHouse/pull/51899) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Var-Int encoded integers (e.g. used by the native protocol) can now use the full 64-bit range. 3rd party clients are advised to update their var-int code accordingly. [#51905](https://github.com/ClickHouse/ClickHouse/pull/51905) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update certificates when they change without the need to manually SYSTEM RELOAD CONFIG. [#52030](https://github.com/ClickHouse/ClickHouse/pull/52030) ([Mike Kot](https://github.com/myrrc)).
|
||||||
|
* Added `allow_create_index_without_type` setting that allow to ignore `ADD INDEX` queries without specified `TYPE`. Standard SQL queries will just succeed without changing table schema. [#52056](https://github.com/ClickHouse/ClickHouse/pull/52056) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Log messages are written to the `system.text_log` from the server startup. [#52113](https://github.com/ClickHouse/ClickHouse/pull/52113) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* In cases where the HTTP endpoint has multiple IP addresses and the first of them is unreachable, a timeout exception was thrown. Made session creation with handling all resolved endpoints. [#52116](https://github.com/ClickHouse/ClickHouse/pull/52116) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||||
|
* Avro input format now supports Union even if it contains only a single type. Closes [#52131](https://github.com/ClickHouse/ClickHouse/issues/52131). [#52137](https://github.com/ClickHouse/ClickHouse/pull/52137) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add setting `optimize_use_implicit_projections` to disable implicit projections (currently only `min_max_count` projection). [#52152](https://github.com/ClickHouse/ClickHouse/pull/52152) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* It was possible to use the function `hasToken` for infinite loop. Now this possibility is removed. This closes [#52156](https://github.com/ClickHouse/ClickHouse/issues/52156). [#52160](https://github.com/ClickHouse/ClickHouse/pull/52160) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Create ZK ancestors optimistically. [#52195](https://github.com/ClickHouse/ClickHouse/pull/52195) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix [#50582](https://github.com/ClickHouse/ClickHouse/issues/50582). Avoid the `Not found column ... in block` error in some cases of reading in-order and constants. [#52259](https://github.com/ClickHouse/ClickHouse/pull/52259) ([Chen768959](https://github.com/Chen768959)).
|
||||||
|
* Check whether S2 geo primitives are invalid as early as possible on ClickHouse side. This closes: [#27090](https://github.com/ClickHouse/ClickHouse/issues/27090). [#52260](https://github.com/ClickHouse/ClickHouse/pull/52260) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Add back missing projection QueryAccessInfo when `query_plan_optimize_projection = 1`. This fixes [#50183](https://github.com/ClickHouse/ClickHouse/issues/50183) . This fixes [#50093](https://github.com/ClickHouse/ClickHouse/issues/50093). [#52327](https://github.com/ClickHouse/ClickHouse/pull/52327) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* When `ZooKeeperRetriesControl` rethrows an error, it's more useful to see its original stack trace, not the one from `ZooKeeperRetriesControl` itself. [#52347](https://github.com/ClickHouse/ClickHouse/pull/52347) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Wait for zero copy replication lock even if some disks don't support it. [#52376](https://github.com/ClickHouse/ClickHouse/pull/52376) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Now interserver port will be closed only after tables are shut down. [#52498](https://github.com/ClickHouse/ClickHouse/pull/52498) ([alesapin](https://github.com/alesapin)).
|
||||||
|
|
||||||
|
#### Experimental Feature
|
||||||
|
* Added support for [PRQL](https://prql-lang.org/) as a query language. [#50686](https://github.com/ClickHouse/ClickHouse/pull/50686) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Allow to add disk name for custom disks. Previously custom disks would use an internal generated disk name. Now it will be possible with `disk = disk_<name>(...)` (e.g. disk will have name `name`) . [#51552](https://github.com/ClickHouse/ClickHouse/pull/51552) ([Kseniia Sumarokova](https://github.com/kssenii)). This syntax can be changed in this release.
|
||||||
|
* (experimental MaterializedMySQL) Fixed crash when `mysqlxx::Pool::Entry` is used after it was disconnected. [#52063](https://github.com/ClickHouse/ClickHouse/pull/52063) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* (experimental MaterializedMySQL) `CREATE TABLE ... AS SELECT` .. is now supported in MaterializedMySQL. [#52067](https://github.com/ClickHouse/ClickHouse/pull/52067) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* (experimental MaterializedMySQL) Introduced automatic conversion of text types to utf8 for MaterializedMySQL. [#52084](https://github.com/ClickHouse/ClickHouse/pull/52084) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* (experimental MaterializedMySQL) Now unquoted UTF-8 strings are supported in DDL for MaterializedMySQL. [#52318](https://github.com/ClickHouse/ClickHouse/pull/52318) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* (experimental MaterializedMySQL) Now double quoted comments are supported in MaterializedMySQL. [#52355](https://github.com/ClickHouse/ClickHouse/pull/52355) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Upgrade Intel QPL from v1.1.0 to v1.2.0 2. Upgrade Intel accel-config from v3.5 to v4.0 3. Fixed issue that Device IOTLB miss has big perf. impact for IAA accelerators. [#52180](https://github.com/ClickHouse/ClickHouse/pull/52180) ([jasperzhu](https://github.com/jinjunzh)).
|
||||||
|
* The `session_timezone` setting (new in version 23.6) is demoted to experimental. [#52445](https://github.com/ClickHouse/ClickHouse/pull/52445) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support ZooKeeper `reconfig` command for ClickHouse Keeper with incremental reconfiguration which can be enabled via `keeper_server.enable_reconfiguration` setting. Support adding servers, removing servers, and changing server priorities. [#49450](https://github.com/ClickHouse/ClickHouse/pull/49450) ([Mike Kot](https://github.com/myrrc)). It is suspected that this feature is incomplete.
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Add experimental ClickHouse builds for Linux RISC-V 64 to CI. [#31398](https://github.com/ClickHouse/ClickHouse/pull/31398) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add integration test check with the enabled Analyzer. [#50926](https://github.com/ClickHouse/ClickHouse/pull/50926) [#52210](https://github.com/ClickHouse/ClickHouse/pull/52210) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Reproducible builds for Rust. [#52395](https://github.com/ClickHouse/ClickHouse/pull/52395) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update Cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Make the function `CHColumnToArrowColumn::fillArrowArrayWithArrayColumnData` to work with nullable arrays, which are not possible in ClickHouse, but needed for Gluten. [#52112](https://github.com/ClickHouse/ClickHouse/pull/52112) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* We've updated the CCTZ library to master, but there are no user-visible changes. [#52124](https://github.com/ClickHouse/ClickHouse/pull/52124) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The `system.licenses` table now includes the hard-forked library Poco. This closes [#52066](https://github.com/ClickHouse/ClickHouse/issues/52066). [#52127](https://github.com/ClickHouse/ClickHouse/pull/52127) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Check that there are no cases of bad punctuation: whitespace before a comma like `Hello ,world` instead of `Hello, world`. [#52549](https://github.com/ClickHouse/ClickHouse/pull/52549) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Fix MaterializedPostgreSQL syncTables [#49698](https://github.com/ClickHouse/ClickHouse/pull/49698) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix projection with optimize_aggregators_of_group_by_keys [#49709](https://github.com/ClickHouse/ClickHouse/pull/49709) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix optimize_skip_unused_shards with JOINs [#51037](https://github.com/ClickHouse/ClickHouse/pull/51037) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix formatDateTime() with fractional negative datetime64 [#51290](https://github.com/ClickHouse/ClickHouse/pull/51290) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Functions `hasToken*` were totally wrong. Add a test for [#43358](https://github.com/ClickHouse/ClickHouse/issues/43358) [#51378](https://github.com/ClickHouse/ClickHouse/pull/51378) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix optimization to move functions before sorting. [#51481](https://github.com/ClickHouse/ClickHouse/pull/51481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fix SIGSEGV for clusters with zero weight across all shards (fixes INSERT INTO FUNCTION clusterAllReplicas()) [#51545](https://github.com/ClickHouse/ClickHouse/pull/51545) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix timeout for hedged requests [#51582](https://github.com/ClickHouse/ClickHouse/pull/51582) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix logical error in ANTI join with NULL [#51601](https://github.com/ClickHouse/ClickHouse/pull/51601) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Do not apply PredicateExpressionsOptimizer for ASOF/ANTI join [#51633](https://github.com/ClickHouse/ClickHouse/pull/51633) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix async insert with deduplication for ReplicatedMergeTree using merging algorithms [#51676](https://github.com/ClickHouse/ClickHouse/pull/51676) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fix segfault when create invalid EmbeddedRocksdb table [#51847](https://github.com/ClickHouse/ClickHouse/pull/51847) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix inserts into MongoDB tables [#51876](https://github.com/ClickHouse/ClickHouse/pull/51876) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix deadlock on DatabaseCatalog shutdown [#51908](https://github.com/ClickHouse/ClickHouse/pull/51908) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix error in subquery operators [#51922](https://github.com/ClickHouse/ClickHouse/pull/51922) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix async connect to hosts with multiple ips [#51934](https://github.com/ClickHouse/ClickHouse/pull/51934) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Do not remove inputs after ActionsDAG::merge [#51947](https://github.com/ClickHouse/ClickHouse/pull/51947) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Small fix for toDateTime64() for dates after 2283-12-31 [#52130](https://github.com/ClickHouse/ClickHouse/pull/52130) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix incorrect projection analysis when aggregation expression contains monotonic functions [#52151](https://github.com/ClickHouse/ClickHouse/pull/52151) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix error in `groupArrayMoving` functions [#52161](https://github.com/ClickHouse/ClickHouse/pull/52161) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Disable direct join for range dictionary [#52187](https://github.com/ClickHouse/ClickHouse/pull/52187) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix sticky mutations test (and extremely rare race condition) [#52197](https://github.com/ClickHouse/ClickHouse/pull/52197) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix race in Web disk [#52211](https://github.com/ClickHouse/ClickHouse/pull/52211) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix data race in Connection::setAsyncCallback on unknown packet from server [#52219](https://github.com/ClickHouse/ClickHouse/pull/52219) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix temp data deletion on startup, add test [#52275](https://github.com/ClickHouse/ClickHouse/pull/52275) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Don't use minmax_count projections when counting nullable columns [#52297](https://github.com/ClickHouse/ClickHouse/pull/52297) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* MergeTree/ReplicatedMergeTree should use server timezone for log entries [#52325](https://github.com/ClickHouse/ClickHouse/pull/52325) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix parameterized view with cte and multiple usage [#52328](https://github.com/ClickHouse/ClickHouse/pull/52328) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `apply_snapshot` in Keeper [#52358](https://github.com/ClickHouse/ClickHouse/pull/52358) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update build-osx.md [#52377](https://github.com/ClickHouse/ClickHouse/pull/52377) ([AlexBykovski](https://github.com/AlexBykovski)).
|
||||||
|
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Fix normal projection with merge table [#52432](https://github.com/ClickHouse/ClickHouse/pull/52432) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix possible double-free in Aggregator [#52439](https://github.com/ClickHouse/ClickHouse/pull/52439) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||||
|
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Check recursion depth in OptimizedRegularExpression [#52451](https://github.com/ClickHouse/ClickHouse/pull/52451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix data-race DatabaseReplicated::startupTables()/canExecuteReplicatedMetadataAlter() [#52490](https://github.com/ClickHouse/ClickHouse/pull/52490) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix abort in function `transform` [#52513](https://github.com/ClickHouse/ClickHouse/pull/52513) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix lightweight delete after drop of projection [#52517](https://github.com/ClickHouse/ClickHouse/pull/52517) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix possible error "Cannot drain connections: cancel first" [#52585](https://github.com/ClickHouse/ClickHouse/pull/52585) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="236"></a> ClickHouse release 23.6, 2023-06-29
|
### <a id="236"></a> ClickHouse release 23.6, 2023-06-29
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
@ -87,7 +87,6 @@ if (ENABLE_FUZZING)
|
|||||||
set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
|
set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
|
||||||
set (ENABLE_LIBRARIES 0)
|
set (ENABLE_LIBRARIES 0)
|
||||||
set (ENABLE_SSL 1)
|
set (ENABLE_SSL 1)
|
||||||
set (USE_UNWIND ON)
|
|
||||||
set (ENABLE_EMBEDDED_COMPILER 0)
|
set (ENABLE_EMBEDDED_COMPILER 0)
|
||||||
set (ENABLE_EXAMPLES 0)
|
set (ENABLE_EXAMPLES 0)
|
||||||
set (ENABLE_UTILS 0)
|
set (ENABLE_UTILS 0)
|
||||||
@ -166,8 +165,14 @@ elseif(GLIBC_COMPATIBILITY)
|
|||||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# Make sure the final executable has symbols exported
|
if (OS_LINUX)
|
||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
|
# We should not export dynamic symbols, because:
|
||||||
|
# - The main clickhouse binary does not use dlopen,
|
||||||
|
# and whatever is poisoning it by LD_PRELOAD should not link to our symbols.
|
||||||
|
# - The clickhouse-odbc-bridge and clickhouse-library-bridge binaries
|
||||||
|
# should not expose their symbols to ODBC drivers and libraries.
|
||||||
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
|
||||||
|
endif ()
|
||||||
|
|
||||||
if (OS_DARWIN)
|
if (OS_DARWIN)
|
||||||
# The `-all_load` flag forces loading of all symbols from all libraries,
|
# The `-all_load` flag forces loading of all symbols from all libraries,
|
||||||
@ -344,9 +349,9 @@ if (COMPILER_CLANG)
|
|||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
|
||||||
|
|
||||||
if (NOT ENABLE_TESTS AND NOT SANITIZE)
|
if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
|
||||||
# https://clang.llvm.org/docs/ThinLTO.html
|
# https://clang.llvm.org/docs/ThinLTO.html
|
||||||
# Applies to clang only.
|
# Applies to clang and linux only.
|
||||||
# Disabled when building with tests or sanitizers.
|
# Disabled when building with tests or sanitizers.
|
||||||
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
|
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
|
||||||
endif()
|
endif()
|
||||||
|
@ -23,7 +23,7 @@ curl https://clickhouse.com/ | sh
|
|||||||
|
|
||||||
## Upcoming Events
|
## Upcoming Events
|
||||||
|
|
||||||
* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
|
* [**v23.7 Release Webinar**](https://clickhouse.com/company/events/v23-7-community-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-07) - Jul 27 - 23.7 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
|
||||||
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
|
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
|
||||||
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
|
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
|
||||||
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
|
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
|
||||||
@ -34,13 +34,13 @@ Also, keep an eye out for upcoming meetups around the world. Somewhere else you
|
|||||||
|
|
||||||
## Recent Recordings
|
## Recent Recordings
|
||||||
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
|
||||||
* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) Faster Parquet Reading, Asynchonous Connections to Reoplicas, Trailing Comma before FROM, extractKeyValuePairs, integrations updates, and so much more! Watch it now!
|
* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
|
||||||
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
|
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
|
||||||
|
|
||||||
|
|
||||||
## Interested in joining ClickHouse and making it your full time job?
|
## Interested in joining ClickHouse and making it your full-time job?
|
||||||
|
|
||||||
We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker as well as a doer - we’ll definitely click!
|
We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting-edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker and a doer - we’ll definitely click!
|
||||||
|
|
||||||
Check out our **current openings** here: https://clickhouse.com/company/careers
|
Check out our **current openings** here: https://clickhouse.com/company/careers
|
||||||
|
|
||||||
|
@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
|
|||||||
|
|
||||||
| Version | Supported |
|
| Version | Supported |
|
||||||
|:-|:-|
|
|:-|:-|
|
||||||
|
| 23.7 | ✔️ |
|
||||||
| 23.6 | ✔️ |
|
| 23.6 | ✔️ |
|
||||||
| 23.5 | ✔️ |
|
| 23.5 | ✔️ |
|
||||||
| 23.4 | ✔️ |
|
| 23.4 | ❌ |
|
||||||
| 23.3 | ✔️ |
|
| 23.3 | ✔️ |
|
||||||
| 23.2 | ❌ |
|
| 23.2 | ❌ |
|
||||||
| 23.1 | ❌ |
|
| 23.1 | ❌ |
|
||||||
|
@ -448,7 +448,7 @@ inline char * find_last_not_symbols_or_null(char * begin, char * end)
|
|||||||
/// See https://github.com/boostorg/algorithm/issues/63
|
/// See https://github.com/boostorg/algorithm/issues/63
|
||||||
/// And https://bugs.llvm.org/show_bug.cgi?id=41141
|
/// And https://bugs.llvm.org/show_bug.cgi?id=41141
|
||||||
template <char... symbols, typename To>
|
template <char... symbols, typename To>
|
||||||
inline void splitInto(To & to, const std::string & what, bool token_compress = false)
|
inline To & splitInto(To & to, std::string_view what, bool token_compress = false)
|
||||||
{
|
{
|
||||||
const char * pos = what.data();
|
const char * pos = what.data();
|
||||||
const char * end = pos + what.size();
|
const char * end = pos + what.size();
|
||||||
@ -464,4 +464,6 @@ inline void splitInto(To & to, const std::string & what, bool token_compress = f
|
|||||||
else
|
else
|
||||||
pos = delimiter_or_end;
|
pos = delimiter_or_end;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return to;
|
||||||
}
|
}
|
||||||
|
@ -15,10 +15,9 @@
|
|||||||
|
|
||||||
|
|
||||||
static thread_local uint64_t current_tid = 0;
|
static thread_local uint64_t current_tid = 0;
|
||||||
uint64_t getThreadId()
|
|
||||||
|
static void setCurrentThreadId()
|
||||||
{
|
{
|
||||||
if (!current_tid)
|
|
||||||
{
|
|
||||||
#if defined(OS_ANDROID)
|
#if defined(OS_ANDROID)
|
||||||
current_tid = gettid();
|
current_tid = gettid();
|
||||||
#elif defined(OS_LINUX)
|
#elif defined(OS_LINUX)
|
||||||
@ -33,7 +32,17 @@ uint64_t getThreadId()
|
|||||||
if (0 != pthread_threadid_np(nullptr, ¤t_tid))
|
if (0 != pthread_threadid_np(nullptr, ¤t_tid))
|
||||||
throw std::logic_error("pthread_threadid_np returned error");
|
throw std::logic_error("pthread_threadid_np returned error");
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uint64_t getThreadId()
|
||||||
|
{
|
||||||
|
if (!current_tid)
|
||||||
|
setCurrentThreadId();
|
||||||
|
|
||||||
return current_tid;
|
return current_tid;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void updateCurrentThreadIdAfterFork()
|
||||||
|
{
|
||||||
|
setCurrentThreadId();
|
||||||
|
}
|
||||||
|
@ -3,3 +3,5 @@
|
|||||||
|
|
||||||
/// Obtain thread id from OS. The value is cached in thread local variable.
|
/// Obtain thread id from OS. The value is cached in thread local variable.
|
||||||
uint64_t getThreadId();
|
uint64_t getThreadId();
|
||||||
|
|
||||||
|
void updateCurrentThreadIdAfterFork();
|
||||||
|
9
base/base/move_extend.h
Normal file
9
base/base/move_extend.h
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
/// Extend @p to by moving elements from @p from to @p to end
|
||||||
|
/// @return @p to iterator to first of moved elements.
|
||||||
|
template <class To, class From>
|
||||||
|
typename To::iterator moveExtend(To & to, From && from)
|
||||||
|
{
|
||||||
|
return to.insert(to.end(), std::make_move_iterator(from.begin()), std::make_move_iterator(from.end()));
|
||||||
|
}
|
@ -67,6 +67,8 @@ public:
|
|||||||
|
|
||||||
Message(
|
Message(
|
||||||
const std::string & source, const std::string & text, Priority prio, const char * file, int line, std::string_view fmt_str = {});
|
const std::string & source, const std::string & text, Priority prio, const char * file, int line, std::string_view fmt_str = {});
|
||||||
|
Message(
|
||||||
|
std::string && source, std::string && text, Priority prio, const char * file, int line, std::string_view fmt_str);
|
||||||
/// Creates a Message with the given source, text, priority,
|
/// Creates a Message with the given source, text, priority,
|
||||||
/// source file path and line.
|
/// source file path and line.
|
||||||
///
|
///
|
||||||
|
@ -57,7 +57,7 @@ public:
|
|||||||
URI();
|
URI();
|
||||||
/// Creates an empty URI.
|
/// Creates an empty URI.
|
||||||
|
|
||||||
explicit URI(const std::string & uri);
|
explicit URI(const std::string & uri, bool disable_url_encoding = false);
|
||||||
/// Parses an URI from the given string. Throws a
|
/// Parses an URI from the given string. Throws a
|
||||||
/// SyntaxException if the uri is not valid.
|
/// SyntaxException if the uri is not valid.
|
||||||
|
|
||||||
@ -350,6 +350,10 @@ protected:
|
|||||||
static const std::string ILLEGAL;
|
static const std::string ILLEGAL;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
void encodePath(std::string & encodedStr) const;
|
||||||
|
void decodePath(const std::string & encodedStr);
|
||||||
|
|
||||||
|
|
||||||
std::string _scheme;
|
std::string _scheme;
|
||||||
std::string _userInfo;
|
std::string _userInfo;
|
||||||
std::string _host;
|
std::string _host;
|
||||||
@ -357,6 +361,8 @@ private:
|
|||||||
std::string _path;
|
std::string _path;
|
||||||
std::string _query;
|
std::string _query;
|
||||||
std::string _fragment;
|
std::string _fragment;
|
||||||
|
|
||||||
|
bool _disable_url_encoding = false;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
@ -60,6 +60,19 @@ Message::Message(const std::string& source, const std::string& text, Priority pr
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Message::Message(std::string && source, std::string && text, Priority prio, const char * file, int line, std::string_view fmt_str):
|
||||||
|
_source(std::move(source)),
|
||||||
|
_text(std::move(text)),
|
||||||
|
_prio(prio),
|
||||||
|
_tid(0),
|
||||||
|
_file(file),
|
||||||
|
_line(line),
|
||||||
|
_pMap(0),
|
||||||
|
_fmt_str(fmt_str)
|
||||||
|
{
|
||||||
|
init();
|
||||||
|
}
|
||||||
|
|
||||||
Message::Message(const Message& msg):
|
Message::Message(const Message& msg):
|
||||||
_source(msg._source),
|
_source(msg._source),
|
||||||
_text(msg._text),
|
_text(msg._text),
|
||||||
|
@ -36,8 +36,8 @@ URI::URI():
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
URI::URI(const std::string& uri):
|
URI::URI(const std::string& uri, bool decode_and_encode_path):
|
||||||
_port(0)
|
_port(0), _disable_url_encoding(decode_and_encode_path)
|
||||||
{
|
{
|
||||||
parse(uri);
|
parse(uri);
|
||||||
}
|
}
|
||||||
@ -107,7 +107,8 @@ URI::URI(const URI& uri):
|
|||||||
_port(uri._port),
|
_port(uri._port),
|
||||||
_path(uri._path),
|
_path(uri._path),
|
||||||
_query(uri._query),
|
_query(uri._query),
|
||||||
_fragment(uri._fragment)
|
_fragment(uri._fragment),
|
||||||
|
_disable_url_encoding(uri._disable_url_encoding)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -119,7 +120,8 @@ URI::URI(const URI& baseURI, const std::string& relativeURI):
|
|||||||
_port(baseURI._port),
|
_port(baseURI._port),
|
||||||
_path(baseURI._path),
|
_path(baseURI._path),
|
||||||
_query(baseURI._query),
|
_query(baseURI._query),
|
||||||
_fragment(baseURI._fragment)
|
_fragment(baseURI._fragment),
|
||||||
|
_disable_url_encoding(baseURI._disable_url_encoding)
|
||||||
{
|
{
|
||||||
resolve(relativeURI);
|
resolve(relativeURI);
|
||||||
}
|
}
|
||||||
@ -151,6 +153,7 @@ URI& URI::operator = (const URI& uri)
|
|||||||
_path = uri._path;
|
_path = uri._path;
|
||||||
_query = uri._query;
|
_query = uri._query;
|
||||||
_fragment = uri._fragment;
|
_fragment = uri._fragment;
|
||||||
|
_disable_url_encoding = uri._disable_url_encoding;
|
||||||
}
|
}
|
||||||
return *this;
|
return *this;
|
||||||
}
|
}
|
||||||
@ -181,6 +184,7 @@ void URI::swap(URI& uri)
|
|||||||
std::swap(_path, uri._path);
|
std::swap(_path, uri._path);
|
||||||
std::swap(_query, uri._query);
|
std::swap(_query, uri._query);
|
||||||
std::swap(_fragment, uri._fragment);
|
std::swap(_fragment, uri._fragment);
|
||||||
|
std::swap(_disable_url_encoding, uri._disable_url_encoding);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -201,7 +205,7 @@ std::string URI::toString() const
|
|||||||
std::string uri;
|
std::string uri;
|
||||||
if (isRelative())
|
if (isRelative())
|
||||||
{
|
{
|
||||||
encode(_path, RESERVED_PATH, uri);
|
encodePath(uri);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
@ -217,7 +221,7 @@ std::string URI::toString() const
|
|||||||
{
|
{
|
||||||
if (!auth.empty() && _path[0] != '/')
|
if (!auth.empty() && _path[0] != '/')
|
||||||
uri += '/';
|
uri += '/';
|
||||||
encode(_path, RESERVED_PATH, uri);
|
encodePath(uri);
|
||||||
}
|
}
|
||||||
else if (!_query.empty() || !_fragment.empty())
|
else if (!_query.empty() || !_fragment.empty())
|
||||||
{
|
{
|
||||||
@ -313,7 +317,7 @@ void URI::setAuthority(const std::string& authority)
|
|||||||
void URI::setPath(const std::string& path)
|
void URI::setPath(const std::string& path)
|
||||||
{
|
{
|
||||||
_path.clear();
|
_path.clear();
|
||||||
decode(path, _path);
|
decodePath(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -418,7 +422,7 @@ void URI::setPathEtc(const std::string& pathEtc)
|
|||||||
std::string URI::getPathEtc() const
|
std::string URI::getPathEtc() const
|
||||||
{
|
{
|
||||||
std::string pathEtc;
|
std::string pathEtc;
|
||||||
encode(_path, RESERVED_PATH, pathEtc);
|
encodePath(pathEtc);
|
||||||
if (!_query.empty())
|
if (!_query.empty())
|
||||||
{
|
{
|
||||||
pathEtc += '?';
|
pathEtc += '?';
|
||||||
@ -436,7 +440,7 @@ std::string URI::getPathEtc() const
|
|||||||
std::string URI::getPathAndQuery() const
|
std::string URI::getPathAndQuery() const
|
||||||
{
|
{
|
||||||
std::string pathAndQuery;
|
std::string pathAndQuery;
|
||||||
encode(_path, RESERVED_PATH, pathAndQuery);
|
encodePath(pathAndQuery);
|
||||||
if (!_query.empty())
|
if (!_query.empty())
|
||||||
{
|
{
|
||||||
pathAndQuery += '?';
|
pathAndQuery += '?';
|
||||||
@ -681,6 +685,21 @@ void URI::decode(const std::string& str, std::string& decodedStr, bool plusAsSpa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void URI::encodePath(std::string & encodedStr) const
|
||||||
|
{
|
||||||
|
if (_disable_url_encoding)
|
||||||
|
encodedStr = _path;
|
||||||
|
else
|
||||||
|
encode(_path, RESERVED_PATH, encodedStr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void URI::decodePath(const std::string & encodedStr)
|
||||||
|
{
|
||||||
|
if (_disable_url_encoding)
|
||||||
|
_path = encodedStr;
|
||||||
|
else
|
||||||
|
decode(encodedStr, _path);
|
||||||
|
}
|
||||||
|
|
||||||
bool URI::isWellKnownPort() const
|
bool URI::isWellKnownPort() const
|
||||||
{
|
{
|
||||||
@ -820,7 +839,7 @@ void URI::parsePath(std::string::const_iterator& it, const std::string::const_it
|
|||||||
{
|
{
|
||||||
std::string path;
|
std::string path;
|
||||||
while (it != end && *it != '?' && *it != '#') path += *it++;
|
while (it != end && *it != '?' && *it != '#') path += *it++;
|
||||||
decode(path, _path);
|
decodePath(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -306,7 +306,7 @@ namespace Net
|
|||||||
DEFAULT_KEEP_ALIVE_TIMEOUT = 8
|
DEFAULT_KEEP_ALIVE_TIMEOUT = 8
|
||||||
};
|
};
|
||||||
|
|
||||||
void reconnect();
|
virtual void reconnect();
|
||||||
/// Connects the underlying socket to the HTTP server.
|
/// Connects the underlying socket to the HTTP server.
|
||||||
|
|
||||||
int write(const char * buffer, std::streamsize length);
|
int write(const char * buffer, std::streamsize length);
|
||||||
|
@ -2,11 +2,11 @@
|
|||||||
|
|
||||||
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
||||||
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
||||||
SET(VERSION_REVISION 54476)
|
SET(VERSION_REVISION 54477)
|
||||||
SET(VERSION_MAJOR 23)
|
SET(VERSION_MAJOR 23)
|
||||||
SET(VERSION_MINOR 7)
|
SET(VERSION_MINOR 8)
|
||||||
SET(VERSION_PATCH 1)
|
SET(VERSION_PATCH 1)
|
||||||
SET(VERSION_GITHASH d1c7e13d08868cb04d3562dcced704dd577cb1df)
|
SET(VERSION_GITHASH a70127baecc451f1f7073bad7b6198f6703441d8)
|
||||||
SET(VERSION_DESCRIBE v23.7.1.1-testing)
|
SET(VERSION_DESCRIBE v23.8.1.1-testing)
|
||||||
SET(VERSION_STRING 23.7.1.1)
|
SET(VERSION_STRING 23.8.1.1)
|
||||||
# end of autochange
|
# end of autochange
|
||||||
|
@ -15,6 +15,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
|
|||||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||||
find_package(Threads REQUIRED)
|
find_package(Threads REQUIRED)
|
||||||
|
|
||||||
|
include (cmake/unwind.cmake)
|
||||||
include (cmake/cxx.cmake)
|
include (cmake/cxx.cmake)
|
||||||
link_libraries(global-group)
|
link_libraries(global-group)
|
||||||
|
|
||||||
|
@ -1,58 +0,0 @@
|
|||||||
# Embed a set of resource files into a resulting object file.
|
|
||||||
#
|
|
||||||
# Signature: `clickhouse_embed_binaries(TARGET <target> RESOURCE_DIR <dir> RESOURCES <resource> ...)
|
|
||||||
#
|
|
||||||
# This will generate a static library target named `<target>`, which contains the contents of
|
|
||||||
# each `<resource>` file. The files should be located in `<dir>`. <dir> defaults to
|
|
||||||
# ${CMAKE_CURRENT_SOURCE_DIR}, and the resources may not be empty.
|
|
||||||
#
|
|
||||||
# Each resource will result in three symbols in the final archive, based on the name `<resource>`.
|
|
||||||
# These are:
|
|
||||||
# 1. `_binary_<name>_start`: Points to the start of the binary data from `<resource>`.
|
|
||||||
# 2. `_binary_<name>_end`: Points to the end of the binary data from `<resource>`.
|
|
||||||
# 2. `_binary_<name>_size`: Points to the size of the binary data from `<resource>`.
|
|
||||||
#
|
|
||||||
# `<name>` is a normalized name derived from `<resource>`, by replacing the characters "./-" with
|
|
||||||
# the character "_", and the character "+" with "_PLUS_". This scheme is similar to those generated
|
|
||||||
# by `ld -r -b binary`, and matches the expectations in `./base/common/getResource.cpp`.
|
|
||||||
macro(clickhouse_embed_binaries)
|
|
||||||
set(one_value_args TARGET RESOURCE_DIR)
|
|
||||||
set(resources RESOURCES)
|
|
||||||
cmake_parse_arguments(EMBED "" "${one_value_args}" ${resources} ${ARGN})
|
|
||||||
|
|
||||||
if (NOT DEFINED EMBED_TARGET)
|
|
||||||
message(FATAL_ERROR "A target name must be provided for embedding binary resources into")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (NOT DEFINED EMBED_RESOURCE_DIR)
|
|
||||||
set(EMBED_RESOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
list(LENGTH EMBED_RESOURCES N_RESOURCES)
|
|
||||||
if (N_RESOURCES LESS 1)
|
|
||||||
message(FATAL_ERROR "The list of binary resources to embed may not be empty")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_library("${EMBED_TARGET}" STATIC)
|
|
||||||
set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)
|
|
||||||
|
|
||||||
set(EMBED_TEMPLATE_FILE "${PROJECT_SOURCE_DIR}/programs/embed_binary.S.in")
|
|
||||||
|
|
||||||
foreach(RESOURCE_FILE ${EMBED_RESOURCES})
|
|
||||||
set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")
|
|
||||||
set(BINARY_FILE_NAME "${RESOURCE_FILE}")
|
|
||||||
|
|
||||||
# Normalize the name of the resource.
|
|
||||||
string(REGEX REPLACE "[\./-]" "_" SYMBOL_NAME "${RESOURCE_FILE}") # - must be last in regex
|
|
||||||
string(REPLACE "+" "_PLUS_" SYMBOL_NAME "${SYMBOL_NAME}")
|
|
||||||
|
|
||||||
# Generate the configured assembly file in the output directory.
|
|
||||||
configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY)
|
|
||||||
|
|
||||||
# Set the include directory for relative paths specified for `.incbin` directive.
|
|
||||||
set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}")
|
|
||||||
|
|
||||||
target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}")
|
|
||||||
set_target_properties("${EMBED_TARGET}" PROPERTIES OBJECT_DEPENDS "${RESOURCE_FILE}")
|
|
||||||
endforeach()
|
|
||||||
endmacro()
|
|
@ -1,38 +1,39 @@
|
|||||||
# Usage:
|
# Limit compiler/linker job concurrency to avoid OOMs on subtrees where compilation/linking is memory-intensive.
|
||||||
# set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # In megabytes
|
#
|
||||||
# set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "")
|
# Usage from CMake:
|
||||||
|
# set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # megabyte
|
||||||
|
# set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "") # megabyte
|
||||||
# include (cmake/limit_jobs.cmake)
|
# include (cmake/limit_jobs.cmake)
|
||||||
|
#
|
||||||
|
# (bigger values mean fewer jobs)
|
||||||
|
|
||||||
cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY) # Not available under freebsd
|
cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY)
|
||||||
cmake_host_system_information(RESULT NUMBER_OF_LOGICAL_CORES QUERY NUMBER_OF_LOGICAL_CORES)
|
cmake_host_system_information(RESULT NUMBER_OF_LOGICAL_CORES QUERY NUMBER_OF_LOGICAL_CORES)
|
||||||
|
|
||||||
# 1 if not set
|
# Set to disable the automatic job-limiting
|
||||||
option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" "")
|
option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" OFF)
|
||||||
|
option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" OFF)
|
||||||
|
|
||||||
# 1 if not set
|
if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY)
|
||||||
option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" "")
|
|
||||||
|
|
||||||
if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
|
|
||||||
math(EXPR PARALLEL_COMPILE_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_COMPILER_MEMORY})
|
math(EXPR PARALLEL_COMPILE_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_COMPILER_MEMORY})
|
||||||
|
|
||||||
if (NOT PARALLEL_COMPILE_JOBS)
|
if (NOT PARALLEL_COMPILE_JOBS)
|
||||||
set (PARALLEL_COMPILE_JOBS 1)
|
set (PARALLEL_COMPILE_JOBS 1)
|
||||||
endif ()
|
endif ()
|
||||||
|
if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
|
||||||
|
message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
|
||||||
|
endif()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))
|
if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY)
|
||||||
set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
|
|
||||||
string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
|
|
||||||
set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
|
|
||||||
if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
|
|
||||||
math(EXPR PARALLEL_LINK_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_LINKER_MEMORY})
|
math(EXPR PARALLEL_LINK_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_LINKER_MEMORY})
|
||||||
|
|
||||||
if (NOT PARALLEL_LINK_JOBS)
|
if (NOT PARALLEL_LINK_JOBS)
|
||||||
set (PARALLEL_LINK_JOBS 1)
|
set (PARALLEL_LINK_JOBS 1)
|
||||||
endif ()
|
endif ()
|
||||||
|
if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
|
||||||
|
message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
|
||||||
|
endif()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# ThinLTO provides its own parallel linking
|
# ThinLTO provides its own parallel linking
|
||||||
@ -46,14 +47,16 @@ if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLE
|
|||||||
set (PARALLEL_LINK_JOBS 2)
|
set (PARALLEL_LINK_JOBS 2)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (PARALLEL_LINK_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES))
|
message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB DRAM, 'OFF' means the native core count).")
|
||||||
|
|
||||||
|
if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
|
||||||
|
set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
|
||||||
|
string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
|
||||||
|
set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
|
||||||
set(CMAKE_JOB_POOL_LINK link_job_pool${CMAKE_CURRENT_SOURCE_DIR})
|
set(CMAKE_JOB_POOL_LINK link_job_pool${CMAKE_CURRENT_SOURCE_DIR})
|
||||||
string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_LINK ${CMAKE_JOB_POOL_LINK})
|
string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_LINK ${CMAKE_JOB_POOL_LINK})
|
||||||
set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_LINK}=${PARALLEL_LINK_JOBS})
|
set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_LINK}=${PARALLEL_LINK_JOBS})
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
|
|
||||||
message(STATUS
|
|
||||||
"${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory.
|
|
||||||
Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
|
|
||||||
endif ()
|
|
||||||
|
@ -22,8 +22,9 @@ macro(clickhouse_split_debug_symbols)
|
|||||||
# Splits debug symbols into separate file, leaves the binary untouched:
|
# Splits debug symbols into separate file, leaves the binary untouched:
|
||||||
COMMAND "${OBJCOPY_PATH}" --only-keep-debug "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
|
COMMAND "${OBJCOPY_PATH}" --only-keep-debug "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
|
||||||
COMMAND chmod 0644 "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
|
COMMAND chmod 0644 "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug"
|
||||||
# Strips binary, sections '.note' & '.comment' are removed in line with Debian's stripping policy: www.debian.org/doc/debian-policy/ch-files.html, section '.clickhouse.hash' is needed for integrity check:
|
# Strips binary, sections '.note' & '.comment' are removed in line with Debian's stripping policy: www.debian.org/doc/debian-policy/ch-files.html, section '.clickhouse.hash' is needed for integrity check.
|
||||||
COMMAND "${STRIP_PATH}" --remove-section=.comment --remove-section=.note --keep-section=.clickhouse.hash "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
# Also, after we disabled the export of symbols for dynamic linking, we still to keep a static symbol table for good stack traces.
|
||||||
|
COMMAND "${STRIP_PATH}" --strip-debug --remove-section=.comment --remove-section=.note "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
||||||
# Associate stripped binary with debug symbols:
|
# Associate stripped binary with debug symbols:
|
||||||
COMMAND "${OBJCOPY_PATH}" --add-gnu-debuglink "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
COMMAND "${OBJCOPY_PATH}" --add-gnu-debuglink "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}"
|
||||||
COMMENT "Stripping clickhouse binary" VERBATIM
|
COMMENT "Stripping clickhouse binary" VERBATIM
|
||||||
|
@ -33,6 +33,18 @@ if (CMAKE_CROSSCOMPILING)
|
|||||||
elseif (ARCH_PPC64LE)
|
elseif (ARCH_PPC64LE)
|
||||||
set (ENABLE_GRPC OFF CACHE INTERNAL "")
|
set (ENABLE_GRPC OFF CACHE INTERNAL "")
|
||||||
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
|
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
|
||||||
|
elseif (ARCH_RISCV64)
|
||||||
|
# RISC-V support is preliminary
|
||||||
|
set (GLIBC_COMPATIBILITY OFF CACHE INTERNAL "")
|
||||||
|
set (ENABLE_LDAP OFF CACHE INTERNAL "")
|
||||||
|
set (OPENSSL_NO_ASM ON CACHE INTERNAL "")
|
||||||
|
set (ENABLE_JEMALLOC ON CACHE INTERNAL "")
|
||||||
|
set (ENABLE_PARQUET OFF CACHE INTERNAL "")
|
||||||
|
set (ENABLE_GRPC OFF CACHE INTERNAL "")
|
||||||
|
set (ENABLE_HDFS OFF CACHE INTERNAL "")
|
||||||
|
set (ENABLE_MYSQL OFF CACHE INTERNAL "")
|
||||||
|
# It might be ok, but we need to update 'sysroot'
|
||||||
|
set (ENABLE_RUST OFF CACHE INTERNAL "")
|
||||||
elseif (ARCH_S390X)
|
elseif (ARCH_S390X)
|
||||||
set (ENABLE_GRPC OFF CACHE INTERNAL "")
|
set (ENABLE_GRPC OFF CACHE INTERNAL "")
|
||||||
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
|
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
|
||||||
|
@ -1,13 +1 @@
|
|||||||
option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES})
|
add_subdirectory(contrib/libunwind-cmake)
|
||||||
|
|
||||||
if (USE_UNWIND)
|
|
||||||
add_subdirectory(contrib/libunwind-cmake)
|
|
||||||
set (UNWIND_LIBRARIES unwind)
|
|
||||||
set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES})
|
|
||||||
|
|
||||||
message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}")
|
|
||||||
else ()
|
|
||||||
set (EXCEPTION_HANDLING_LIBRARY gcc_eh)
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
message (STATUS "Using exception handler: ${EXCEPTION_HANDLING_LIBRARY}")
|
|
||||||
|
3
contrib/CMakeLists.txt
vendored
3
contrib/CMakeLists.txt
vendored
@ -164,14 +164,13 @@ add_contrib (libpq-cmake libpq)
|
|||||||
add_contrib (nuraft-cmake NuRaft)
|
add_contrib (nuraft-cmake NuRaft)
|
||||||
add_contrib (fast_float-cmake fast_float)
|
add_contrib (fast_float-cmake fast_float)
|
||||||
add_contrib (datasketches-cpp-cmake datasketches-cpp)
|
add_contrib (datasketches-cpp-cmake datasketches-cpp)
|
||||||
add_contrib (hashidsxx-cmake hashidsxx)
|
add_contrib (incbin-cmake incbin)
|
||||||
|
|
||||||
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
|
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
|
||||||
if (ENABLE_NLP)
|
if (ENABLE_NLP)
|
||||||
add_contrib (libstemmer-c-cmake libstemmer_c)
|
add_contrib (libstemmer-c-cmake libstemmer_c)
|
||||||
add_contrib (wordnet-blast-cmake wordnet-blast)
|
add_contrib (wordnet-blast-cmake wordnet-blast)
|
||||||
add_contrib (lemmagen-c-cmake lemmagen-c)
|
add_contrib (lemmagen-c-cmake lemmagen-c)
|
||||||
add_contrib (nlp-data-cmake nlp-data)
|
|
||||||
add_contrib (cld2-cmake cld2)
|
add_contrib (cld2-cmake cld2)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
2
contrib/NuRaft
vendored
2
contrib/NuRaft
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 491eaf592d950e0e37accbe8b3f217e068c9fecf
|
Subproject commit eb1572129c71beb2156dcdaadc3fb136954aed96
|
@ -502,9 +502,10 @@ target_include_directories(_parquet SYSTEM BEFORE
|
|||||||
"${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src"
|
"${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src"
|
||||||
"${CMAKE_CURRENT_SOURCE_DIR}/cpp/src")
|
"${CMAKE_CURRENT_SOURCE_DIR}/cpp/src")
|
||||||
target_link_libraries(_parquet
|
target_link_libraries(_parquet
|
||||||
PUBLIC _arrow
|
PUBLIC
|
||||||
PRIVATE
|
_arrow
|
||||||
ch_contrib::thrift
|
ch_contrib::thrift
|
||||||
|
PRIVATE
|
||||||
boost::headers_only
|
boost::headers_only
|
||||||
boost::regex
|
boost::regex
|
||||||
OpenSSL::Crypto OpenSSL::SSL)
|
OpenSSL::Crypto OpenSSL::SSL)
|
||||||
|
2
contrib/cctz
vendored
2
contrib/cctz
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 5e05432420f9692418e2e12aff09859e420b14a2
|
Subproject commit 8529bcef5cd996b7c0f4d7475286b76b5d126c4c
|
@ -1,4 +1,3 @@
|
|||||||
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
|
|
||||||
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz")
|
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz")
|
||||||
|
|
||||||
set (SRCS
|
set (SRCS
|
||||||
@ -23,12 +22,10 @@ if (OS_FREEBSD)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# Related to time_zones table:
|
# Related to time_zones table:
|
||||||
# StorageSystemTimeZones.generated.cpp is autogenerated each time during a build
|
# TimeZones.generated.cpp is autogenerated each time during a build
|
||||||
# data in this file will be used to populate the system.time_zones table, this is specific to OS_LINUX
|
set(TIMEZONES_FILE "${CMAKE_CURRENT_BINARY_DIR}/TimeZones.generated.cpp")
|
||||||
# as the library that's built using embedded tzdata is also specific to OS_LINUX
|
|
||||||
set(SYSTEM_STORAGE_TZ_FILE "${PROJECT_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
|
|
||||||
# remove existing copies so that its generated fresh on each build.
|
# remove existing copies so that its generated fresh on each build.
|
||||||
file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})
|
file(REMOVE ${TIMEZONES_FILE})
|
||||||
|
|
||||||
# get the list of timezones from tzdata shipped with cctz
|
# get the list of timezones from tzdata shipped with cctz
|
||||||
set(TZDIR "${LIBRARY_DIR}/testdata/zoneinfo")
|
set(TZDIR "${LIBRARY_DIR}/testdata/zoneinfo")
|
||||||
@ -36,28 +33,44 @@ file(STRINGS "${LIBRARY_DIR}/testdata/version" TZDATA_VERSION)
|
|||||||
set_property(GLOBAL PROPERTY TZDATA_VERSION_PROP "${TZDATA_VERSION}")
|
set_property(GLOBAL PROPERTY TZDATA_VERSION_PROP "${TZDATA_VERSION}")
|
||||||
message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
|
message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
|
||||||
|
|
||||||
set(TIMEZONE_RESOURCE_FILES)
|
|
||||||
|
|
||||||
# each file in that dir (except of tab and localtime) store the info about timezone
|
# each file in that dir (except of tab and localtime) store the info about timezone
|
||||||
execute_process(COMMAND
|
execute_process(COMMAND
|
||||||
bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
|
bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
|
||||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||||
OUTPUT_VARIABLE TIMEZONES)
|
OUTPUT_VARIABLE TIMEZONES)
|
||||||
|
|
||||||
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
|
file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
|
||||||
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "const char * auto_time_zones[] {\n" )
|
file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")
|
||||||
|
|
||||||
|
set (COUNTER 1)
|
||||||
|
foreach(TIMEZONE ${TIMEZONES})
|
||||||
|
file(APPEND ${TIMEZONES_FILE} "INCBIN(resource_timezone${COUNTER}, \"${TZDIR}/${TIMEZONE}\");\n")
|
||||||
|
MATH(EXPR COUNTER "${COUNTER}+1")
|
||||||
|
endforeach(TIMEZONE)
|
||||||
|
|
||||||
|
file(APPEND ${TIMEZONES_FILE} "const char * auto_time_zones[] {\n" )
|
||||||
|
|
||||||
foreach(TIMEZONE ${TIMEZONES})
|
foreach(TIMEZONE ${TIMEZONES})
|
||||||
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} " \"${TIMEZONE}\",\n")
|
file(APPEND ${TIMEZONES_FILE} " \"${TIMEZONE}\",\n")
|
||||||
list(APPEND TIMEZONE_RESOURCE_FILES "${TIMEZONE}")
|
MATH(EXPR COUNTER "${COUNTER}+1")
|
||||||
endforeach(TIMEZONE)
|
endforeach(TIMEZONE)
|
||||||
file(APPEND ${SYSTEM_STORAGE_TZ_FILE} " nullptr};\n")
|
|
||||||
clickhouse_embed_binaries(
|
file(APPEND ${TIMEZONES_FILE} " nullptr\n};\n\n")
|
||||||
TARGET tzdata
|
|
||||||
RESOURCE_DIR "${TZDIR}"
|
file(APPEND ${TIMEZONES_FILE} "#include <string_view>\n\n")
|
||||||
RESOURCES ${TIMEZONE_RESOURCE_FILES}
|
file(APPEND ${TIMEZONES_FILE} "std::string_view getTimeZone(const char * name)\n{\n" )
|
||||||
)
|
|
||||||
add_dependencies(_cctz tzdata)
|
set (COUNTER 1)
|
||||||
target_link_libraries(_cctz INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:tzdata> -Wl,${NO_WHOLE_ARCHIVE}")
|
foreach(TIMEZONE ${TIMEZONES})
|
||||||
|
file(APPEND ${TIMEZONES_FILE} " if (std::string_view(\"${TIMEZONE}\") == name) return { reinterpret_cast<const char *>(gresource_timezone${COUNTER}Data), gresource_timezone${COUNTER}Size };\n")
|
||||||
|
MATH(EXPR COUNTER "${COUNTER}+1")
|
||||||
|
endforeach(TIMEZONE)
|
||||||
|
|
||||||
|
file(APPEND ${TIMEZONES_FILE} " return {};\n")
|
||||||
|
file(APPEND ${TIMEZONES_FILE} "}\n")
|
||||||
|
|
||||||
|
add_library (tzdata ${TIMEZONES_FILE})
|
||||||
|
target_link_libraries(tzdata ch_contrib::incbin)
|
||||||
|
target_link_libraries(_cctz tzdata)
|
||||||
|
|
||||||
add_library(ch_contrib::cctz ALIAS _cctz)
|
add_library(ch_contrib::cctz ALIAS _cctz)
|
||||||
|
1
contrib/hashidsxx
vendored
1
contrib/hashidsxx
vendored
@ -1 +0,0 @@
|
|||||||
Subproject commit 783f6911ccfdaca83e3cfac084c4aad888a80cee
|
|
@ -1,14 +0,0 @@
|
|||||||
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/hashidsxx")
|
|
||||||
|
|
||||||
set (SRCS
|
|
||||||
"${LIBRARY_DIR}/hashids.cpp"
|
|
||||||
)
|
|
||||||
|
|
||||||
set (HDRS
|
|
||||||
"${LIBRARY_DIR}/hashids.h"
|
|
||||||
)
|
|
||||||
|
|
||||||
add_library(_hashidsxx ${SRCS} ${HDRS})
|
|
||||||
target_include_directories(_hashidsxx SYSTEM PUBLIC "${LIBRARY_DIR}")
|
|
||||||
|
|
||||||
add_library(ch_contrib::hashidsxx ALIAS _hashidsxx)
|
|
2
contrib/idxd-config
vendored
2
contrib/idxd-config
vendored
@ -1 +1 @@
|
|||||||
Subproject commit f6605c41a735e3fdfef2d2d18655a33af6490b99
|
Subproject commit a836ce0e42052a69bffbbc14239ab4097f3b77f1
|
1
contrib/incbin
vendored
Submodule
1
contrib/incbin
vendored
Submodule
@ -0,0 +1 @@
|
|||||||
|
Subproject commit 6e576cae5ab5810f25e2631f2e0b80cbe7dc8cbf
|
8
contrib/incbin-cmake/CMakeLists.txt
Normal file
8
contrib/incbin-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/incbin")
|
||||||
|
add_library(_incbin INTERFACE)
|
||||||
|
target_include_directories(_incbin SYSTEM INTERFACE ${LIBRARY_DIR})
|
||||||
|
add_library(ch_contrib::incbin ALIAS _incbin)
|
||||||
|
|
||||||
|
# Warning "incbin is incompatible with bitcode. Using the library will break upload to App Store if you have bitcode enabled.
|
||||||
|
# Add `#define INCBIN_SILENCE_BITCODE_WARNING` before including this header to silence this warning."
|
||||||
|
target_compile_definitions(_incbin INTERFACE INCBIN_SILENCE_BITCODE_WARNING)
|
@ -1,5 +1,5 @@
|
|||||||
if (SANITIZE OR NOT (
|
if (SANITIZE OR NOT (
|
||||||
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
|
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64 OR ARCH_S390X)) OR
|
||||||
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
|
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
|
||||||
))
|
))
|
||||||
if (ENABLE_JEMALLOC)
|
if (ENABLE_JEMALLOC)
|
||||||
@ -17,17 +17,17 @@ if (NOT ENABLE_JEMALLOC)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (NOT OS_LINUX)
|
if (NOT OS_LINUX)
|
||||||
message (WARNING "jemalloc support on non-linux is EXPERIMENTAL")
|
message (WARNING "jemalloc support on non-Linux is EXPERIMENTAL")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (OS_LINUX)
|
if (OS_LINUX)
|
||||||
# ThreadPool select job randomly, and there can be some threads that had been
|
# ThreadPool select job randomly, and there can be some threads that have been
|
||||||
# performed some memory heavy task before and will be inactive for some time,
|
# performed some memory-heavy tasks before and will be inactive for some time,
|
||||||
# but until it will became active again, the memory will not be freed since by
|
# but until it becomes active again, the memory will not be freed since, by
|
||||||
# default each thread has it's own arena, but there should be not more then
|
# default, each thread has its arena, but there should be no more than
|
||||||
# 4*CPU arenas (see opt.nareans description).
|
# 4*CPU arenas (see opt.nareans description).
|
||||||
#
|
#
|
||||||
# By enabling percpu_arena number of arenas limited to number of CPUs and hence
|
# By enabling percpu_arena number of arenas is limited to the number of CPUs, and hence
|
||||||
# this problem should go away.
|
# this problem should go away.
|
||||||
#
|
#
|
||||||
# muzzy_decay_ms -- use MADV_FREE when available on newer Linuxes, to
|
# muzzy_decay_ms -- use MADV_FREE when available on newer Linuxes, to
|
||||||
@ -38,7 +38,7 @@ if (OS_LINUX)
|
|||||||
else()
|
else()
|
||||||
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
|
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
|
||||||
endif()
|
endif()
|
||||||
# CACHE variable is empty, to allow changing defaults without necessity
|
# CACHE variable is empty to allow changing defaults without the necessity
|
||||||
# to purge cache
|
# to purge cache
|
||||||
set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
|
set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
|
||||||
if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
|
if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
|
||||||
@ -148,6 +148,8 @@ elseif (ARCH_PPC64LE)
|
|||||||
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
|
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
|
||||||
elseif (ARCH_RISCV64)
|
elseif (ARCH_RISCV64)
|
||||||
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
|
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
|
||||||
|
elseif (ARCH_S390X)
|
||||||
|
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_s390x")
|
||||||
else ()
|
else ()
|
||||||
message (FATAL_ERROR "internal jemalloc: This arch is not supported")
|
message (FATAL_ERROR "internal jemalloc: This arch is not supported")
|
||||||
endif ()
|
endif ()
|
||||||
@ -170,16 +172,13 @@ endif ()
|
|||||||
|
|
||||||
target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
|
target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)
|
||||||
|
|
||||||
if (USE_UNWIND)
|
# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
|
||||||
# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
|
# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
|
||||||
# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
|
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
|
||||||
# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
|
#
|
||||||
|
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
|
||||||
# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
|
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
|
||||||
|
target_link_libraries (_jemalloc PRIVATE unwind)
|
||||||
target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
|
|
||||||
target_link_libraries (_jemalloc PRIVATE unwind)
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
# for RTLD_NEXT
|
# for RTLD_NEXT
|
||||||
target_compile_options(_jemalloc PRIVATE -D_GNU_SOURCE)
|
target_compile_options(_jemalloc PRIVATE -D_GNU_SOURCE)
|
||||||
|
@ -0,0 +1,435 @@
|
|||||||
|
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
|
||||||
|
#ifndef JEMALLOC_INTERNAL_DEFS_H_
|
||||||
|
#define JEMALLOC_INTERNAL_DEFS_H_
|
||||||
|
/*
|
||||||
|
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
|
||||||
|
* public APIs to be prefixed. This makes it possible, with some care, to use
|
||||||
|
* multiple allocators simultaneously.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_PREFIX */
|
||||||
|
/* #undef JEMALLOC_CPREFIX */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Define overrides for non-standard allocator-related functions if they are
|
||||||
|
* present on the system.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_FREE
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
|
||||||
|
#define JEMALLOC_OVERRIDE___LIBC_PVALLOC
|
||||||
|
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
|
||||||
|
* For shared libraries, symbol visibility mechanisms prevent these symbols
|
||||||
|
* from being exported, but for static libraries, naming collisions are a real
|
||||||
|
* possibility.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_PRIVATE_NAMESPACE je_
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Hyper-threaded CPUs may need a special instruction inside spin loops in
|
||||||
|
* order to yield to another virtual CPU.
|
||||||
|
*/
|
||||||
|
#define CPU_SPINWAIT
|
||||||
|
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
|
||||||
|
#define HAVE_CPU_SPINWAIT 0
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Number of significant bits in virtual addresses. This may be less than the
|
||||||
|
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
|
||||||
|
* bits are the same as bit 47.
|
||||||
|
*/
|
||||||
|
#define LG_VADDR 64
|
||||||
|
|
||||||
|
/* Defined if C11 atomics are available. */
|
||||||
|
#define JEMALLOC_C11_ATOMICS
|
||||||
|
|
||||||
|
/* Defined if GCC __atomic atomics are available. */
|
||||||
|
#define JEMALLOC_GCC_ATOMIC_ATOMICS
|
||||||
|
/* and the 8-bit variant support. */
|
||||||
|
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS
|
||||||
|
|
||||||
|
/* Defined if GCC __sync atomics are available. */
|
||||||
|
#define JEMALLOC_GCC_SYNC_ATOMICS
|
||||||
|
/* and the 8-bit variant support. */
|
||||||
|
#define JEMALLOC_GCC_U8_SYNC_ATOMICS
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if __builtin_clz() and __builtin_clzl() are available.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_HAVE_BUILTIN_CLZ
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
|
||||||
|
|
||||||
|
/* Defined if syscall(2) is usable. */
|
||||||
|
#define JEMALLOC_USE_SYSCALL
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if secure_getenv(3) is available.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_HAVE_SECURE_GETENV
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if issetugid(2) is available.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_HAVE_ISSETUGID */
|
||||||
|
|
||||||
|
/* Defined if pthread_atfork(3) is available. */
|
||||||
|
#define JEMALLOC_HAVE_PTHREAD_ATFORK
|
||||||
|
|
||||||
|
/* Defined if pthread_setname_np(3) is available. */
|
||||||
|
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
|
||||||
|
|
||||||
|
/* Defined if pthread_getname_np(3) is available. */
|
||||||
|
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP
|
||||||
|
|
||||||
|
/* Defined if pthread_get_name_np(3) is available. */
|
||||||
|
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_HAVE_CLOCK_MONOTONIC
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if mach_absolute_time() is available.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_HAVE_CLOCK_REALTIME
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if _malloc_thread_cleanup() exists. At least in the case of
|
||||||
|
* FreeBSD, pthread_key_create() allocates, which if used during malloc
|
||||||
|
* bootstrapping will cause recursion into the pthreads library. Therefore, if
|
||||||
|
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
|
||||||
|
* malloc_tsd.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if threaded initialization is known to be safe on this platform.
|
||||||
|
* Among other things, it must be possible to initialize a mutex without
|
||||||
|
* triggering allocation in order for threaded allocation to be safe.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_THREADED_INIT
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if the pthreads implementation defines
|
||||||
|
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
|
||||||
|
* to avoid recursive allocation during mutex initialization.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_MUTEX_INIT_CB */
|
||||||
|
|
||||||
|
/* Non-empty if the tls_model attribute is supported. */
|
||||||
|
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
|
||||||
|
|
||||||
|
/*
|
||||||
|
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
|
||||||
|
* inline functions.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_DEBUG */
|
||||||
|
|
||||||
|
/* JEMALLOC_STATS enables statistics calculation. */
|
||||||
|
#define JEMALLOC_STATS
|
||||||
|
|
||||||
|
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
|
||||||
|
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
|
||||||
|
|
||||||
|
/* JEMALLOC_PROF enables allocation profiling. */
|
||||||
|
/* #undef JEMALLOC_PROF */
|
||||||
|
|
||||||
|
/* Use libunwind for profile backtracing if defined. */
|
||||||
|
/* #undef JEMALLOC_PROF_LIBUNWIND */
|
||||||
|
|
||||||
|
/* Use libgcc for profile backtracing if defined. */
|
||||||
|
/* #undef JEMALLOC_PROF_LIBGCC */
|
||||||
|
|
||||||
|
/* Use gcc intrinsics for profile backtracing if defined. */
|
||||||
|
/* #undef JEMALLOC_PROF_GCC */
|
||||||
|
|
||||||
|
/* JEMALLOC_PAGEID enabled page id */
|
||||||
|
/* #undef JEMALLOC_PAGEID */
|
||||||
|
|
||||||
|
/* JEMALLOC_HAVE_PRCTL checks prctl */
|
||||||
|
#define JEMALLOC_HAVE_PRCTL
|
||||||
|
|
||||||
|
/*
|
||||||
|
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
|
||||||
|
* segment (DSS).
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_DSS
|
||||||
|
|
||||||
|
/* Support memory filling (junk/zero). */
|
||||||
|
#define JEMALLOC_FILL
|
||||||
|
|
||||||
|
/* Support utrace(2)-based tracing. */
|
||||||
|
/* #undef JEMALLOC_UTRACE */
|
||||||
|
|
||||||
|
/* Support utrace(2)-based tracing (label based signature). */
|
||||||
|
/* #undef JEMALLOC_UTRACE_LABEL */
|
||||||
|
|
||||||
|
/* Support optional abort() on OOM. */
|
||||||
|
/* #undef JEMALLOC_XMALLOC */
|
||||||
|
|
||||||
|
/* Support lazy locking (avoid locking unless a second thread is launched). */
|
||||||
|
/* #undef JEMALLOC_LAZY_LOCK */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
|
||||||
|
* classes).
|
||||||
|
*/
|
||||||
|
/* #undef LG_QUANTUM */
|
||||||
|
|
||||||
|
/* One page is 2^LG_PAGE bytes. */
|
||||||
|
#define LG_PAGE 12
|
||||||
|
|
||||||
|
/* Maximum number of regions in a slab. */
|
||||||
|
/* #undef CONFIG_LG_SLAB_MAXREGS */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
|
||||||
|
* system does not explicitly support huge pages; system calls that require
|
||||||
|
* explicit huge page support are separately configured.
|
||||||
|
*/
|
||||||
|
#define LG_HUGEPAGE 20
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, adjacent virtual memory mappings with identical attributes
|
||||||
|
* automatically coalesce, and they fragment when changes are made to subranges.
|
||||||
|
* This is the normal order of things for mmap()/munmap(), but on Windows
|
||||||
|
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
|
||||||
|
* mappings do *not* coalesce/fragment.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_MAPS_COALESCE
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, retain memory for later reuse by default rather than using e.g.
|
||||||
|
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
|
||||||
|
* common sequences of mmap()/munmap() calls will cause virtual memory map
|
||||||
|
* holes.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_RETAIN
|
||||||
|
|
||||||
|
/* TLS is used to map arenas and magazine caches to threads. */
|
||||||
|
#define JEMALLOC_TLS
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
|
||||||
|
* Don't use this directly; instead use unreachable() from util.h
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
|
||||||
|
* use ffs_*() from util.h.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
|
||||||
|
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
|
||||||
|
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
|
||||||
|
|
||||||
|
/*
|
||||||
|
* popcount*() functions to use for bitmapping.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
|
||||||
|
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, explicitly attempt to more uniformly distribute large allocation
|
||||||
|
* pointer alignments across all cache indices.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_CACHE_OBLIVIOUS
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, enable logging facilities. We make this a configure option to
|
||||||
|
* avoid taking extra branches everywhere.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_LOG */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, use readlinkat() (instead of readlink()) to follow
|
||||||
|
* /etc/malloc_conf.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_READLINKAT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_ZONE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Methods for determining whether the OS overcommits.
|
||||||
|
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
|
||||||
|
* /proc/sys/vm.overcommit_memory file.
|
||||||
|
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
|
||||||
|
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
|
||||||
|
|
||||||
|
/* Defined if madvise(2) is available. */
|
||||||
|
#define JEMALLOC_HAVE_MADVISE
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
|
||||||
|
* arguments to madvise(2).
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_HAVE_MADVISE_HUGE
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Methods for purging unused pages differ between operating systems.
|
||||||
|
*
|
||||||
|
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
|
||||||
|
* will be discarded rather than swapped out.
|
||||||
|
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
|
||||||
|
* defined, this immediately discards pages,
|
||||||
|
* such that new pages will be demand-zeroed if
|
||||||
|
* the address region is later touched;
|
||||||
|
* otherwise this behaves similarly to
|
||||||
|
* MADV_FREE, though typically with higher
|
||||||
|
* system overhead.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_PURGE_MADVISE_FREE
|
||||||
|
#define JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||||
|
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
|
||||||
|
|
||||||
|
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
|
||||||
|
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_MADVISE_DONTDUMP
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_MADVISE_NOCORE */
|
||||||
|
|
||||||
|
/* Defined if mprotect(2) is available. */
|
||||||
|
#define JEMALLOC_HAVE_MPROTECT
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if transparent huge pages (THPs) are supported via the
|
||||||
|
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_THP */
|
||||||
|
|
||||||
|
/* Defined if posix_madvise is available. */
|
||||||
|
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Method for purging unused pages using posix_madvise.
|
||||||
|
*
|
||||||
|
* posix_madvise(..., POSIX_MADV_DONTNEED)
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
|
||||||
|
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if memcntl page admin call is supported
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_HAVE_MEMCNTL */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if malloc_size is supported
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
|
||||||
|
|
||||||
|
/* Define if operating system has alloca.h header. */
|
||||||
|
#define JEMALLOC_HAS_ALLOCA_H
|
||||||
|
|
||||||
|
/* C99 restrict keyword supported. */
|
||||||
|
#define JEMALLOC_HAS_RESTRICT
|
||||||
|
|
||||||
|
/* For use by hash code. */
|
||||||
|
#define JEMALLOC_BIG_ENDIAN
|
||||||
|
|
||||||
|
/* sizeof(int) == 2^LG_SIZEOF_INT. */
|
||||||
|
#define LG_SIZEOF_INT 2
|
||||||
|
|
||||||
|
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
|
||||||
|
#define LG_SIZEOF_LONG 3
|
||||||
|
|
||||||
|
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
|
||||||
|
#define LG_SIZEOF_LONG_LONG 3
|
||||||
|
|
||||||
|
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
|
||||||
|
#define LG_SIZEOF_INTMAX_T 3
|
||||||
|
|
||||||
|
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
|
||||||
|
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
|
||||||
|
|
||||||
|
/* glibc memalign hook. */
|
||||||
|
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
|
||||||
|
|
||||||
|
/* pthread support */
|
||||||
|
#define JEMALLOC_HAVE_PTHREAD
|
||||||
|
|
||||||
|
/* dlsym() support */
|
||||||
|
#define JEMALLOC_HAVE_DLSYM
|
||||||
|
|
||||||
|
/* Adaptive mutex support in pthreads. */
|
||||||
|
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
|
||||||
|
|
||||||
|
/* GNU specific sched_getcpu support */
|
||||||
|
#define JEMALLOC_HAVE_SCHED_GETCPU
|
||||||
|
|
||||||
|
/* GNU specific sched_setaffinity support */
|
||||||
|
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, all the features necessary for background threads are present.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_BACKGROUND_THREAD
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If defined, jemalloc symbols are not exported (doesn't work when
|
||||||
|
* JEMALLOC_PREFIX is not defined).
|
||||||
|
*/
|
||||||
|
/* #undef JEMALLOC_EXPORT */
|
||||||
|
|
||||||
|
/* config.malloc_conf options string. */
|
||||||
|
#define JEMALLOC_CONFIG_MALLOC_CONF ""
|
||||||
|
|
||||||
|
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
|
||||||
|
#define JEMALLOC_IS_MALLOC
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
|
||||||
|
*/
|
||||||
|
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
|
||||||
|
|
||||||
|
/* Performs additional safety checks when defined. */
|
||||||
|
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
|
||||||
|
|
||||||
|
/* Is C++ support being built? */
|
||||||
|
#define JEMALLOC_ENABLE_CXX
|
||||||
|
|
||||||
|
/* Performs additional size checks when defined. */
|
||||||
|
/* #undef JEMALLOC_OPT_SIZE_CHECKS */
|
||||||
|
|
||||||
|
/* Allows sampled junk and stash for checking use-after-free when defined. */
|
||||||
|
/* #undef JEMALLOC_UAF_DETECTION */
|
||||||
|
|
||||||
|
/* Darwin VM_MAKE_TAG support */
|
||||||
|
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
|
||||||
|
|
||||||
|
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
|
||||||
|
#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
|
||||||
|
|
||||||
|
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
|
@ -61,9 +61,7 @@ target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$
|
|||||||
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
|
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
|
||||||
|
|
||||||
# Enable capturing stack traces for all exceptions.
|
# Enable capturing stack traces for all exceptions.
|
||||||
if (USE_UNWIND)
|
target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
|
||||||
target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
if (USE_MUSL)
|
if (USE_MUSL)
|
||||||
target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1)
|
target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1)
|
||||||
|
@ -35,12 +35,10 @@ target_include_directories(cxxabi SYSTEM BEFORE
|
|||||||
)
|
)
|
||||||
target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
|
target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
|
||||||
target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
|
target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
|
||||||
target_link_libraries(cxxabi PUBLIC ${EXCEPTION_HANDLING_LIBRARY})
|
target_link_libraries(cxxabi PUBLIC unwind)
|
||||||
|
|
||||||
# Enable capturing stack traces for all exceptions.
|
# Enable capturing stack traces for all exceptions.
|
||||||
if (USE_UNWIND)
|
target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
|
||||||
target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
install(
|
install(
|
||||||
TARGETS cxxabi
|
TARGETS cxxabi
|
||||||
|
@ -1,15 +0,0 @@
|
|||||||
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
|
|
||||||
|
|
||||||
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/nlp-data")
|
|
||||||
|
|
||||||
add_library (_nlp_data INTERFACE)
|
|
||||||
|
|
||||||
clickhouse_embed_binaries(
|
|
||||||
TARGET nlp_dictionaries
|
|
||||||
RESOURCE_DIR "${LIBRARY_DIR}"
|
|
||||||
RESOURCES charset.zst tonality_ru.zst programming.zst
|
|
||||||
)
|
|
||||||
|
|
||||||
add_dependencies(_nlp_data nlp_dictionaries)
|
|
||||||
target_link_libraries(_nlp_data INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:nlp_dictionaries> -Wl,${NO_WHOLE_ARCHIVE}")
|
|
||||||
add_library(ch_contrib::nlp_data ALIAS _nlp_data)
|
|
2
contrib/qpl
vendored
2
contrib/qpl
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 3f8f5cea27739f5261e8fd577dc233ffe88bf679
|
Subproject commit faaf19350459c076e66bb5df11743c3fade59b73
|
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
esac
|
esac
|
||||||
|
|
||||||
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
|
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
|
||||||
ARG VERSION="23.6.1.1524"
|
ARG VERSION="23.7.1.2470"
|
||||||
ARG PACKAGES="clickhouse-keeper"
|
ARG PACKAGES="clickhouse-keeper"
|
||||||
|
|
||||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||||
|
@ -49,8 +49,8 @@ ENV CARGO_HOME=/rust/cargo
|
|||||||
ENV PATH="/rust/cargo/bin:${PATH}"
|
ENV PATH="/rust/cargo/bin:${PATH}"
|
||||||
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
|
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
|
||||||
chmod 777 -R /rust && \
|
chmod 777 -R /rust && \
|
||||||
rustup toolchain install nightly && \
|
rustup toolchain install nightly-2023-07-04 && \
|
||||||
rustup default nightly && \
|
rustup default nightly-2023-07-04 && \
|
||||||
rustup component add rust-src && \
|
rustup component add rust-src && \
|
||||||
rustup target add aarch64-unknown-linux-gnu && \
|
rustup target add aarch64-unknown-linux-gnu && \
|
||||||
rustup target add x86_64-apple-darwin && \
|
rustup target add x86_64-apple-darwin && \
|
||||||
@ -58,6 +58,33 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
|
|||||||
rustup target add aarch64-apple-darwin && \
|
rustup target add aarch64-apple-darwin && \
|
||||||
rustup target add powerpc64le-unknown-linux-gnu
|
rustup target add powerpc64le-unknown-linux-gnu
|
||||||
|
|
||||||
|
# Create vendor cache for cargo.
|
||||||
|
#
|
||||||
|
# Note, that the config.toml for the root is used, you will not be able to
|
||||||
|
# install any other crates, except those which had been vendored (since if
|
||||||
|
# there is "replace-with" for some source, then cargo will not look to other
|
||||||
|
# remotes except this).
|
||||||
|
#
|
||||||
|
# Notes for the command itself:
|
||||||
|
# - --chown is required to preserve the rights
|
||||||
|
# - unstable-options for -C
|
||||||
|
# - chmod is required to fix the permissions, since builds are running from a different user
|
||||||
|
# - copy of the Cargo.lock is required for proper dependencies versions
|
||||||
|
# - cargo vendor --sync is requried to overcome [1] bug.
|
||||||
|
#
|
||||||
|
# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
|
||||||
|
COPY --chown=root:root /rust /rust/packages
|
||||||
|
RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
|
||||||
|
cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
|
||||||
|
cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
|
||||||
|
rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
|
||||||
|
sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
|
||||||
|
cat $CARGO_HOME/config.toml && \
|
||||||
|
mv /rust/packages/vendor /rust/vendor && \
|
||||||
|
chmod -R o=r+X /rust/vendor && \
|
||||||
|
ls -R -l /rust/packages && \
|
||||||
|
rm -r /rust/packages
|
||||||
|
|
||||||
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
|
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
|
||||||
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
|
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
|
||||||
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
|
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
|
||||||
|
@ -64,7 +64,7 @@ then
|
|||||||
ninja $NINJA_FLAGS clickhouse-keeper
|
ninja $NINJA_FLAGS clickhouse-keeper
|
||||||
|
|
||||||
ls -la ./programs/
|
ls -la ./programs/
|
||||||
ldd ./programs/clickhouse-keeper
|
ldd ./programs/clickhouse-keeper ||:
|
||||||
|
|
||||||
if [ -n "$MAKE_DEB" ]; then
|
if [ -n "$MAKE_DEB" ]; then
|
||||||
# No quotes because I want it to expand to nothing if empty.
|
# No quotes because I want it to expand to nothing if empty.
|
||||||
@ -80,19 +80,9 @@ else
|
|||||||
cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
|
cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "coverity" == "$COMBINED_OUTPUT" ]
|
|
||||||
then
|
|
||||||
mkdir -p /workdir/cov-analysis
|
|
||||||
|
|
||||||
wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /workdir/cov-analysis --strip-components 1
|
|
||||||
export PATH=$PATH:/workdir/cov-analysis/bin
|
|
||||||
cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC"
|
|
||||||
SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# No quotes because I want it to expand to nothing if empty.
|
# No quotes because I want it to expand to nothing if empty.
|
||||||
# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
|
# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
|
||||||
$SCAN_WRAPPER ninja $NINJA_FLAGS $BUILD_TARGET
|
ninja $NINJA_FLAGS $BUILD_TARGET
|
||||||
|
|
||||||
ls -la ./programs
|
ls -la ./programs
|
||||||
|
|
||||||
@ -175,13 +165,6 @@ then
|
|||||||
mv "$COMBINED_OUTPUT.tar.zst" /output
|
mv "$COMBINED_OUTPUT.tar.zst" /output
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "coverity" == "$COMBINED_OUTPUT" ]
|
|
||||||
then
|
|
||||||
# Coverity does not understand ZSTD.
|
|
||||||
tar -cvz -f "coverity-scan.tar.gz" cov-int
|
|
||||||
mv "coverity-scan.tar.gz" /output
|
|
||||||
fi
|
|
||||||
|
|
||||||
ccache_status
|
ccache_status
|
||||||
ccache --evict-older-than 1d
|
ccache --evict-older-than 1d
|
||||||
|
|
||||||
|
1
docker/packager/binary/rust
Symbolic link
1
docker/packager/binary/rust
Symbolic link
@ -0,0 +1 @@
|
|||||||
|
../../../rust
|
@ -138,6 +138,7 @@ def parse_env_variables(
|
|||||||
ARM_V80COMPAT_SUFFIX = "-aarch64-v80compat"
|
ARM_V80COMPAT_SUFFIX = "-aarch64-v80compat"
|
||||||
FREEBSD_SUFFIX = "-freebsd"
|
FREEBSD_SUFFIX = "-freebsd"
|
||||||
PPC_SUFFIX = "-ppc64le"
|
PPC_SUFFIX = "-ppc64le"
|
||||||
|
RISCV_SUFFIX = "-riscv64"
|
||||||
AMD64_COMPAT_SUFFIX = "-amd64-compat"
|
AMD64_COMPAT_SUFFIX = "-amd64-compat"
|
||||||
|
|
||||||
result = []
|
result = []
|
||||||
@ -150,6 +151,7 @@ def parse_env_variables(
|
|||||||
is_cross_arm = compiler.endswith(ARM_SUFFIX)
|
is_cross_arm = compiler.endswith(ARM_SUFFIX)
|
||||||
is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
|
is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
|
||||||
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
|
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
|
||||||
|
is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
|
||||||
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
|
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
|
||||||
is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
|
is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
|
||||||
|
|
||||||
@ -206,6 +208,11 @@ def parse_env_variables(
|
|||||||
cmake_flags.append(
|
cmake_flags.append(
|
||||||
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
|
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
|
||||||
)
|
)
|
||||||
|
elif is_cross_riscv:
|
||||||
|
cc = compiler[: -len(RISCV_SUFFIX)]
|
||||||
|
cmake_flags.append(
|
||||||
|
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
|
||||||
|
)
|
||||||
elif is_amd64_compat:
|
elif is_amd64_compat:
|
||||||
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
|
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
|
||||||
result.append("DEB_ARCH=amd64")
|
result.append("DEB_ARCH=amd64")
|
||||||
@ -246,11 +253,6 @@ def parse_env_variables(
|
|||||||
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
|
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
|
||||||
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")
|
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")
|
||||||
|
|
||||||
# Create combined output archive for performance tests.
|
|
||||||
if package_type == "coverity":
|
|
||||||
result.append("COMBINED_OUTPUT=coverity")
|
|
||||||
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
|
|
||||||
|
|
||||||
if sanitizer:
|
if sanitizer:
|
||||||
result.append(f"SANITIZER={sanitizer}")
|
result.append(f"SANITIZER={sanitizer}")
|
||||||
if build_type:
|
if build_type:
|
||||||
@ -349,7 +351,7 @@ def parse_args() -> argparse.Namespace:
|
|||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--package-type",
|
"--package-type",
|
||||||
choices=["deb", "binary", "coverity"],
|
choices=["deb", "binary"],
|
||||||
required=True,
|
required=True,
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
@ -370,6 +372,7 @@ def parse_args() -> argparse.Namespace:
|
|||||||
"clang-16-aarch64",
|
"clang-16-aarch64",
|
||||||
"clang-16-aarch64-v80compat",
|
"clang-16-aarch64-v80compat",
|
||||||
"clang-16-ppc64le",
|
"clang-16-ppc64le",
|
||||||
|
"clang-16-riscv64",
|
||||||
"clang-16-amd64-compat",
|
"clang-16-amd64-compat",
|
||||||
"clang-16-freebsd",
|
"clang-16-freebsd",
|
||||||
),
|
),
|
||||||
|
@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="23.6.1.1524"
|
ARG VERSION="23.7.1.2470"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||||
|
@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
|
|||||||
|
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
||||||
ARG VERSION="23.6.1.1524"
|
ARG VERSION="23.7.1.2470"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
# set non-empty deb_location_url url to create a docker image
|
# set non-empty deb_location_url url to create a docker image
|
||||||
|
@ -97,8 +97,8 @@ docker run -d \
|
|||||||
|
|
||||||
You may also want to mount:
|
You may also want to mount:
|
||||||
|
|
||||||
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustmenets
|
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
|
||||||
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustmenets
|
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
|
||||||
* `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
|
* `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
|
||||||
|
|
||||||
### Linux capabilities
|
### Linux capabilities
|
||||||
|
@ -11,6 +11,7 @@ RUN apt-get update \
|
|||||||
pv \
|
pv \
|
||||||
ripgrep \
|
ripgrep \
|
||||||
zstd \
|
zstd \
|
||||||
|
locales \
|
||||||
--yes --no-install-recommends
|
--yes --no-install-recommends
|
||||||
|
|
||||||
# Sanitizer options for services (clickhouse-server)
|
# Sanitizer options for services (clickhouse-server)
|
||||||
@ -28,6 +29,9 @@ ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080 second_de
|
|||||||
ENV UBSAN_OPTIONS='print_stacktrace=1'
|
ENV UBSAN_OPTIONS='print_stacktrace=1'
|
||||||
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
|
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
|
||||||
|
|
||||||
|
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
|
||||||
|
ENV LC_ALL en_US.UTF-8
|
||||||
|
|
||||||
ENV TZ=Europe/Moscow
|
ENV TZ=Europe/Moscow
|
||||||
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
|
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
|
||||||
|
|
||||||
|
@ -141,13 +141,13 @@ function clone_submodules
|
|||||||
contrib/jemalloc
|
contrib/jemalloc
|
||||||
contrib/replxx
|
contrib/replxx
|
||||||
contrib/wyhash
|
contrib/wyhash
|
||||||
contrib/hashidsxx
|
|
||||||
contrib/c-ares
|
contrib/c-ares
|
||||||
contrib/morton-nd
|
contrib/morton-nd
|
||||||
contrib/xxHash
|
contrib/xxHash
|
||||||
contrib/simdjson
|
contrib/simdjson
|
||||||
contrib/liburing
|
contrib/liburing
|
||||||
contrib/libfiu
|
contrib/libfiu
|
||||||
|
contrib/incbin
|
||||||
)
|
)
|
||||||
|
|
||||||
git submodule sync
|
git submodule sync
|
||||||
@ -166,7 +166,6 @@ function run_cmake
|
|||||||
"-DENABLE_UTILS=0"
|
"-DENABLE_UTILS=0"
|
||||||
"-DENABLE_EMBEDDED_COMPILER=0"
|
"-DENABLE_EMBEDDED_COMPILER=0"
|
||||||
"-DENABLE_THINLTO=0"
|
"-DENABLE_THINLTO=0"
|
||||||
"-DUSE_UNWIND=1"
|
|
||||||
"-DENABLE_NURAFT=1"
|
"-DENABLE_NURAFT=1"
|
||||||
"-DENABLE_SIMDJSON=1"
|
"-DENABLE_SIMDJSON=1"
|
||||||
"-DENABLE_JEMALLOC=1"
|
"-DENABLE_JEMALLOC=1"
|
||||||
|
@ -291,7 +291,7 @@ quit
|
|||||||
if [ "$server_died" == 1 ]
|
if [ "$server_died" == 1 ]
|
||||||
then
|
then
|
||||||
# The server has died.
|
# The server has died.
|
||||||
if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*' server.log > description.txt
|
if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log > description.txt
|
||||||
then
|
then
|
||||||
echo "Lost connection to server. See the logs." > description.txt
|
echo "Lost connection to server. See the logs." > description.txt
|
||||||
fi
|
fi
|
||||||
|
@ -47,11 +47,13 @@ ENV TZ=Etc/UTC
|
|||||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||||
|
|
||||||
ENV DOCKER_CHANNEL stable
|
ENV DOCKER_CHANNEL stable
|
||||||
|
# Unpin the docker version after the release 24.0.3 is released
|
||||||
|
# https://github.com/moby/moby/issues/45770#issuecomment-1618255130
|
||||||
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
||||||
&& add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
|
&& add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
|
||||||
&& apt-get update \
|
&& apt-get update \
|
||||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||||
docker-ce \
|
docker-ce='5:23.*' \
|
||||||
&& rm -rf \
|
&& rm -rf \
|
||||||
/var/lib/apt/lists/* \
|
/var/lib/apt/lists/* \
|
||||||
/var/cache/debconf \
|
/var/cache/debconf \
|
||||||
@ -96,6 +98,7 @@ RUN python3 -m pip install --no-cache-dir \
|
|||||||
redis \
|
redis \
|
||||||
requests-kerberos \
|
requests-kerberos \
|
||||||
tzlocal==2.1 \
|
tzlocal==2.1 \
|
||||||
|
retry \
|
||||||
urllib3
|
urllib3
|
||||||
|
|
||||||
# Hudi supports only spark 3.3.*, not 3.4
|
# Hudi supports only spark 3.3.*, not 3.4
|
||||||
@ -132,4 +135,5 @@ ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
|
|||||||
|
|
||||||
EXPOSE 2375
|
EXPOSE 2375
|
||||||
ENTRYPOINT ["dockerd-entrypoint.sh"]
|
ENTRYPOINT ["dockerd-entrypoint.sh"]
|
||||||
CMD ["sh", "-c", "pytest $PYTEST_OPTS"]
|
# To pass additional arguments (i.e. list of tests) use PYTEST_ADDOPTS
|
||||||
|
CMD ["sh", "-c", "pytest"]
|
||||||
|
@ -4,6 +4,8 @@ services:
|
|||||||
kafka_zookeeper:
|
kafka_zookeeper:
|
||||||
image: zookeeper:3.4.9
|
image: zookeeper:3.4.9
|
||||||
hostname: kafka_zookeeper
|
hostname: kafka_zookeeper
|
||||||
|
ports:
|
||||||
|
- 2181:2181
|
||||||
environment:
|
environment:
|
||||||
ZOO_MY_ID: 1
|
ZOO_MY_ID: 1
|
||||||
ZOO_PORT: 2181
|
ZOO_PORT: 2181
|
||||||
@ -15,15 +17,14 @@ services:
|
|||||||
image: confluentinc/cp-kafka:5.2.0
|
image: confluentinc/cp-kafka:5.2.0
|
||||||
hostname: kafka1
|
hostname: kafka1
|
||||||
ports:
|
ports:
|
||||||
- ${KAFKA_EXTERNAL_PORT:-8081}:${KAFKA_EXTERNAL_PORT:-8081}
|
- ${KAFKA_EXTERNAL_PORT}:${KAFKA_EXTERNAL_PORT}
|
||||||
environment:
|
environment:
|
||||||
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:${KAFKA_EXTERNAL_PORT},OUTSIDE://kafka1:19092
|
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:${KAFKA_EXTERNAL_PORT},OUTSIDE://kafka1:19092
|
||||||
KAFKA_ADVERTISED_HOST_NAME: kafka1
|
KAFKA_ADVERTISED_HOST_NAME: kafka1
|
||||||
KAFKA_LISTENERS: INSIDE://0.0.0.0:${KAFKA_EXTERNAL_PORT},OUTSIDE://0.0.0.0:19092
|
|
||||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
|
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
|
||||||
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
|
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
|
||||||
KAFKA_BROKER_ID: 1
|
KAFKA_BROKER_ID: 1
|
||||||
KAFKA_ZOOKEEPER_CONNECT: "kafka_zookeeper:2181"
|
KAFKA_ZOOKEEPER_CONNECT: kafka_zookeeper:2181
|
||||||
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
|
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
|
||||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||||
depends_on:
|
depends_on:
|
||||||
@ -35,13 +36,38 @@ services:
|
|||||||
image: confluentinc/cp-schema-registry:5.2.0
|
image: confluentinc/cp-schema-registry:5.2.0
|
||||||
hostname: schema-registry
|
hostname: schema-registry
|
||||||
ports:
|
ports:
|
||||||
- ${SCHEMA_REGISTRY_EXTERNAL_PORT:-12313}:${SCHEMA_REGISTRY_INTERNAL_PORT:-12313}
|
- ${SCHEMA_REGISTRY_EXTERNAL_PORT}:${SCHEMA_REGISTRY_EXTERNAL_PORT}
|
||||||
environment:
|
environment:
|
||||||
SCHEMA_REGISTRY_HOST_NAME: schema-registry
|
SCHEMA_REGISTRY_HOST_NAME: schema-registry
|
||||||
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
|
||||||
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
|
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
|
||||||
|
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:${SCHEMA_REGISTRY_EXTERNAL_PORT}
|
||||||
|
SCHEMA_REGISTRY_SCHEMA_REGISTRY_GROUP_ID: noauth
|
||||||
depends_on:
|
depends_on:
|
||||||
- kafka_zookeeper
|
- kafka_zookeeper
|
||||||
- kafka1
|
- kafka1
|
||||||
|
restart: always
|
||||||
|
security_opt:
|
||||||
|
- label:disable
|
||||||
|
|
||||||
|
schema-registry-auth:
|
||||||
|
image: confluentinc/cp-schema-registry:5.2.0
|
||||||
|
hostname: schema-registry-auth
|
||||||
|
ports:
|
||||||
|
- ${SCHEMA_REGISTRY_AUTH_EXTERNAL_PORT}:${SCHEMA_REGISTRY_AUTH_EXTERNAL_PORT}
|
||||||
|
environment:
|
||||||
|
SCHEMA_REGISTRY_HOST_NAME: schema-registry-auth
|
||||||
|
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:${SCHEMA_REGISTRY_AUTH_EXTERNAL_PORT}
|
||||||
|
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
|
||||||
|
SCHEMA_REGISTRY_AUTHENTICATION_METHOD: BASIC
|
||||||
|
SCHEMA_REGISTRY_AUTHENTICATION_ROLES: user
|
||||||
|
SCHEMA_REGISTRY_AUTHENTICATION_REALM: RealmFooBar
|
||||||
|
SCHEMA_REGISTRY_OPTS: "-Djava.security.auth.login.config=/etc/schema-registry/secrets/schema_registry_jaas.conf"
|
||||||
|
SCHEMA_REGISTRY_SCHEMA_REGISTRY_GROUP_ID: auth
|
||||||
|
volumes:
|
||||||
|
- ${SCHEMA_REGISTRY_DIR:-}/secrets:/etc/schema-registry/secrets
|
||||||
|
depends_on:
|
||||||
|
- kafka_zookeeper
|
||||||
|
- kafka1
|
||||||
|
restart: always
|
||||||
security_opt:
|
security_opt:
|
||||||
- label:disable
|
- label:disable
|
||||||
|
@ -92,8 +92,8 @@ sudo clickhouse stop ||:
|
|||||||
|
|
||||||
for _ in $(seq 1 60); do if [[ $(wget --timeout=1 -q 'localhost:8123' -O-) == 'Ok.' ]]; then sleep 1 ; else break; fi ; done
|
for _ in $(seq 1 60); do if [[ $(wget --timeout=1 -q 'localhost:8123' -O-) == 'Ok.' ]]; then sleep 1 ; else break; fi ; done
|
||||||
|
|
||||||
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
||||||
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
|
zstd < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
|
||||||
|
|
||||||
# Compressed (FIXME: remove once only github actions will be left)
|
# Compressed (FIXME: remove once only github actions will be left)
|
||||||
rm /var/log/clickhouse-server/clickhouse-server.log
|
rm /var/log/clickhouse-server/clickhouse-server.log
|
||||||
|
@ -33,7 +33,6 @@ RUN apt-get update -y \
|
|||||||
qemu-user-static \
|
qemu-user-static \
|
||||||
sqlite3 \
|
sqlite3 \
|
||||||
sudo \
|
sudo \
|
||||||
telnet \
|
|
||||||
tree \
|
tree \
|
||||||
unixodbc \
|
unixodbc \
|
||||||
wget \
|
wget \
|
||||||
|
@ -4,6 +4,9 @@
|
|||||||
set -e -x -a
|
set -e -x -a
|
||||||
|
|
||||||
# Choose random timezone for this test run.
|
# Choose random timezone for this test run.
|
||||||
|
#
|
||||||
|
# NOTE: that clickhouse-test will randomize session_timezone by itself as well
|
||||||
|
# (it will choose between default server timezone and something specific).
|
||||||
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
|
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
|
||||||
echo "Choosen random timezone $TZ"
|
echo "Choosen random timezone $TZ"
|
||||||
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
|
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
|
||||||
|
@ -8,8 +8,6 @@ RUN apt-get update -y \
|
|||||||
apt-get install --yes --no-install-recommends \
|
apt-get install --yes --no-install-recommends \
|
||||||
bash \
|
bash \
|
||||||
tzdata \
|
tzdata \
|
||||||
fakeroot \
|
|
||||||
debhelper \
|
|
||||||
parallel \
|
parallel \
|
||||||
expect \
|
expect \
|
||||||
python3 \
|
python3 \
|
||||||
@ -20,7 +18,6 @@ RUN apt-get update -y \
|
|||||||
sudo \
|
sudo \
|
||||||
openssl \
|
openssl \
|
||||||
netcat-openbsd \
|
netcat-openbsd \
|
||||||
telnet \
|
|
||||||
brotli \
|
brotli \
|
||||||
&& apt-get clean
|
&& apt-get clean
|
||||||
|
|
||||||
|
@ -14,6 +14,7 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
|||||||
|
|
||||||
# Stress tests and upgrade check uses similar code that was placed
|
# Stress tests and upgrade check uses similar code that was placed
|
||||||
# in a separate bash library. See tests/ci/stress_tests.lib
|
# in a separate bash library. See tests/ci/stress_tests.lib
|
||||||
|
source /usr/share/clickhouse-test/ci/attach_gdb.lib
|
||||||
source /usr/share/clickhouse-test/ci/stress_tests.lib
|
source /usr/share/clickhouse-test/ci/stress_tests.lib
|
||||||
|
|
||||||
install_packages package_folder
|
install_packages package_folder
|
||||||
@ -52,7 +53,7 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
|||||||
|
|
||||||
start
|
start
|
||||||
|
|
||||||
shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
||||||
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
|
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
|
||||||
chmod 777 -R /var/lib/clickhouse
|
chmod 777 -R /var/lib/clickhouse
|
||||||
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
|
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
|
||||||
|
@ -18,10 +18,14 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
|||||||
python3-pip \
|
python3-pip \
|
||||||
shellcheck \
|
shellcheck \
|
||||||
yamllint \
|
yamllint \
|
||||||
|
locales \
|
||||||
&& pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
|
&& pip3 install black==23.1.0 boto3 codespell==2.2.1 mypy==1.3.0 PyGithub unidiff pylint==2.6.2 \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /root/.cache/pip
|
&& rm -rf /root/.cache/pip
|
||||||
|
|
||||||
|
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
|
||||||
|
ENV LC_ALL en_US.UTF-8
|
||||||
|
|
||||||
# Architecture of the image when BuildKit/buildx is used
|
# Architecture of the image when BuildKit/buildx is used
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
|
|
||||||
|
@ -8,8 +8,6 @@ RUN apt-get update -y \
|
|||||||
apt-get install --yes --no-install-recommends \
|
apt-get install --yes --no-install-recommends \
|
||||||
bash \
|
bash \
|
||||||
tzdata \
|
tzdata \
|
||||||
fakeroot \
|
|
||||||
debhelper \
|
|
||||||
parallel \
|
parallel \
|
||||||
expect \
|
expect \
|
||||||
python3 \
|
python3 \
|
||||||
@ -20,7 +18,6 @@ RUN apt-get update -y \
|
|||||||
sudo \
|
sudo \
|
||||||
openssl \
|
openssl \
|
||||||
netcat-openbsd \
|
netcat-openbsd \
|
||||||
telnet \
|
|
||||||
brotli \
|
brotli \
|
||||||
&& apt-get clean
|
&& apt-get clean
|
||||||
|
|
||||||
|
@ -16,6 +16,7 @@ ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_pre
|
|||||||
|
|
||||||
# Stress tests and upgrade check uses similar code that was placed
|
# Stress tests and upgrade check uses similar code that was placed
|
||||||
# in a separate bash library. See tests/ci/stress_tests.lib
|
# in a separate bash library. See tests/ci/stress_tests.lib
|
||||||
|
source /usr/share/clickhouse-test/ci/attach_gdb.lib
|
||||||
source /usr/share/clickhouse-test/ci/stress_tests.lib
|
source /usr/share/clickhouse-test/ci/stress_tests.lib
|
||||||
|
|
||||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||||
@ -61,12 +62,20 @@ configure
|
|||||||
|
|
||||||
# it contains some new settings, but we can safely remove it
|
# it contains some new settings, but we can safely remove it
|
||||||
rm /etc/clickhouse-server/config.d/merge_tree.xml
|
rm /etc/clickhouse-server/config.d/merge_tree.xml
|
||||||
|
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
|
||||||
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
||||||
|
|
||||||
start
|
start
|
||||||
stop
|
stop
|
||||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
|
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
|
||||||
|
|
||||||
|
# Start server from previous release
|
||||||
|
# Let's enable S3 storage by default
|
||||||
|
export USE_S3_STORAGE_FOR_MERGE_TREE=1
|
||||||
|
# Previous version may not be ready for fault injections
|
||||||
|
export ZOOKEEPER_FAULT_INJECTION=0
|
||||||
|
configure
|
||||||
|
|
||||||
# force_sync=false doesn't work correctly on some older versions
|
# force_sync=false doesn't work correctly on some older versions
|
||||||
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
|
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
|
||||||
| sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
|
| sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
|
||||||
@ -76,19 +85,14 @@ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-serv
|
|||||||
# But we still need default disk because some tables loaded only into it
|
# But we still need default disk because some tables loaded only into it
|
||||||
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
|
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
|
||||||
| sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
|
| sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
|
||||||
> /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
> /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
|
||||||
|
mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||||
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||||
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||||
|
|
||||||
# Start server from previous release
|
|
||||||
# Let's enable S3 storage by default
|
|
||||||
export USE_S3_STORAGE_FOR_MERGE_TREE=1
|
|
||||||
# Previous version may not be ready for fault injections
|
|
||||||
export ZOOKEEPER_FAULT_INJECTION=0
|
|
||||||
configure
|
|
||||||
|
|
||||||
# it contains some new settings, but we can safely remove it
|
# it contains some new settings, but we can safely remove it
|
||||||
rm /etc/clickhouse-server/config.d/merge_tree.xml
|
rm /etc/clickhouse-server/config.d/merge_tree.xml
|
||||||
|
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
|
||||||
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
|
||||||
|
|
||||||
start
|
start
|
||||||
|
@ -44,7 +44,6 @@ RUN apt-get update \
|
|||||||
clang-${LLVM_VERSION} \
|
clang-${LLVM_VERSION} \
|
||||||
clang-tidy-${LLVM_VERSION} \
|
clang-tidy-${LLVM_VERSION} \
|
||||||
cmake \
|
cmake \
|
||||||
fakeroot \
|
|
||||||
gdb \
|
gdb \
|
||||||
git \
|
git \
|
||||||
gperf \
|
gperf \
|
||||||
@ -94,7 +93,10 @@ RUN mkdir /tmp/ccache \
|
|||||||
&& rm -rf /tmp/ccache
|
&& rm -rf /tmp/ccache
|
||||||
|
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG SCCACHE_VERSION=v0.4.1
|
ARG SCCACHE_VERSION=v0.5.4
|
||||||
|
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||||
|
# sccache requires a value for the region. So by default we use The Default Region
|
||||||
|
ENV SCCACHE_REGION=us-east-1
|
||||||
RUN arch=${TARGETARCH:-amd64} \
|
RUN arch=${TARGETARCH:-amd64} \
|
||||||
&& case $arch in \
|
&& case $arch in \
|
||||||
amd64) rarch=x86_64 ;; \
|
amd64) rarch=x86_64 ;; \
|
||||||
|
@ -33,6 +33,9 @@ then
|
|||||||
elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
|
elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
|
||||||
then
|
then
|
||||||
DIR="powerpc64le"
|
DIR="powerpc64le"
|
||||||
|
elif [ "${ARCH}" = "riscv64" ]
|
||||||
|
then
|
||||||
|
DIR="riscv64"
|
||||||
fi
|
fi
|
||||||
elif [ "${OS}" = "FreeBSD" ]
|
elif [ "${OS}" = "FreeBSD" ]
|
||||||
then
|
then
|
||||||
|
20
docs/changelogs/v22.8.20.11-lts.md
Normal file
20
docs/changelogs/v22.8.20.11-lts.md
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2023
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v22.8.20.11-lts (c9ca79e24e8) FIXME as compared to v22.8.19.10-lts (989bc2fe8b0)
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
|
||||||
|
* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
23
docs/changelogs/v23.3.8.21-lts.md
Normal file
23
docs/changelogs/v23.3.8.21-lts.md
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2023
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v23.3.8.21-lts (1675f2264f3) FIXME as compared to v23.3.7.5-lts (bc683c11c92)
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
|
||||||
|
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
|
26
docs/changelogs/v23.4.6.25-stable.md
Normal file
26
docs/changelogs/v23.4.6.25-stable.md
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2023
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v23.4.6.25-stable (a06848b1770) FIXME as compared to v23.4.5.22-stable (0ced5d6a8da)
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Backported in [#51234](https://github.com/ClickHouse/ClickHouse/issues/51234): Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
|
||||||
|
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
25
docs/changelogs/v23.6.2.18-stable.md
Normal file
25
docs/changelogs/v23.6.2.18-stable.md
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2023
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v23.6.2.18-stable (89f39a7ccfe) FIXME as compared to v23.6.1.1524-stable (d1c7e13d088)
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#51888](https://github.com/ClickHouse/ClickHouse/issues/51888): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
|
||||||
|
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Remove the usage of Analyzer setting in the client [#51578](https://github.com/ClickHouse/ClickHouse/pull/51578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix 02116_tuple_element with Analyzer [#51669](https://github.com/ClickHouse/ClickHouse/pull/51669) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix SQLLogic docker images [#51719](https://github.com/ClickHouse/ClickHouse/pull/51719) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Pin for docker-ce [#51743](https://github.com/ClickHouse/ClickHouse/pull/51743) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
452
docs/changelogs/v23.7.1.2470-stable.md
Normal file
452
docs/changelogs/v23.7.1.2470-stable.md
Normal file
@ -0,0 +1,452 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2023
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v23.7.1.2470-stable (a70127baecc) FIXME as compared to v23.6.1.1524-stable (d1c7e13d088)
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Add `NAMED COLLECTION` access type (aliases `USE NAMED COLLECTION`, `NAMED COLLECTION USAGE`). This PR is backward incompatible because this access type is disabled by default (because a parent access type `NAMED COLLECTION ADMIN` is disabled by default as well). Proposed in [#50277](https://github.com/ClickHouse/ClickHouse/issues/50277). To grant use `GRANT NAMED COLLECTION ON collection_name TO user` or `GRANT NAMED COLLECTION ON * TO user`, to be able to give these grants `named_collection_admin` is required in config (previously it was named `named_collection_control`, so will remain as an alias). [#50625](https://github.com/ClickHouse/ClickHouse/pull/50625) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixing a typo in the `system.parts` column name `last_removal_attemp_time`. Now it is named `last_removal_attempt_time`. [#52104](https://github.com/ClickHouse/ClickHouse/pull/52104) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Bump version of the distributed_ddl_entry_format_version to 5 by default (enables opentelemetry and initial_query_id pass through). This will not allow to process existing entries for distributed DDL after **downgrade** (but note, that usually there should be no such unprocessed entries). [#52128](https://github.com/ClickHouse/ClickHouse/pull/52128) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Check projection metadata the same way we check ordinary metadata. This change may prevent the server from starting in case there was a table with an invalid projection. An example is a projection that created positional columns in PK (e.g. `projection p (select * order by 1, 4)` which is not allowed in table PK and can cause a crash during insert/merge). Drop such projections before the update. Fixes [#52353](https://github.com/ClickHouse/ClickHouse/issues/52353). [#52361](https://github.com/ClickHouse/ClickHouse/pull/52361) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* The experimental feature `hashid` is removed due to a bug. The quality of implementation was questionable at the start, and it didn't get through the experimental status. This closes [#52406](https://github.com/ClickHouse/ClickHouse/issues/52406). [#52449](https://github.com/ClickHouse/ClickHouse/pull/52449) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The function `toDecimalString` is removed due to subpar implementation quality. This closes [#52407](https://github.com/ClickHouse/ClickHouse/issues/52407). [#52450](https://github.com/ClickHouse/ClickHouse/pull/52450) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Implement KQL-style formatting for Interval. [#45671](https://github.com/ClickHouse/ClickHouse/pull/45671) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Support ZooKeeper `reconfig` command for CH Keeper with incremental reconfiguration which can be enabled via `keeper_server.enable_reconfiguration` setting. Support adding servers, removing servers, and changing server priorities. [#49450](https://github.com/ClickHouse/ClickHouse/pull/49450) ([Mike Kot](https://github.com/myrrc)).
|
||||||
|
* Kafka connector can fetch avro schema from schema registry with basic authentication using url-encoded credentials. [#49664](https://github.com/ClickHouse/ClickHouse/pull/49664) ([Ilya Golshtein](https://github.com/ilejn)).
|
||||||
|
* Add function `arrayJaccardIndex` which computes the Jaccard similarity between two arrays. [#50076](https://github.com/ClickHouse/ClickHouse/pull/50076) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
|
||||||
|
* Added support for prql as a query language. [#50686](https://github.com/ClickHouse/ClickHouse/pull/50686) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||||
|
* Add a column is_obsolete to system.settings and similar tables. Closes [#50819](https://github.com/ClickHouse/ClickHouse/issues/50819). [#50826](https://github.com/ClickHouse/ClickHouse/pull/50826) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Implement support of encrypted elements in configuration file Added possibility to use encrypted text in leaf elements of configuration file. The text is encrypted using encryption codecs from <encryption_codecs> section. [#50986](https://github.com/ClickHouse/ClickHouse/pull/50986) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* Just a new request of [#49483](https://github.com/ClickHouse/ClickHouse/issues/49483). [#51013](https://github.com/ClickHouse/ClickHouse/pull/51013) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Add SYSTEM STOP LISTEN query. Closes [#47972](https://github.com/ClickHouse/ClickHouse/issues/47972). [#51016](https://github.com/ClickHouse/ClickHouse/pull/51016) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Add input_format_csv_allow_variable_number_of_columns options. [#51273](https://github.com/ClickHouse/ClickHouse/pull/51273) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Another boring feature: add function substring_index, as in spark or mysql. [#51472](https://github.com/ClickHouse/ClickHouse/pull/51472) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Show stats for jemalloc bins. Example: ``` SELECT *, size * (nmalloc - ndalloc) AS allocated_bytes FROM system.jemalloc_bins WHERE allocated_bytes > 0 ORDER BY allocated_bytes DESC LIMIT 10 ```. [#51674](https://github.com/ClickHouse/ClickHouse/pull/51674) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Add RowBinaryWithDefaults format with extra byte before each column for using column default value. Closes [#50854](https://github.com/ClickHouse/ClickHouse/issues/50854). [#51695](https://github.com/ClickHouse/ClickHouse/pull/51695) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Added `default_temporary_table_engine` setting. Same as `default_table_engine` but for temporary tables. [#51292](https://github.com/ClickHouse/ClickHouse/issues/51292). [#51708](https://github.com/ClickHouse/ClickHouse/pull/51708) ([velavokr](https://github.com/velavokr)).
|
||||||
|
* Added new initcap / initcapUTF8 functions which convert the first letter of each word to upper case and the rest to lower case. [#51735](https://github.com/ClickHouse/ClickHouse/pull/51735) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Create table now supports `PRIMARY KEY` syntax in column definition. Columns are added to primary index in the same order columns are defined. [#51881](https://github.com/ClickHouse/ClickHouse/pull/51881) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Added the possibility to use date and time format specifiers in log and error log file names, either in config files (`log` and `errorlog` tags) or command line arguments (`--log-file` and `--errorlog-file`). [#51945](https://github.com/ClickHouse/ClickHouse/pull/51945) ([Victor Krasnov](https://github.com/sirvickr)).
|
||||||
|
* Added Peak Memory Usage (for query) to client final statistics, and to http header. [#51946](https://github.com/ClickHouse/ClickHouse/pull/51946) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Added new hasSubsequence() (+CaseInsensitive + UTF8 versions) functions. [#52050](https://github.com/ClickHouse/ClickHouse/pull/52050) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Add `array_agg` as alias of `groupArray` for PostgreSQL compatibility. Closes [#52100](https://github.com/ClickHouse/ClickHouse/issues/52100). ### Documentation entry for user-facing changes. [#52135](https://github.com/ClickHouse/ClickHouse/pull/52135) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add `any_value` as a compatibility alias for `any` aggregate function. Closes [#52140](https://github.com/ClickHouse/ClickHouse/issues/52140). [#52147](https://github.com/ClickHouse/ClickHouse/pull/52147) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add aggregate function `array_concat_agg` for compatibility with BigQuery, it's alias of `groupArrayArray`. Closes [#52139](https://github.com/ClickHouse/ClickHouse/issues/52139). [#52149](https://github.com/ClickHouse/ClickHouse/pull/52149) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add `OCTET_LENGTH` as an alias to `length`. Closes [#52153](https://github.com/ClickHouse/ClickHouse/issues/52153). [#52176](https://github.com/ClickHouse/ClickHouse/pull/52176) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
|
||||||
|
* Re-add SipHash keyed functions. [#52206](https://github.com/ClickHouse/ClickHouse/pull/52206) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||||
|
* Added `firstLine` function to extract the first line from the multi-line string. This closes [#51172](https://github.com/ClickHouse/ClickHouse/issues/51172). [#52209](https://github.com/ClickHouse/ClickHouse/pull/52209) ([Mikhail Koviazin](https://github.com/mkmkme)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Enable `move_all_conditions_to_prewhere` and `enable_multiple_prewhere_read_steps` settings by default. [#46365](https://github.com/ClickHouse/ClickHouse/pull/46365) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Improves performance of some queries by tuning allocator. [#46416](https://github.com/ClickHouse/ClickHouse/pull/46416) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Enable automatic selection of the sparse serialization format by default. It improves performance. The format is supported since version 22.1. After this change, downgrading to versions older than 22.1 might not be possible. You can turn off the usage of the sparse serialization format by providing the `ratio_of_defaults_for_sparse_serialization = 1` setting for your MergeTree tables. [#49631](https://github.com/ClickHouse/ClickHouse/pull/49631) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Now we use fixed-size tasks in `MergeTreePrefetchedReadPool` as in `MergeTreeReadPool`. Also from now we use connection pool for S3 requests. [#49732](https://github.com/ClickHouse/ClickHouse/pull/49732) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* More pushdown to the right side of join. [#50532](https://github.com/ClickHouse/ClickHouse/pull/50532) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Improve grace_hash join by reserving hash table's size (resubmit). [#50875](https://github.com/ClickHouse/ClickHouse/pull/50875) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
|
* Waiting on lock in `OpenedFileCache` could be noticeable sometimes. We sharded it into multiple sub-maps (each with its own lock) to avoid contention. [#51341](https://github.com/ClickHouse/ClickHouse/pull/51341) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Remove duplicate condition in functionunixtimestamp64.h. [#51857](https://github.com/ClickHouse/ClickHouse/pull/51857) ([lcjh](https://github.com/ljhcage)).
|
||||||
|
* The idea is that conditions with PK columns are likely to be used in PK analysis and will not contribute much more to PREWHERE filtering. [#51958](https://github.com/ClickHouse/ClickHouse/pull/51958) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* 1. Add rewriter for both old and new analyzer. 2. Add settings `optimize_uniq_to_count` which default is 0. [#52004](https://github.com/ClickHouse/ClickHouse/pull/52004) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* The performance experiments of **OnTime** on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring an improvement of **11.6%** to the QPS of the query **Q8** while having no impact on others. [#52036](https://github.com/ClickHouse/ClickHouse/pull/52036) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||||
|
* Enable `allow_vertical_merges_from_compact_to_wide_parts` by default. It will save memory usage during merges. [#52295](https://github.com/ClickHouse/ClickHouse/pull/52295) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1` . This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823) . This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173) . [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Reduce the number of syscalls in FileCache::loadMetadata. [#52435](https://github.com/ClickHouse/ClickHouse/pull/52435) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Added query `SYSTEM FLUSH ASYNC INSERT QUEUE` which flushes all pending asynchronous inserts to the destination tables. Added a server-side setting `async_insert_queue_flush_on_shutdown` (`true` by default) which determines whether to flush queue of asynchronous inserts on graceful shutdown. Setting `async_insert_threads` is now a server-side setting. [#49160](https://github.com/ClickHouse/ClickHouse/pull/49160) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Don't show messages about `16 EiB` free space in logs, as they don't make sense. This closes [#49320](https://github.com/ClickHouse/ClickHouse/issues/49320). [#49342](https://github.com/ClickHouse/ClickHouse/pull/49342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Properly check the limit for the `sleepEachRow` function. Add a setting `function_sleep_max_microseconds_per_block`. This is needed for generic query fuzzer. [#49343](https://github.com/ClickHouse/ClickHouse/pull/49343) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix two issues in ``` select geohashEncode(120.2, number::Float64) from numbers(10); ```. [#50066](https://github.com/ClickHouse/ClickHouse/pull/50066) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Add support for external disks in Keeper for storing snapshots and logs. [#50098](https://github.com/ClickHouse/ClickHouse/pull/50098) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Add support for multi-directory selection (`{}`) globs. [#50559](https://github.com/ClickHouse/ClickHouse/pull/50559) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Allow to have strict lower boundary for file segment size by downloading remaining data in the background. Minimum size of file segment (if actual file size is bigger) is configured as cache configuration setting `boundary_alignment`, by default `4Mi`. Number of background threads are configured as cache configuration setting `background_download_threads`, by default `2`. Also `max_file_segment_size` was increased from `8Mi` to `32Mi` in this PR. [#51000](https://github.com/ClickHouse/ClickHouse/pull/51000) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Allow filtering HTTP headers with `http_forbid_headers` section in config. Both exact matching and regexp filters are available. [#51038](https://github.com/ClickHouse/ClickHouse/pull/51038) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* #50727 new alias for function current_database and added new function current_schemas. [#51076](https://github.com/ClickHouse/ClickHouse/pull/51076) ([Pedro Riera](https://github.com/priera)).
|
||||||
|
* Log async insert flush queries into system.query_log. [#51160](https://github.com/ClickHouse/ClickHouse/pull/51160) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Decreased default timeouts for S3 from 30 seconds to 3 seconds, and for other HTTP from 180 seconds to 30 seconds. [#51171](https://github.com/ClickHouse/ClickHouse/pull/51171) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Use read_bytes/total_bytes_to_read for progress bar in s3/file/url/... table functions for better progress indication. [#51286](https://github.com/ClickHouse/ClickHouse/pull/51286) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Functions "date_diff() and age()" now support millisecond/microsecond unit and work with microsecond precision. [#51291](https://github.com/ClickHouse/ClickHouse/pull/51291) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Allow SQL standard `FETCH` without `OFFSET`. See https://antonz.org/sql-fetch/. [#51293](https://github.com/ClickHouse/ClickHouse/pull/51293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve parsing of path in clickhouse-keeper-client. [#51359](https://github.com/ClickHouse/ClickHouse/pull/51359) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* A third-party product depending on ClickHouse (Gluten: Plugin to Double SparkSQL's Performance) had a bug. This fix avoids heap overflow in that third-party product while reading from HDFS. [#51386](https://github.com/ClickHouse/ClickHouse/pull/51386) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Fix checking error caused by uninitialized class members. [#51418](https://github.com/ClickHouse/ClickHouse/pull/51418) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Add ability to disable native copy for S3 (setting for BACKUP/RESTORE `allow_s3_native_copy`, and `s3_allow_native_copy` for `s3`/`s3_plain` disks). [#51448](https://github.com/ClickHouse/ClickHouse/pull/51448) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add column `primary_key_size` to `system.parts` table to show compressed primary key size on disk. Closes [#51400](https://github.com/ClickHouse/ClickHouse/issues/51400). [#51496](https://github.com/ClickHouse/ClickHouse/pull/51496) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
* Allow running `clickhouse-local` without procfs, without home directory existing, and without name resolution plugins from glibc. [#51518](https://github.com/ClickHouse/ClickHouse/pull/51518) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Correcting the message of modify storage policy https://github.com/clickhouse/clickhouse/issues/51516 ### documentation entry for user-facing changes. [#51519](https://github.com/ClickHouse/ClickHouse/pull/51519) ([xiaolei565](https://github.com/xiaolei565)).
|
||||||
|
* Support `DROP FILESYSTEM CACHE <cache_name> KEY <key> [ OFFSET <offset>]`. [#51547](https://github.com/ClickHouse/ClickHouse/pull/51547) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Allow to add disk name for custom disks. Previously custom disks would use an internal generated disk name. Now it will be possible with `disk = disk_<name>(...)` (e.g. disk will have name `name`) . [#51552](https://github.com/ClickHouse/ClickHouse/pull/51552) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add placeholder `%a` for full filename in rename_files_after_processing setting. [#51603](https://github.com/ClickHouse/ClickHouse/pull/51603) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add column modification time into system.parts_columns. [#51685](https://github.com/ClickHouse/ClickHouse/pull/51685) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add new setting `input_format_csv_use_default_on_bad_values` to CSV format that allows to insert default value when parsing of a single field failed. [#51716](https://github.com/ClickHouse/ClickHouse/pull/51716) ([KevinyhZou](https://github.com/KevinyhZou)).
|
||||||
|
* Added a crash log flush to the disk after the unexpected crash. [#51720](https://github.com/ClickHouse/ClickHouse/pull/51720) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Fix behavior in dashboard page where errors unrelated to authentication are not shown. Also fix 'overlapping' chart behavior. [#51744](https://github.com/ClickHouse/ClickHouse/pull/51744) ([Zach Naimon](https://github.com/ArctypeZach)).
|
||||||
|
* Allow UUID to UInt128 conversion. [#51765](https://github.com/ClickHouse/ClickHouse/pull/51765) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Added support for function range of Nullable arguments. [#51767](https://github.com/ClickHouse/ClickHouse/pull/51767) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Convert condition like `toYear(x) = c` to `c1 <= x < c2`. [#51795](https://github.com/ClickHouse/ClickHouse/pull/51795) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Improve MySQL compatibility of statement SHOW INDEX. [#51796](https://github.com/ClickHouse/ClickHouse/pull/51796) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix `use_structure_from_insertion_table_in_table_functions` does not work with `MATERIALIZED` and `ALIAS` columns. Closes [#51817](https://github.com/ClickHouse/ClickHouse/issues/51817). Closes [#51019](https://github.com/ClickHouse/ClickHouse/issues/51019). [#51825](https://github.com/ClickHouse/ClickHouse/pull/51825) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Introduce a table setting `wait_for_unique_parts_send_before_shutdown_ms` which specify the amount of time replica will wait before closing interserver handler for replicated sends. Also fix inconsistency with shutdown of tables and interserver handlers: now server shutdown tables first and only after it shut down interserver handlers. [#51851](https://github.com/ClickHouse/ClickHouse/pull/51851) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* CacheDictionary request only unique keys from source. Closes [#51762](https://github.com/ClickHouse/ClickHouse/issues/51762). [#51853](https://github.com/ClickHouse/ClickHouse/pull/51853) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fixed settings not applied for explain query when format provided. [#51859](https://github.com/ClickHouse/ClickHouse/pull/51859) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Allow SETTINGS before FORMAT in DESCRIBE TABLE query for compatibility with SELECT query. Closes [#51544](https://github.com/ClickHouse/ClickHouse/issues/51544). [#51899](https://github.com/ClickHouse/ClickHouse/pull/51899) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Var-int encoded integers (e.g. used by the native protocol) can now use the full 64-bit range. 3rd party clients are advised to update their var-int code accordingly. [#51905](https://github.com/ClickHouse/ClickHouse/pull/51905) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update certificates when they change without the need to manually SYSTEM RELOAD CONFIG. [#52030](https://github.com/ClickHouse/ClickHouse/pull/52030) ([Mike Kot](https://github.com/myrrc)).
|
||||||
|
* Added `allow_create_index_without_type` setting that allow to ignore `ADD INDEX` queries without specified `TYPE`. Standard SQL queries will just succeed without changing table schema. [#52056](https://github.com/ClickHouse/ClickHouse/pull/52056) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Fixed crash when mysqlxx::Pool::Entry is used after it was disconnected. [#52063](https://github.com/ClickHouse/ClickHouse/pull/52063) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* CREATE TABLE ... AS SELECT .. is now supported in MaterializedMySQL. [#52067](https://github.com/ClickHouse/ClickHouse/pull/52067) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Introduced automatic conversion of text types to utf8 for MaterializedMySQL. [#52084](https://github.com/ClickHouse/ClickHouse/pull/52084) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Add alias for functions `today` (now available under the `curdate`/`current_date` names) and `now` (`current_timestamp`). [#52106](https://github.com/ClickHouse/ClickHouse/pull/52106) ([Lloyd-Pottiger](https://github.com/Lloyd-Pottiger)).
|
||||||
|
* Log messages are written to text_log from the beginning. [#52113](https://github.com/ClickHouse/ClickHouse/pull/52113) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* In cases where the HTTP endpoint has multiple IP addresses and the first of them is unreachable, a timeout exception will be thrown. Made session creation with handling all resolved endpoints. [#52116](https://github.com/ClickHouse/ClickHouse/pull/52116) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||||
|
* Support async_deduplication_token for async insert. [#52136](https://github.com/ClickHouse/ClickHouse/pull/52136) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Avro input format support Union with single type. Closes [#52131](https://github.com/ClickHouse/ClickHouse/issues/52131). [#52137](https://github.com/ClickHouse/ClickHouse/pull/52137) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add setting `optimize_use_implicit_projections` to disable implicit projections (currently only `min_max_count` projection). This is defaulted to false until [#52075](https://github.com/ClickHouse/ClickHouse/issues/52075) is fixed. [#52152](https://github.com/ClickHouse/ClickHouse/pull/52152) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* It was possible to use the function `hasToken` for infinite loop. Now this possibility is removed. This closes [#52156](https://github.com/ClickHouse/ClickHouse/issues/52156). [#52160](https://github.com/ClickHouse/ClickHouse/pull/52160) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* 1. Upgrade Intel QPL from v1.1.0 to v1.2.0 2. Upgrade Intel accel-config from v3.5 to v4.0 3. Fixed issue that Device IOTLB miss has big perf. impact for IAA accelerators. [#52180](https://github.com/ClickHouse/ClickHouse/pull/52180) ([jasperzhu](https://github.com/jinjunzh)).
|
||||||
|
* Functions "date_diff() and age()" now support millisecond/microsecond unit and work with microsecond precision. [#52181](https://github.com/ClickHouse/ClickHouse/pull/52181) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Create ZK ancestors optimistically. [#52195](https://github.com/ClickHouse/ClickHouse/pull/52195) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix [#50582](https://github.com/ClickHouse/ClickHouse/issues/50582). Avoid the `Not found column ... in block` error in some cases of reading in-order and constants. [#52259](https://github.com/ClickHouse/ClickHouse/pull/52259) ([Chen768959](https://github.com/Chen768959)).
|
||||||
|
* Check whether S2 geo primitives are invalid as early as possible on ClickHouse side. This closes: [#27090](https://github.com/ClickHouse/ClickHouse/issues/27090). [#52260](https://github.com/ClickHouse/ClickHouse/pull/52260) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Now unquoted utf-8 strings are supported in DDL for MaterializedMySQL. [#52318](https://github.com/ClickHouse/ClickHouse/pull/52318) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Add back missing projection QueryAccessInfo when `query_plan_optimize_projection = 1`. This fixes [#50183](https://github.com/ClickHouse/ClickHouse/issues/50183) . This fixes [#50093](https://github.com/ClickHouse/ClickHouse/issues/50093) . [#52327](https://github.com/ClickHouse/ClickHouse/pull/52327) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Add new setting `disable_url_encoding` that allows to disable decoding/encoding path in uri in URL engine. [#52337](https://github.com/ClickHouse/ClickHouse/pull/52337) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* When `ZooKeeperRetriesControl` rethrows an error, it's more useful to see its original stack trace, not the one from `ZooKeeperRetriesControl` itself. [#52347](https://github.com/ClickHouse/ClickHouse/pull/52347) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Now double quoted comments are supported in MaterializedMySQL. [#52355](https://github.com/ClickHouse/ClickHouse/pull/52355) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Wait for zero copy replication lock even if some disks don't support it. [#52376](https://github.com/ClickHouse/ClickHouse/pull/52376) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Now it's possible to specify min (`memory_profiler_sample_min_allocation_size`) and max (`memory_profiler_sample_max_allocation_size`) size for allocations to be tracked with sampling memory profiler. [#52419](https://github.com/ClickHouse/ClickHouse/pull/52419) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* The `session_timezone` setting is demoted to experimental. [#52445](https://github.com/ClickHouse/ClickHouse/pull/52445) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Now interserver port will be closed only after tables are shut down. [#52498](https://github.com/ClickHouse/ClickHouse/pull/52498) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Added field `refcount` to `system.remote_data_paths` table. [#52518](https://github.com/ClickHouse/ClickHouse/pull/52518) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* New setting `merge_tree_determine_task_size_by_prewhere_columns` added. If set to `true` only sizes of the columns from `PREWHERE` section will be considered to determine reading task size. Otherwise all the columns from query are considered. [#52606](https://github.com/ClickHouse/ClickHouse/pull/52606) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Add experimental ClickHouse builds for Linux RISC-V 64 to CI. [#31398](https://github.com/ClickHouse/ClickHouse/pull/31398) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed CRC32(WeakHash32) issue for s390x. [#50365](https://github.com/ClickHouse/ClickHouse/pull/50365) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
|
* Add integration test check with the enabled analyzer. [#50926](https://github.com/ClickHouse/ClickHouse/pull/50926) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fixed several issues found by OSS-Fuzz. [#51736](https://github.com/ClickHouse/ClickHouse/pull/51736) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* There were a couple of failures because of (?) S3 availability. The sccache has a feature of failing over to local compilation. [#51893](https://github.com/ClickHouse/ClickHouse/pull/51893) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* 02242_delete_user_race and 02243_drop_user_grant_race tests have been corrected. [#51923](https://github.com/ClickHouse/ClickHouse/pull/51923) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Make the function `CHColumnToArrowColumn::fillArrowArrayWithArrayColumnData` to work with nullable arrays, which are not possible in ClickHouse, but needed for Gluten. [#52112](https://github.com/ClickHouse/ClickHouse/pull/52112) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* We've updated the CCTZ library to master, but there are no user-visible changes. [#52124](https://github.com/ClickHouse/ClickHouse/pull/52124) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* The `system.licenses` table now includes the hard-forked library Poco. This closes [#52066](https://github.com/ClickHouse/ClickHouse/issues/52066). [#52127](https://github.com/ClickHouse/ClickHouse/pull/52127) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Follow up [#50926](https://github.com/ClickHouse/ClickHouse/issues/50926). Add integration tests check with enabled analyzer to master. [#52210](https://github.com/ClickHouse/ClickHouse/pull/52210) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Reproducible builds for Rust. [#52395](https://github.com/ClickHouse/ClickHouse/pull/52395) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Improve the startup time of `clickhouse-client` and `clickhouse-local` in debug and sanitizer builds. This closes [#52228](https://github.com/ClickHouse/ClickHouse/issues/52228). [#52489](https://github.com/ClickHouse/ClickHouse/pull/52489) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Check that there are no cases of bad punctuation: whitespace before a comma like `Hello ,world` instead of `Hello, world`. [#52549](https://github.com/ClickHouse/ClickHouse/pull/52549) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
|
||||||
|
* Fix materialised pg syncTables [#49698](https://github.com/ClickHouse/ClickHouse/pull/49698) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix projection with optimize_aggregators_of_group_by_keys [#49709](https://github.com/ClickHouse/ClickHouse/pull/49709) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix optimize_skip_unused_shards with JOINs [#51037](https://github.com/ClickHouse/ClickHouse/pull/51037) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix formatDateTime() with fractional negative datetime64 [#51290](https://github.com/ClickHouse/ClickHouse/pull/51290) ([Dmitry Kardymon](https://github.com/kardymonds)).
|
||||||
|
* Functions `hasToken*` were totally wrong. Add a test for [#43358](https://github.com/ClickHouse/ClickHouse/issues/43358) [#51378](https://github.com/ClickHouse/ClickHouse/pull/51378) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix optimization to move functions before sorting. [#51481](https://github.com/ClickHouse/ClickHouse/pull/51481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fix SIGSEGV for clusters with zero weight across all shards (fixes INSERT INTO FUNCTION clusterAllReplicas()) [#51545](https://github.com/ClickHouse/ClickHouse/pull/51545) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix timeout for hedged requests [#51582](https://github.com/ClickHouse/ClickHouse/pull/51582) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix logical error in ANTI join with NULL [#51601](https://github.com/ClickHouse/ClickHouse/pull/51601) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Do not apply PredicateExpressionsOptimizer for ASOF/ANTI join [#51633](https://github.com/ClickHouse/ClickHouse/pull/51633) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix async insert with deduplication for ReplicatedMergeTree using merging algorithms [#51676](https://github.com/ClickHouse/ClickHouse/pull/51676) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fix segfault when create invalid EmbeddedRocksdb table [#51847](https://github.com/ClickHouse/ClickHouse/pull/51847) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix inserts into MongoDB tables [#51876](https://github.com/ClickHouse/ClickHouse/pull/51876) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix deadlock on DatabaseCatalog shutdown [#51908](https://github.com/ClickHouse/ClickHouse/pull/51908) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix error in subquery operators [#51922](https://github.com/ClickHouse/ClickHouse/pull/51922) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix async connect to hosts with multiple ips [#51934](https://github.com/ClickHouse/ClickHouse/pull/51934) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Do not remove inputs after ActionsDAG::merge [#51947](https://github.com/ClickHouse/ClickHouse/pull/51947) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Small fix for toDateTime64() for dates after 2283-12-31 [#52130](https://github.com/ClickHouse/ClickHouse/pull/52130) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix incorrect projection analysis when aggregation expression contains monotonic functions [#52151](https://github.com/ClickHouse/ClickHouse/pull/52151) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix error in `groupArrayMoving` functions [#52161](https://github.com/ClickHouse/ClickHouse/pull/52161) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Disable direct join for range dictionary [#52187](https://github.com/ClickHouse/ClickHouse/pull/52187) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix sticky mutations test (and extremely rare race condition) [#52197](https://github.com/ClickHouse/ClickHouse/pull/52197) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix race in Web disk [#52211](https://github.com/ClickHouse/ClickHouse/pull/52211) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix data race in Connection::setAsyncCallback on unknown packet from server [#52219](https://github.com/ClickHouse/ClickHouse/pull/52219) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix temp data deletion on startup, add test [#52275](https://github.com/ClickHouse/ClickHouse/pull/52275) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Don't use minmax_count projections when counting nullable columns [#52297](https://github.com/ClickHouse/ClickHouse/pull/52297) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* MergeTree/ReplicatedMergeTree should use server timezone for log entries [#52325](https://github.com/ClickHouse/ClickHouse/pull/52325) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix parameterized view with cte and multiple usage [#52328](https://github.com/ClickHouse/ClickHouse/pull/52328) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `apply_snapshot` in Keeper [#52358](https://github.com/ClickHouse/ClickHouse/pull/52358) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update build-osx.md [#52377](https://github.com/ClickHouse/ClickHouse/pull/52377) ([AlexBykovski](https://github.com/AlexBykovski)).
|
||||||
|
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Fix normal projection with merge table [#52432](https://github.com/ClickHouse/ClickHouse/pull/52432) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix possible double-free in Aggregator [#52439](https://github.com/ClickHouse/ClickHouse/pull/52439) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||||
|
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Check recursion depth in OptimizedRegularExpression [#52451](https://github.com/ClickHouse/ClickHouse/pull/52451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix data-race DatabaseReplicated::startupTables()/canExecuteReplicatedMetadataAlter() [#52490](https://github.com/ClickHouse/ClickHouse/pull/52490) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix abort in function `transform` [#52513](https://github.com/ClickHouse/ClickHouse/pull/52513) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix lightweight delete after drop of projection [#52517](https://github.com/ClickHouse/ClickHouse/pull/52517) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix possible error "Cannot drain connections: cancel first" [#52585](https://github.com/ClickHouse/ClickHouse/pull/52585) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
#### NO CL ENTRY
|
||||||
|
|
||||||
|
* NO CL ENTRY: 'Revert "Add documentation for building in docker"'. [#51773](https://github.com/ClickHouse/ClickHouse/pull/51773) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* NO CL ENTRY: 'Revert "Fix build"'. [#51911](https://github.com/ClickHouse/ClickHouse/pull/51911) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* NO CL ENTRY: 'Revert "Millisecond and microsecond support in date_diff / age functions"'. [#52129](https://github.com/ClickHouse/ClickHouse/pull/52129) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* NO CL ENTRY: 'Revert "Re-add SipHash keyed functions"'. [#52466](https://github.com/ClickHouse/ClickHouse/pull/52466) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* NO CL ENTRY: 'Revert "Add an ability to specify allocations size for sampling memory profiler"'. [#52496](https://github.com/ClickHouse/ClickHouse/pull/52496) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* NO CL ENTRY: 'Revert "Rewrite uniq to count"'. [#52576](https://github.com/ClickHouse/ClickHouse/pull/52576) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Remove duplicate_order_by_and_distinct optimization [#47135](https://github.com/ClickHouse/ClickHouse/pull/47135) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Update sort desc in ReadFromMergeTree after applying PREWHERE info [#48669](https://github.com/ClickHouse/ClickHouse/pull/48669) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix `BindException: Address already in use` in HDFS integration tests [#49428](https://github.com/ClickHouse/ClickHouse/pull/49428) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Force libunwind usage (removes gcc_eh support) [#49438](https://github.com/ClickHouse/ClickHouse/pull/49438) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Cleanup `storage_conf.xml` [#49557](https://github.com/ClickHouse/ClickHouse/pull/49557) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix flaky tests caused by OPTIMIZE FINAL failing memory budget check [#49764](https://github.com/ClickHouse/ClickHouse/pull/49764) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Remove unstable queries from performance/join_set_filter [#50235](https://github.com/ClickHouse/ClickHouse/pull/50235) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* More accurate DNS resolve for the keeper connection [#50738](https://github.com/ClickHouse/ClickHouse/pull/50738) ([pufit](https://github.com/pufit)).
|
||||||
|
* Try to fix some trash in Disks and part moves [#51135](https://github.com/ClickHouse/ClickHouse/pull/51135) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add jemalloc support for s390x [#51186](https://github.com/ClickHouse/ClickHouse/pull/51186) ([Boris Kuschel](https://github.com/bkuschel)).
|
||||||
|
* Resubmit [#48821](https://github.com/ClickHouse/ClickHouse/issues/48821) [#51208](https://github.com/ClickHouse/ClickHouse/pull/51208) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* test for [#36894](https://github.com/ClickHouse/ClickHouse/issues/36894) [#51274](https://github.com/ClickHouse/ClickHouse/pull/51274) ([Denny Crane](https://github.com/den-crane)).
|
||||||
|
* external_aggregation_fix for big endian machines [#51280](https://github.com/ClickHouse/ClickHouse/pull/51280) ([Sanjam Panda](https://github.com/saitama951)).
|
||||||
|
* Fix: Invalid number of rows in Chunk column Object [#51296](https://github.com/ClickHouse/ClickHouse/pull/51296) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Add a test for [#44816](https://github.com/ClickHouse/ClickHouse/issues/44816) [#51305](https://github.com/ClickHouse/ClickHouse/pull/51305) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for `calculate_text_stack_trace` setting [#51311](https://github.com/ClickHouse/ClickHouse/pull/51311) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* decrease log level, make logs shorter [#51320](https://github.com/ClickHouse/ClickHouse/pull/51320) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Collect stack traces from job's scheduling and print along with exception's stack trace. [#51349](https://github.com/ClickHouse/ClickHouse/pull/51349) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Add a test for [#42691](https://github.com/ClickHouse/ClickHouse/issues/42691) [#51352](https://github.com/ClickHouse/ClickHouse/pull/51352) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#32474](https://github.com/ClickHouse/ClickHouse/issues/32474) [#51354](https://github.com/ClickHouse/ClickHouse/pull/51354) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#41727](https://github.com/ClickHouse/ClickHouse/issues/41727) [#51355](https://github.com/ClickHouse/ClickHouse/pull/51355) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#35801](https://github.com/ClickHouse/ClickHouse/issues/35801) [#51356](https://github.com/ClickHouse/ClickHouse/pull/51356) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#34626](https://github.com/ClickHouse/ClickHouse/issues/34626) [#51357](https://github.com/ClickHouse/ClickHouse/pull/51357) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Initialize text_log earlier to capture table startup messages [#51360](https://github.com/ClickHouse/ClickHouse/pull/51360) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Use separate default settings for clickhouse-local [#51363](https://github.com/ClickHouse/ClickHouse/pull/51363) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Attempt to remove wrong code (catch/throw in Functions) [#51367](https://github.com/ClickHouse/ClickHouse/pull/51367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove suspicious code [#51383](https://github.com/ClickHouse/ClickHouse/pull/51383) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Disable hedged requests under TSan [#51392](https://github.com/ClickHouse/ClickHouse/pull/51392) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* no finalize in d-tor WriteBufferFromOStream [#51404](https://github.com/ClickHouse/ClickHouse/pull/51404) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Better diagnostics for 01193_metadata_loading [#51414](https://github.com/ClickHouse/ClickHouse/pull/51414) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix attaching gdb in stress tests [#51445](https://github.com/ClickHouse/ClickHouse/pull/51445) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Merging [#36384](https://github.com/ClickHouse/ClickHouse/issues/36384) [#51458](https://github.com/ClickHouse/ClickHouse/pull/51458) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix possible race on shutdown wait [#51497](https://github.com/ClickHouse/ClickHouse/pull/51497) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Fix `test_alter_moving_garbage`: lock between getActiveContainingPart and swapActivePart in parts mover [#51498](https://github.com/ClickHouse/ClickHouse/pull/51498) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix a logical error on mutation [#51502](https://github.com/ClickHouse/ClickHouse/pull/51502) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix running integration tests with spaces in their names [#51514](https://github.com/ClickHouse/ClickHouse/pull/51514) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix flaky test 00417_kill_query [#51522](https://github.com/ClickHouse/ClickHouse/pull/51522) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* fs cache: add some checks [#51536](https://github.com/ClickHouse/ClickHouse/pull/51536) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Don't run 02782_uniq_exact_parallel_merging_bug in parallel with other tests [#51549](https://github.com/ClickHouse/ClickHouse/pull/51549) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* 00900_orc_load: lift kill timeout [#51559](https://github.com/ClickHouse/ClickHouse/pull/51559) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add retries to 00416_pocopatch_progress_in_http_headers [#51575](https://github.com/ClickHouse/ClickHouse/pull/51575) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Remove the usage of Analyzer setting in the client [#51578](https://github.com/ClickHouse/ClickHouse/pull/51578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix merge_selecting_task scheduling [#51591](https://github.com/ClickHouse/ClickHouse/pull/51591) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add hex functions for cityhash [#51595](https://github.com/ClickHouse/ClickHouse/pull/51595) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Remove `unset CLICKHOUSE_LOG_COMMENT` from tests [#51623](https://github.com/ClickHouse/ClickHouse/pull/51623) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Implement endianness-independent serialization [#51637](https://github.com/ClickHouse/ClickHouse/pull/51637) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Ignore APPEND and TRUNCATE modifiers if file does not exist. [#51640](https://github.com/ClickHouse/ClickHouse/pull/51640) ([alekar](https://github.com/alekar)).
|
||||||
|
* Try to fix flaky 02210_processors_profile_log [#51641](https://github.com/ClickHouse/ClickHouse/pull/51641) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Make common macros extendable [#51646](https://github.com/ClickHouse/ClickHouse/pull/51646) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Correct an exception message in src/Functions/nested.cpp [#51651](https://github.com/ClickHouse/ClickHouse/pull/51651) ([Alex Cheng](https://github.com/Alex-Cheng)).
|
||||||
|
* tests: fix 02050_client_profile_events flakiness [#51653](https://github.com/ClickHouse/ClickHouse/pull/51653) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Minor follow-up to re2 update to 2023-06-02 ([#50949](https://github.com/ClickHouse/ClickHouse/issues/50949)) [#51655](https://github.com/ClickHouse/ClickHouse/pull/51655) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix 02116_tuple_element with Analyzer [#51669](https://github.com/ClickHouse/ClickHouse/pull/51669) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update timeouts in tests for transactions [#51683](https://github.com/ClickHouse/ClickHouse/pull/51683) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Remove unused code [#51684](https://github.com/ClickHouse/ClickHouse/pull/51684) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Remove `mmap/mremap/munmap` from Allocator.h [#51686](https://github.com/ClickHouse/ClickHouse/pull/51686) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* SonarCloud: Add C++23 Experimental Flag [#51687](https://github.com/ClickHouse/ClickHouse/pull/51687) ([Julio Jimenez](https://github.com/juliojimenez)).
|
||||||
|
* Wait with retries when attaching GDB in tests [#51688](https://github.com/ClickHouse/ClickHouse/pull/51688) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.6.1.1524-stable [#51691](https://github.com/ClickHouse/ClickHouse/pull/51691) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* fix write to finalized buffer [#51696](https://github.com/ClickHouse/ClickHouse/pull/51696) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* do not log exception aborted for pending mutate/merge entries when shutdown [#51697](https://github.com/ClickHouse/ClickHouse/pull/51697) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix race in ContextAccess [#51704](https://github.com/ClickHouse/ClickHouse/pull/51704) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Make test scripts backwards compatible [#51707](https://github.com/ClickHouse/ClickHouse/pull/51707) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* test for full join and null predicate [#51709](https://github.com/ClickHouse/ClickHouse/pull/51709) ([Denny Crane](https://github.com/den-crane)).
|
||||||
|
* A cmake warning on job limits underutilizing CPU [#51710](https://github.com/ClickHouse/ClickHouse/pull/51710) ([velavokr](https://github.com/velavokr)).
|
||||||
|
* Fix SQLLogic docker images [#51719](https://github.com/ClickHouse/ClickHouse/pull/51719) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Added ASK_PASSWORD client constant instead of hardcoded '\n' [#51723](https://github.com/ClickHouse/ClickHouse/pull/51723) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Update README.md [#51726](https://github.com/ClickHouse/ClickHouse/pull/51726) ([Tyler Hannan](https://github.com/tylerhannan)).
|
||||||
|
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Remove MemoryPool from Poco because it's useless [#51732](https://github.com/ClickHouse/ClickHouse/pull/51732) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix: logical error in grace hash join [#51737](https://github.com/ClickHouse/ClickHouse/pull/51737) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Update 01320_create_sync_race_condition_zookeeper.sh [#51742](https://github.com/ClickHouse/ClickHouse/pull/51742) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Pin for docker-ce [#51743](https://github.com/ClickHouse/ClickHouse/pull/51743) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Revert "Fix: Invalid number of rows in Chunk column Object" [#51750](https://github.com/ClickHouse/ClickHouse/pull/51750) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add SonarCloud to README [#51751](https://github.com/ClickHouse/ClickHouse/pull/51751) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix test `02789_object_type_invalid_num_of_rows` [#51754](https://github.com/ClickHouse/ClickHouse/pull/51754) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix (benign) data race in `transform` [#51755](https://github.com/ClickHouse/ClickHouse/pull/51755) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix flaky KeeperMap test [#51764](https://github.com/ClickHouse/ClickHouse/pull/51764) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Version mypy=1.4.1 falsely reports unused ignore comment [#51769](https://github.com/ClickHouse/ClickHouse/pull/51769) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Avoid keeping lock Context::getLock() while calculating access rights [#51772](https://github.com/ClickHouse/ClickHouse/pull/51772) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Making stateless tests with timeout less flaky [#51774](https://github.com/ClickHouse/ClickHouse/pull/51774) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix after [#51000](https://github.com/ClickHouse/ClickHouse/issues/51000) [#51790](https://github.com/ClickHouse/ClickHouse/pull/51790) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add assert in ThreadStatus destructor for correct current_thread [#51800](https://github.com/ClickHouse/ClickHouse/pull/51800) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix broken parts handling in `ReplicatedMergeTree` [#51801](https://github.com/ClickHouse/ClickHouse/pull/51801) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix tsan signal-unsafe call [#51802](https://github.com/ClickHouse/ClickHouse/pull/51802) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Fix for parallel replicas not completely disabled by granule count threshold [#51805](https://github.com/ClickHouse/ClickHouse/pull/51805) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Make sure that we don't attempt to serialize/deserialize block with 0 columns and non-zero rows [#51807](https://github.com/ClickHouse/ClickHouse/pull/51807) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix rare bug in `DROP COLUMN` and enabled sparse columns [#51809](https://github.com/ClickHouse/ClickHouse/pull/51809) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix flaky `test_multiple_disks` [#51821](https://github.com/ClickHouse/ClickHouse/pull/51821) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Follow up to [#51547](https://github.com/ClickHouse/ClickHouse/issues/51547) [#51822](https://github.com/ClickHouse/ClickHouse/pull/51822) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Correctly grep archives in stress tests [#51824](https://github.com/ClickHouse/ClickHouse/pull/51824) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update analyzer_tech_debt.txt [#51836](https://github.com/ClickHouse/ClickHouse/pull/51836) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* remove unused code [#51837](https://github.com/ClickHouse/ClickHouse/pull/51837) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Fix disk config for upgrade tests [#51839](https://github.com/ClickHouse/ClickHouse/pull/51839) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Remove Coverity from workflows, but leave in the code [#51842](https://github.com/ClickHouse/ClickHouse/pull/51842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Many fixes [3] [#51848](https://github.com/ClickHouse/ClickHouse/pull/51848) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Change misleading name in joins: addJoinedBlock -> addBlockToJoin [#51852](https://github.com/ClickHouse/ClickHouse/pull/51852) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* fix: correct exception messages on policies comparison [#51854](https://github.com/ClickHouse/ClickHouse/pull/51854) ([Feng Kaiyu](https://github.com/fky2015)).
|
||||||
|
* Update 02439_merge_selecting_partitions.sql [#51862](https://github.com/ClickHouse/ClickHouse/pull/51862) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Remove useless packages [#51863](https://github.com/ClickHouse/ClickHouse/pull/51863) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove useless logs [#51865](https://github.com/ClickHouse/ClickHouse/pull/51865) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix incorrect log level = warning [#51867](https://github.com/ClickHouse/ClickHouse/pull/51867) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test_replicated_table_attach [#51868](https://github.com/ClickHouse/ClickHouse/pull/51868) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Better usability of a test [#51869](https://github.com/ClickHouse/ClickHouse/pull/51869) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove useless code [#51873](https://github.com/ClickHouse/ClickHouse/pull/51873) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Another fix upgrade check script [#51878](https://github.com/ClickHouse/ClickHouse/pull/51878) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* SQLLogic improvements [#51883](https://github.com/ClickHouse/ClickHouse/pull/51883) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Disable ThinLTO on non-Linux [#51897](https://github.com/ClickHouse/ClickHouse/pull/51897) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Pin rust nightly (to make it stable) [#51903](https://github.com/ClickHouse/ClickHouse/pull/51903) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix build [#51909](https://github.com/ClickHouse/ClickHouse/pull/51909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix build [#51910](https://github.com/ClickHouse/ClickHouse/pull/51910) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix flaky test `00175_partition_by_ignore` and move it to correct location [#51913](https://github.com/ClickHouse/ClickHouse/pull/51913) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix flaky test 02360_send_logs_level_colors: avoid usage of `file` tool [#51914](https://github.com/ClickHouse/ClickHouse/pull/51914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Maybe better tests [#51916](https://github.com/ClickHouse/ClickHouse/pull/51916) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Revert system drop filesystem cache by key [#51917](https://github.com/ClickHouse/ClickHouse/pull/51917) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix flaky test `detach_attach_partition_race` [#51920](https://github.com/ClickHouse/ClickHouse/pull/51920) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Another fix for `02481_async_insert_race_long` [#51925](https://github.com/ClickHouse/ClickHouse/pull/51925) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix segfault caused by `ThreadStatus` [#51931](https://github.com/ClickHouse/ClickHouse/pull/51931) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Print short fault info only from safe fields [#51932](https://github.com/ClickHouse/ClickHouse/pull/51932) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix typo in integration tests [#51944](https://github.com/ClickHouse/ClickHouse/pull/51944) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Better logs on shutdown [#51951](https://github.com/ClickHouse/ClickHouse/pull/51951) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Filter databases list before querying potentially slow fields [#51955](https://github.com/ClickHouse/ClickHouse/pull/51955) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Fix some issues with transactions [#51959](https://github.com/ClickHouse/ClickHouse/pull/51959) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix unrelated messages from LSan in clickhouse-client [#51966](https://github.com/ClickHouse/ClickHouse/pull/51966) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Allow OOM in AST Fuzzer with Sanitizers [#51967](https://github.com/ClickHouse/ClickHouse/pull/51967) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Disable one test under Analyzer [#51968](https://github.com/ClickHouse/ClickHouse/pull/51968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix Docker [#51969](https://github.com/ClickHouse/ClickHouse/pull/51969) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test `01825_type_json_from_map` [#51970](https://github.com/ClickHouse/ClickHouse/pull/51970) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test `02354_distributed_with_external_aggregation_memory_usage` [#51971](https://github.com/ClickHouse/ClickHouse/pull/51971) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix disaster in integration tests, part 2 [#51973](https://github.com/ClickHouse/ClickHouse/pull/51973) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* [RFC] Cleanup remote_servers in dist config.xml [#51985](https://github.com/ClickHouse/ClickHouse/pull/51985) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.6.2.18-stable [#51986](https://github.com/ClickHouse/ClickHouse/pull/51986) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.20.11-lts [#51987](https://github.com/ClickHouse/ClickHouse/pull/51987) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix performance test for regexp cache [#51988](https://github.com/ClickHouse/ClickHouse/pull/51988) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Move a test to the right place [#51989](https://github.com/ClickHouse/ClickHouse/pull/51989) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a check to validate that the stateful tests are stateful [#51990](https://github.com/ClickHouse/ClickHouse/pull/51990) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Check that functional tests cleanup their tables [#51991](https://github.com/ClickHouse/ClickHouse/pull/51991) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test_extreme_deduplication [#51992](https://github.com/ClickHouse/ClickHouse/pull/51992) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Cleanup SymbolIndex after reload got removed [#51993](https://github.com/ClickHouse/ClickHouse/pull/51993) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update CompletedPipelineExecutor exception log name [#52028](https://github.com/ClickHouse/ClickHouse/pull/52028) ([xiao](https://github.com/nicelulu)).
|
||||||
|
* Fix `00502_custom_partitioning_replicated_zookeeper_long` [#52032](https://github.com/ClickHouse/ClickHouse/pull/52032) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Prohibit send_metadata for s3_plain disks [#52038](https://github.com/ClickHouse/ClickHouse/pull/52038) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.4.6.25-stable [#52061](https://github.com/ClickHouse/ClickHouse/pull/52061) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Preparations for Trivial Support For Resharding (part1) [#52068](https://github.com/ClickHouse/ClickHouse/pull/52068) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.3.8.21-lts [#52077](https://github.com/ClickHouse/ClickHouse/pull/52077) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix flakiness of test_keeper_s3_snapshot flakiness [#52083](https://github.com/ClickHouse/ClickHouse/pull/52083) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test_extreme_deduplication flakiness [#52085](https://github.com/ClickHouse/ClickHouse/pull/52085) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Small docs update for toYearWeek() function [#52090](https://github.com/ClickHouse/ClickHouse/pull/52090) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Small docs update for DateTime, DateTime64 [#52094](https://github.com/ClickHouse/ClickHouse/pull/52094) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Add missing --force for docker network prune (otherwise it is noop on CI) [#52095](https://github.com/ClickHouse/ClickHouse/pull/52095) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* tests: drop existing view in test_materialized_mysql_database [#52103](https://github.com/ClickHouse/ClickHouse/pull/52103) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update README.md [#52115](https://github.com/ClickHouse/ClickHouse/pull/52115) ([Tyler Hannan](https://github.com/tylerhannan)).
|
||||||
|
* Print Zxid in keeper stat command in hex (so as ZooKeeper) [#52122](https://github.com/ClickHouse/ClickHouse/pull/52122) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Skip protection from double decompression if inode from maps cannot be obtained [#52138](https://github.com/ClickHouse/ClickHouse/pull/52138) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* There is no point in detecting flaky tests [#52142](https://github.com/ClickHouse/ClickHouse/pull/52142) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove default argument value [#52143](https://github.com/ClickHouse/ClickHouse/pull/52143) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix the "kill_mutation" test [#52144](https://github.com/ClickHouse/ClickHouse/pull/52144) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix ORDER BY tuple of WINDOW functions (and slightly more changes) [#52146](https://github.com/ClickHouse/ClickHouse/pull/52146) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix possible EADDRINUSE ("Address already in use") in integration tests [#52148](https://github.com/ClickHouse/ClickHouse/pull/52148) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test 02497_storage_file_reader_selection [#52154](https://github.com/ClickHouse/ClickHouse/pull/52154) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix unexpected AST Set [#52158](https://github.com/ClickHouse/ClickHouse/pull/52158) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix slow test `02317_distinct_in_order_optimization` [#52173](https://github.com/ClickHouse/ClickHouse/pull/52173) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add comments for https://github.com/ClickHouse/ClickHouse/pull/52112 [#52175](https://github.com/ClickHouse/ClickHouse/pull/52175) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Randomize timezone in tests across non-deterministic around 1970 and default [#52184](https://github.com/ClickHouse/ClickHouse/pull/52184) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `test_multiple_disks/test.py::test_start_stop_moves` [#52189](https://github.com/ClickHouse/ClickHouse/pull/52189) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* CMake: Simplify job limiting [#52196](https://github.com/ClickHouse/ClickHouse/pull/52196) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix self extracting binaries under qemu linux-user (qemu-$ARCH-static) [#52198](https://github.com/ClickHouse/ClickHouse/pull/52198) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `Integration tests flaky check (asan)` [#52201](https://github.com/ClickHouse/ClickHouse/pull/52201) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix flaky test test_lost_part [#52202](https://github.com/ClickHouse/ClickHouse/pull/52202) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* MaterializedMySQL: Replace to_string by magic_enum::enum_name [#52204](https://github.com/ClickHouse/ClickHouse/pull/52204) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* MaterializedMySQL: Add tests to parse db and table names from DDL [#52208](https://github.com/ClickHouse/ClickHouse/pull/52208) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Revert "Fixed several issues found by OSS-Fuzz" [#52216](https://github.com/ClickHouse/ClickHouse/pull/52216) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Use one copy replication more agressively [#52218](https://github.com/ClickHouse/ClickHouse/pull/52218) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix flaky test `01076_parallel_alter_replicated_zookeeper` [#52221](https://github.com/ClickHouse/ClickHouse/pull/52221) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix 01889_key_condition_function_chains for analyzer. [#52223](https://github.com/ClickHouse/ClickHouse/pull/52223) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Inhibit settings randomization in the test `json_ghdata` [#52226](https://github.com/ClickHouse/ClickHouse/pull/52226) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Slightly better diagnostics in a test [#52227](https://github.com/ClickHouse/ClickHouse/pull/52227) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Enable no-upgrade-check for 02273_full_sort_join [#52235](https://github.com/ClickHouse/ClickHouse/pull/52235) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix network manager for integration tests [#52237](https://github.com/ClickHouse/ClickHouse/pull/52237) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* List replication queue only for current test database [#52238](https://github.com/ClickHouse/ClickHouse/pull/52238) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Attempt to fix assert in tsan with fibers [#52241](https://github.com/ClickHouse/ClickHouse/pull/52241) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix undefined behaviour in fuzzer [#52256](https://github.com/ClickHouse/ClickHouse/pull/52256) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Follow-up to [#51959](https://github.com/ClickHouse/ClickHouse/issues/51959) [#52261](https://github.com/ClickHouse/ClickHouse/pull/52261) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* More fair queue for `drop table sync` [#52276](https://github.com/ClickHouse/ClickHouse/pull/52276) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `02497_trace_events_stress_long` [#52279](https://github.com/ClickHouse/ClickHouse/pull/52279) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix test `01111_create_drop_replicated_db_stress` [#52283](https://github.com/ClickHouse/ClickHouse/pull/52283) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix ugly code [#52284](https://github.com/ClickHouse/ClickHouse/pull/52284) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add missing replica syncs in test_backup_restore_on_cluster [#52306](https://github.com/ClickHouse/ClickHouse/pull/52306) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Fix test_replicated_database 'node doesn't exist' flakiness [#52307](https://github.com/ClickHouse/ClickHouse/pull/52307) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Minor: Update description of events "QueryCacheHits/Misses" [#52309](https://github.com/ClickHouse/ClickHouse/pull/52309) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Beautify pretty-printing of the query string in SYSTEM.QUERY_CACHE [#52312](https://github.com/ClickHouse/ClickHouse/pull/52312) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Reduce dependencies for skim by avoid using default features [#52316](https://github.com/ClickHouse/ClickHouse/pull/52316) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix 02725_memory-for-merges [#52317](https://github.com/ClickHouse/ClickHouse/pull/52317) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Skip unsupported disks in Keeper [#52321](https://github.com/ClickHouse/ClickHouse/pull/52321) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Revert "Improve CSVInputFormat to check and set default value to column if deserialize failed" [#52322](https://github.com/ClickHouse/ClickHouse/pull/52322) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Resubmit [#51716](https://github.com/ClickHouse/ClickHouse/issues/51716) [#52323](https://github.com/ClickHouse/ClickHouse/pull/52323) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add logging about all found workflows for merge_pr.py [#52324](https://github.com/ClickHouse/ClickHouse/pull/52324) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Minor: Less awkward IAST::FormatSettings [#52332](https://github.com/ClickHouse/ClickHouse/pull/52332) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Mark test 02125_many_mutations_2 as no-parallel to avoid flakiness [#52338](https://github.com/ClickHouse/ClickHouse/pull/52338) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix capabilities installed via systemd service (fixes netlink/IO priorities) [#52357](https://github.com/ClickHouse/ClickHouse/pull/52357) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Update 01606_git_import.sh [#52360](https://github.com/ClickHouse/ClickHouse/pull/52360) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Update ci-slack-bot.py [#52372](https://github.com/ClickHouse/ClickHouse/pull/52372) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `test_keeper_session` [#52373](https://github.com/ClickHouse/ClickHouse/pull/52373) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update ci-slack-bot.py [#52374](https://github.com/ClickHouse/ClickHouse/pull/52374) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Disable analyzer setting in backward_compatibility integration tests. [#52375](https://github.com/ClickHouse/ClickHouse/pull/52375) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* New metric - Filesystem cache size limit [#52378](https://github.com/ClickHouse/ClickHouse/pull/52378) ([Krzysztof Góralski](https://github.com/kgoralski)).
|
||||||
|
* Fix `test_replicated_merge_tree_encrypted_disk ` [#52379](https://github.com/ClickHouse/ClickHouse/pull/52379) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix `02122_parallel_formatting_XML ` [#52380](https://github.com/ClickHouse/ClickHouse/pull/52380) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Follow up to [#49698](https://github.com/ClickHouse/ClickHouse/issues/49698) [#52381](https://github.com/ClickHouse/ClickHouse/pull/52381) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Less replication errors [#52382](https://github.com/ClickHouse/ClickHouse/pull/52382) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Rename TaskStatsInfoGetter into NetlinkMetricsProvider [#52392](https://github.com/ClickHouse/ClickHouse/pull/52392) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `test_keeper_force_recovery` [#52408](https://github.com/ClickHouse/ClickHouse/pull/52408) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix flaky gtest_lru_file_cache.cpp [#52418](https://github.com/ClickHouse/ClickHouse/pull/52418) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix: remove redundant distinct with views [#52438](https://github.com/ClickHouse/ClickHouse/pull/52438) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Add 02815_range_dict_no_direct_join to analyzer_tech_debt.txt [#52464](https://github.com/ClickHouse/ClickHouse/pull/52464) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* do not throw exception in OptimizedRegularExpressionImpl::analyze [#52467](https://github.com/ClickHouse/ClickHouse/pull/52467) ([Han Fei](https://github.com/hanfei1991)).
|
||||||
|
* Remove skip_startup_tables from IDatabase::loadStoredObjects() [#52491](https://github.com/ClickHouse/ClickHouse/pull/52491) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix test_insert_same_partition_and_merge by increasing wait time [#52497](https://github.com/ClickHouse/ClickHouse/pull/52497) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Try to fix asan wanring in HashJoin [#52499](https://github.com/ClickHouse/ClickHouse/pull/52499) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Replace with three way comparison [#52509](https://github.com/ClickHouse/ClickHouse/pull/52509) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Fix flakiness of test_version_update_after_mutation by enabling force_remove_data_recursively_on_drop [#52514](https://github.com/ClickHouse/ClickHouse/pull/52514) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `test_throttling` [#52515](https://github.com/ClickHouse/ClickHouse/pull/52515) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Improve logging macros [#52519](https://github.com/ClickHouse/ClickHouse/pull/52519) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `toDecimalString` function [#52520](https://github.com/ClickHouse/ClickHouse/pull/52520) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Remove unused code [#52527](https://github.com/ClickHouse/ClickHouse/pull/52527) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Cancel execution in PipelineExecutor in case of exception in graph->updateNode [#52533](https://github.com/ClickHouse/ClickHouse/pull/52533) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Make 01951_distributed_push_down_limit analyzer agnostic [#52534](https://github.com/ClickHouse/ClickHouse/pull/52534) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix disallow_concurrency test for backup and restore [#52536](https://github.com/ClickHouse/ClickHouse/pull/52536) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Update 02136_scalar_subquery_metrics.sql [#52537](https://github.com/ClickHouse/ClickHouse/pull/52537) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* tests: fix 01035_avg_weighted_long flakiness [#52556](https://github.com/ClickHouse/ClickHouse/pull/52556) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* tests: increase throttling for 01923_network_receive_time_metric_insert [#52557](https://github.com/ClickHouse/ClickHouse/pull/52557) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* tests: fix 00719_parallel_ddl_table flakiness in debug builds [#52558](https://github.com/ClickHouse/ClickHouse/pull/52558) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* tests: fix 01821_join_table_race_long flakiness [#52559](https://github.com/ClickHouse/ClickHouse/pull/52559) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix flaky `00995_exception_while_insert` [#52568](https://github.com/ClickHouse/ClickHouse/pull/52568) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* MaterializedMySQL: Fix typos in tests [#52575](https://github.com/ClickHouse/ClickHouse/pull/52575) ([Val Doroshchuk](https://github.com/valbok)).
|
||||||
|
* Fix `02497_trace_events_stress_long` again [#52587](https://github.com/ClickHouse/ClickHouse/pull/52587) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Revert "Remove `mmap/mremap/munmap` from Allocator.h" [#52589](https://github.com/ClickHouse/ClickHouse/pull/52589) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Remove peak memory usage from the final message in the client [#52598](https://github.com/ClickHouse/ClickHouse/pull/52598) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* GinIndexStore: fix a bug when files are finalizated after first write, [#52602](https://github.com/ClickHouse/ClickHouse/pull/52602) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix build with clang-15 [#52627](https://github.com/ClickHouse/ClickHouse/pull/52627) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix style [#52647](https://github.com/ClickHouse/ClickHouse/pull/52647) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix logging level of a noisy message [#52648](https://github.com/ClickHouse/ClickHouse/pull/52648) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Revert "Added field `refcount` to `system.remote_data_paths` table" [#52657](https://github.com/ClickHouse/ClickHouse/pull/52657) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
|
@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
|||||||
``` bash
|
``` bash
|
||||||
cd ClickHouse
|
cd ClickHouse
|
||||||
mkdir build-riscv64
|
mkdir build-riscv64
|
||||||
CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
|
CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
|
||||||
ninja -C build-riscv64
|
ninja -C build-riscv64
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -11,7 +11,8 @@ Supported platforms:
|
|||||||
|
|
||||||
- x86_64
|
- x86_64
|
||||||
- AArch64
|
- AArch64
|
||||||
- Power9 (experimental)
|
- PowerPC 64 LE (experimental)
|
||||||
|
- RISC-V 64 (experimental)
|
||||||
|
|
||||||
## Building on Ubuntu
|
## Building on Ubuntu
|
||||||
|
|
||||||
@ -42,7 +43,7 @@ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
|||||||
|
|
||||||
For other Linux distributions - check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).
|
For other Linux distributions - check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).
|
||||||
|
|
||||||
As of April 2023, any version of Clang >= 15 will work.
|
As of April 2023, clang-16 or higher will work.
|
||||||
GCC as a compiler is not supported.
|
GCC as a compiler is not supported.
|
||||||
To build with a specific Clang version:
|
To build with a specific Clang version:
|
||||||
|
|
||||||
@ -86,8 +87,8 @@ The build requires the following components:
|
|||||||
|
|
||||||
- Git (used to checkout the sources, not needed for the build)
|
- Git (used to checkout the sources, not needed for the build)
|
||||||
- CMake 3.20 or newer
|
- CMake 3.20 or newer
|
||||||
- Compiler: Clang 15 or newer
|
- Compiler: clang-16 or newer
|
||||||
- Linker: lld 15 or newer
|
- Linker: lld-16 or newer
|
||||||
- Ninja
|
- Ninja
|
||||||
- Yasm
|
- Yasm
|
||||||
- Gawk
|
- Gawk
|
||||||
|
@ -7,12 +7,8 @@ description: How to build Clickhouse and run benchmark with DEFLATE_QPL Codec
|
|||||||
|
|
||||||
# Build Clickhouse with DEFLATE_QPL
|
# Build Clickhouse with DEFLATE_QPL
|
||||||
|
|
||||||
- Make sure your target machine meets the QPL required [prerequisites](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#prerequisites)
|
- Make sure your host machine meets the QPL required [prerequisites](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#prerequisites)
|
||||||
- Pass the following flag to CMake when building ClickHouse:
|
- deflate_qpl is enabled by default during cmake build. In case you accidentally change it, please double-check build flag: ENABLE_QPL=1
|
||||||
|
|
||||||
``` bash
|
|
||||||
cmake -DENABLE_QPL=1 ..
|
|
||||||
```
|
|
||||||
|
|
||||||
- For generic requirements, please refer to Clickhouse generic [build instructions](/docs/en/development/build.md)
|
- For generic requirements, please refer to Clickhouse generic [build instructions](/docs/en/development/build.md)
|
||||||
|
|
||||||
|
@ -33,6 +33,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
|
|||||||
|
|
||||||
- `options` — MongoDB connection string options (optional parameter).
|
- `options` — MongoDB connection string options (optional parameter).
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
If you are using the MongoDB Atlas cloud offering please add these options:
|
||||||
|
|
||||||
|
```
|
||||||
|
'connectTimeoutMS=10000&ssl=true&authSource=admin'
|
||||||
|
```
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
## Usage Example {#usage-example}
|
## Usage Example {#usage-example}
|
||||||
|
|
||||||
Create a table in ClickHouse which allows to read data from MongoDB collection:
|
Create a table in ClickHouse which allows to read data from MongoDB collection:
|
||||||
|
@ -54,7 +54,7 @@ $ sudo mysql
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
|
mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
|
||||||
mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
|
mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
|
||||||
```
|
```
|
||||||
|
|
||||||
Then configure the connection in `/etc/odbc.ini`.
|
Then configure the connection in `/etc/odbc.ini`.
|
||||||
@ -66,7 +66,7 @@ DRIVER = /usr/local/lib/libmyodbc5w.so
|
|||||||
SERVER = 127.0.0.1
|
SERVER = 127.0.0.1
|
||||||
PORT = 3306
|
PORT = 3306
|
||||||
DATABASE = test
|
DATABASE = test
|
||||||
USERNAME = clickhouse
|
USER = clickhouse
|
||||||
PASSWORD = clickhouse
|
PASSWORD = clickhouse
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -83,6 +83,9 @@ $ isql -v mysqlconn
|
|||||||
Table in MySQL:
|
Table in MySQL:
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
|
mysql> CREATE DATABASE test;
|
||||||
|
Query OK, 1 row affected (0,01 sec)
|
||||||
|
|
||||||
mysql> CREATE TABLE `test`.`test` (
|
mysql> CREATE TABLE `test`.`test` (
|
||||||
-> `int_id` INT NOT NULL AUTO_INCREMENT,
|
-> `int_id` INT NOT NULL AUTO_INCREMENT,
|
||||||
-> `int_nullable` INT NULL DEFAULT NULL,
|
-> `int_nullable` INT NULL DEFAULT NULL,
|
||||||
@ -91,10 +94,10 @@ mysql> CREATE TABLE `test`.`test` (
|
|||||||
-> PRIMARY KEY (`int_id`));
|
-> PRIMARY KEY (`int_id`));
|
||||||
Query OK, 0 rows affected (0,09 sec)
|
Query OK, 0 rows affected (0,09 sec)
|
||||||
|
|
||||||
mysql> insert into test (`int_id`, `float`) VALUES (1,2);
|
mysql> insert into test.test (`int_id`, `float`) VALUES (1,2);
|
||||||
Query OK, 1 row affected (0,00 sec)
|
Query OK, 1 row affected (0,00 sec)
|
||||||
|
|
||||||
mysql> select * from test;
|
mysql> select * from test.test;
|
||||||
+------+----------+-----+----------+
|
+------+----------+-----+----------+
|
||||||
| int_id | int_nullable | float | float_nullable |
|
| int_id | int_nullable | float | float_nullable |
|
||||||
+------+----------+-----+----------+
|
+------+----------+-----+----------+
|
||||||
|
@ -57,7 +57,8 @@ Notice that the S3 endpoint in the `ENGINE` configuration uses the parameter tok
|
|||||||
|
|
||||||
:::note
|
:::note
|
||||||
As shown in the example, querying from S3 tables that are partitioned is
|
As shown in the example, querying from S3 tables that are partitioned is
|
||||||
not directly supported at this time, but can be accomplished by querying the bucket contents with a wildcard.
|
not directly supported at this time, but can be accomplished by querying the individual partitions
|
||||||
|
using the S3 table function.
|
||||||
|
|
||||||
The primary use-case for writing
|
The primary use-case for writing
|
||||||
partitioned data in S3 is to enable transferring that data into another
|
partitioned data in S3 is to enable transferring that data into another
|
||||||
@ -127,23 +128,7 @@ FROM s3('http://minio:10000/clickhouse//test_45.csv', 'minioadmin', 'minioadminp
|
|||||||
└────┴────┴────┘
|
└────┴────┴────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Select from all partitions
|
#### Limitation
|
||||||
|
|
||||||
```sql
|
|
||||||
SELECT *
|
|
||||||
FROM s3('http://minio:10000/clickhouse//**', 'minioadmin', 'minioadminpassword', 'CSV')
|
|
||||||
```
|
|
||||||
```response
|
|
||||||
┌─c1─┬─c2─┬─c3─┐
|
|
||||||
│ 3 │ 2 │ 1 │
|
|
||||||
└────┴────┴────┘
|
|
||||||
┌─c1─┬─c2─┬─c3─┐
|
|
||||||
│ 1 │ 2 │ 3 │
|
|
||||||
└────┴────┴────┘
|
|
||||||
┌─c1─┬─c2─┬─c3─┐
|
|
||||||
│ 78 │ 43 │ 45 │
|
|
||||||
└────┴────┴────┘
|
|
||||||
```
|
|
||||||
|
|
||||||
You may naturally try to `Select * from p`, but as noted above, this query will fail; use the preceding query.
|
You may naturally try to `Select * from p`, but as noted above, this query will fail; use the preceding query.
|
||||||
|
|
||||||
|
@ -37,8 +37,8 @@ The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does
|
|||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||||
(
|
(
|
||||||
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
|
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [TTL expr1] [CODEC(codec1)] [[NOT] NULL|PRIMARY KEY],
|
||||||
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
|
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [TTL expr2] [CODEC(codec2)] [[NOT] NULL|PRIMARY KEY],
|
||||||
...
|
...
|
||||||
INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
|
INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
|
||||||
INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
|
INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
|
||||||
|
@ -106,3 +106,4 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
|
|||||||
## Storage Settings {#storage-settings}
|
## Storage Settings {#storage-settings}
|
||||||
|
|
||||||
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
||||||
|
- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows to disable decoding/encoding of the path in the URI. Disabled by default.
|
||||||
|
@ -76,6 +76,7 @@ The supported formats are:
|
|||||||
| [RowBinary](#rowbinary) | ✔ | ✔ |
|
| [RowBinary](#rowbinary) | ✔ | ✔ |
|
||||||
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||||
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||||
|
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
|
||||||
| [Native](#native) | ✔ | ✔ |
|
| [Native](#native) | ✔ | ✔ |
|
||||||
| [Null](#null) | ✗ | ✔ |
|
| [Null](#null) | ✗ | ✔ |
|
||||||
| [XML](#xml) | ✗ | ✔ |
|
| [XML](#xml) | ✗ | ✔ |
|
||||||
@ -471,6 +472,8 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
|
|||||||
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
|
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
|
||||||
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
|
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
|
||||||
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
|
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
|
||||||
|
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`.
|
||||||
|
- [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow to set default value to column when CSV field deserialization failed on bad value. Default value - `false`.
|
||||||
|
|
||||||
## CSVWithNames {#csvwithnames}
|
## CSVWithNames {#csvwithnames}
|
||||||
|
|
||||||
@ -1514,6 +1517,23 @@ If setting [input_format_with_types_use_header](/docs/en/operations/settings/set
|
|||||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
## RowBinaryWithDefaults {#rowbinarywithdefaults}
|
||||||
|
|
||||||
|
Similar to [RowBinary](#rowbinary), but with an extra byte before each column that indicates if default value should be used.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
:) select * from format('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x'010001000000')
|
||||||
|
|
||||||
|
┌──x─┬─y─┐
|
||||||
|
│ 42 │ 1 │
|
||||||
|
└────┴───┘
|
||||||
|
```
|
||||||
|
|
||||||
|
For column `x` there is only one byte `01` that indicates that default value should be used and no other data after this byte is provided.
|
||||||
|
For column `y` data starts with byte `00` that indicates that column has actual value that should be read from the subsequent data `01000000`.
|
||||||
|
|
||||||
## RowBinary format settings {#row-binary-format-settings}
|
## RowBinary format settings {#row-binary-format-settings}
|
||||||
|
|
||||||
- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
|
- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
|
||||||
|
@ -56,7 +56,7 @@ Connection: Close
|
|||||||
Content-Type: text/tab-separated-values; charset=UTF-8
|
Content-Type: text/tab-separated-values; charset=UTF-8
|
||||||
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
|
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
|
||||||
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
|
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
|
||||||
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","peak_memory_usage":"0"}
|
||||||
|
|
||||||
1
|
1
|
||||||
```
|
```
|
||||||
@ -286,9 +286,9 @@ Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you
|
|||||||
You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence:
|
You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence:
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"}
|
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128","peak_memory_usage":"4371480"}
|
||||||
X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"}
|
X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128","peak_memory_usage":"13621616"}
|
||||||
X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"}
|
X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128","peak_memory_usage":"23155600"}
|
||||||
```
|
```
|
||||||
|
|
||||||
Possible header fields:
|
Possible header fields:
|
||||||
@ -416,7 +416,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
|
|||||||
< X-ClickHouse-Format: Template
|
< X-ClickHouse-Format: Template
|
||||||
< X-ClickHouse-Timezone: Asia/Shanghai
|
< X-ClickHouse-Timezone: Asia/Shanghai
|
||||||
< Keep-Alive: timeout=3
|
< Keep-Alive: timeout=3
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","peak_memory_usage":"0"}
|
||||||
<
|
<
|
||||||
# HELP "Query" "Number of executing queries"
|
# HELP "Query" "Number of executing queries"
|
||||||
# TYPE "Query" counter
|
# TYPE "Query" counter
|
||||||
@ -581,7 +581,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
|
|||||||
< Content-Type: text/html; charset=UTF-8
|
< Content-Type: text/html; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=3
|
< Keep-Alive: timeout=3
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","peak_memory_usage":"0"}
|
||||||
<
|
<
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
Say Hi!%
|
Say Hi!%
|
||||||
@ -621,7 +621,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
|
|||||||
< Content-Type: text/plain; charset=UTF-8
|
< Content-Type: text/plain; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=3
|
< Keep-Alive: timeout=3
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","peak_memory_usage":"0"}
|
||||||
<
|
<
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
|
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
|
||||||
@ -673,7 +673,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
|
|||||||
< Content-Type: text/html; charset=UTF-8
|
< Content-Type: text/html; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=3
|
< Keep-Alive: timeout=3
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","peak_memory_usage":"0"}
|
||||||
<
|
<
|
||||||
<html><body>Absolute Path File</body></html>
|
<html><body>Absolute Path File</body></html>
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
@ -692,7 +692,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
|||||||
< Content-Type: text/html; charset=UTF-8
|
< Content-Type: text/html; charset=UTF-8
|
||||||
< Transfer-Encoding: chunked
|
< Transfer-Encoding: chunked
|
||||||
< Keep-Alive: timeout=3
|
< Keep-Alive: timeout=3
|
||||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","peak_memory_usage":"0"}
|
||||||
<
|
<
|
||||||
<html><body>Relative Path File</body></html>
|
<html><body>Relative Path File</body></html>
|
||||||
* Connection #0 to host localhost left intact
|
* Connection #0 to host localhost left intact
|
||||||
|
@ -30,7 +30,7 @@ description: In order to effectively mitigate possible human errors, you should
|
|||||||
```
|
```
|
||||||
|
|
||||||
:::note ALL
|
:::note ALL
|
||||||
`ALL` is only applicable to the `RESTORE` command prior to version 23.4 of Clickhouse.
|
Prior to version 23.4 of ClickHouse, `ALL` was only applicable to the `RESTORE` command.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## Background
|
## Background
|
||||||
|
@ -65,6 +65,43 @@ XML substitution example:
|
|||||||
|
|
||||||
Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
|
Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
|
||||||
|
|
||||||
|
## Encrypting Configuration {#encryption}
|
||||||
|
|
||||||
|
You can use symmetric encryption to encrypt a configuration element, for example, a password field. To do so, first configure the [encryption codec](../sql-reference/statements/create/table.md#encryption-codecs), then add attribute `encrypted_by` with the name of the encryption codec as value to the element to encrypt.
|
||||||
|
|
||||||
|
Unlike attributes `from_zk`, `from_env` and `incl` (or element `include`), no substitution, i.e. decryption of the encrypted value, is performed in the preprocessed file. Decryption happens only at runtime in the server process.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<clickhouse>
|
||||||
|
|
||||||
|
<encryption_codecs>
|
||||||
|
<aes_128_gcm_siv>
|
||||||
|
<key_hex>00112233445566778899aabbccddeeff</key_hex>
|
||||||
|
</aes_128_gcm_siv>
|
||||||
|
</encryption_codecs>
|
||||||
|
|
||||||
|
<interserver_http_credentials>
|
||||||
|
<user>admin</user>
|
||||||
|
<password encrypted_by="AES_128_GCM_SIV">961F000000040000000000EEDDEF4F453CFE6457C4234BD7C09258BD651D85</password>
|
||||||
|
</interserver_http_credentials>
|
||||||
|
|
||||||
|
</clickhouse>
|
||||||
|
```
|
||||||
|
|
||||||
|
To encrypt a value, you can use the (example) program `encrypt_decrypt`:
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
./encrypt_decrypt /etc/clickhouse-server/config.xml -e AES_128_GCM_SIV abcd
|
||||||
|
```
|
||||||
|
|
||||||
|
``` text
|
||||||
|
961F000000040000000000EEDDEF4F453CFE6457C4234BD7C09258BD651D85
|
||||||
|
```
|
||||||
|
|
||||||
## User Settings {#user-settings}
|
## User Settings {#user-settings}
|
||||||
|
|
||||||
The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the `users_config` element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.
|
The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the `users_config` element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.
|
||||||
@ -104,12 +141,17 @@ Here you can see default config written in YAML: [config.yaml.example](https://g
|
|||||||
|
|
||||||
There are some differences between YAML and XML formats in terms of ClickHouse configurations. Here are some tips for writing a configuration in YAML format.
|
There are some differences between YAML and XML formats in terms of ClickHouse configurations. Here are some tips for writing a configuration in YAML format.
|
||||||
|
|
||||||
You should use a Scalar node to write a key-value pair:
|
An XML tag with a text value is represented by a YAML key-value pair
|
||||||
``` yaml
|
``` yaml
|
||||||
key: value
|
key: value
|
||||||
```
|
```
|
||||||
|
|
||||||
To create a node, containing other nodes you should use a Map:
|
Corresponding XML:
|
||||||
|
``` xml
|
||||||
|
<key>value</value>
|
||||||
|
```
|
||||||
|
|
||||||
|
A nested XML node is represented by a YAML map:
|
||||||
``` yaml
|
``` yaml
|
||||||
map_key:
|
map_key:
|
||||||
key1: val1
|
key1: val1
|
||||||
@ -117,7 +159,16 @@ map_key:
|
|||||||
key3: val3
|
key3: val3
|
||||||
```
|
```
|
||||||
|
|
||||||
To create a list of values or nodes assigned to one tag you should use a Sequence:
|
Corresponding XML:
|
||||||
|
``` xml
|
||||||
|
<map_key>
|
||||||
|
<key1>val1</key1>
|
||||||
|
<key2>val2</key2>
|
||||||
|
<key3>val3</key3>
|
||||||
|
</map_key>
|
||||||
|
```
|
||||||
|
|
||||||
|
To create the same XML tag multiple times, use a YAML sequence:
|
||||||
``` yaml
|
``` yaml
|
||||||
seq_key:
|
seq_key:
|
||||||
- val1
|
- val1
|
||||||
@ -128,8 +179,22 @@ seq_key:
|
|||||||
key3: val5
|
key3: val5
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to write an attribute for a Sequence or Map node, you should use a @ prefix before the attribute key. Note, that @ is reserved by YAML standard, so you should also to wrap it into double quotes:
|
Corresponding XML:
|
||||||
|
```xml
|
||||||
|
<seq_key>val1</seq_key>
|
||||||
|
<seq_key>val2</seq_key>
|
||||||
|
<seq_key>
|
||||||
|
<key1>val3</key1>
|
||||||
|
</seq_key>
|
||||||
|
<seq_key>
|
||||||
|
<map>
|
||||||
|
<key2>val4</key2>
|
||||||
|
<key3>val5</key3>
|
||||||
|
</map>
|
||||||
|
</seq_key>
|
||||||
|
```
|
||||||
|
|
||||||
|
To provide an XML attribute, you can use an attribute key with a `@` prefix. Note that `@` is reserved by YAML standard, so must be wrapped in double quotes:
|
||||||
``` yaml
|
``` yaml
|
||||||
map:
|
map:
|
||||||
"@attr1": value1
|
"@attr1": value1
|
||||||
@ -137,16 +202,14 @@ map:
|
|||||||
key: 123
|
key: 123
|
||||||
```
|
```
|
||||||
|
|
||||||
From that Map we will get these XML nodes:
|
Corresponding XML:
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<map attr1="value1" attr2="value2">
|
<map attr1="value1" attr2="value2">
|
||||||
<key>123</key>
|
<key>123</key>
|
||||||
</map>
|
</map>
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also set attributes for Sequence:
|
It is also possible to use attributes in YAML sequence:
|
||||||
|
|
||||||
``` yaml
|
``` yaml
|
||||||
seq:
|
seq:
|
||||||
- "@attr1": value1
|
- "@attr1": value1
|
||||||
@ -155,13 +218,25 @@ seq:
|
|||||||
- abc
|
- abc
|
||||||
```
|
```
|
||||||
|
|
||||||
So, we can get YAML config equal to this XML one:
|
Corresponding XML:
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<seq attr1="value1" attr2="value2">123</seq>
|
<seq attr1="value1" attr2="value2">123</seq>
|
||||||
<seq attr1="value1" attr2="value2">abc</seq>
|
<seq attr1="value1" attr2="value2">abc</seq>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The aforementioned syntax does not allow to express XML text nodes with XML attributes as YAML. This special case can be achieved using an
|
||||||
|
`#text` attribute key:
|
||||||
|
```yaml
|
||||||
|
map_key:
|
||||||
|
"@attr1": value1
|
||||||
|
"#text": value2
|
||||||
|
```
|
||||||
|
|
||||||
|
Corresponding XML:
|
||||||
|
```xml
|
||||||
|
<map_key attr1="value1">value2</map>
|
||||||
|
```
|
||||||
|
|
||||||
## Implementation Details {#implementation-details}
|
## Implementation Details {#implementation-details}
|
||||||
|
|
||||||
For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
|
For each config file, the server also generates `file-preprocessed.xml` files when starting. These files contain all the completed substitutions and overrides, and they are intended for informational use. If ZooKeeper substitutions were used in the config files but ZooKeeper is not available on the server start, the server loads the configuration from the preprocessed file.
|
||||||
|
@ -61,9 +61,12 @@ use_query_cache = true`) but one should keep in mind that all `SELECT` queries i
|
|||||||
may return cached results then.
|
may return cached results then.
|
||||||
|
|
||||||
The query cache can be cleared using statement `SYSTEM DROP QUERY CACHE`. The content of the query cache is displayed in system table
|
The query cache can be cleared using statement `SYSTEM DROP QUERY CACHE`. The content of the query cache is displayed in system table
|
||||||
`system.query_cache`. The number of query cache hits and misses are shown as events "QueryCacheHits" and "QueryCacheMisses" in system table
|
`system.query_cache`. The number of query cache hits and misses since database start are shown as events "QueryCacheHits" and
|
||||||
`system.events`. Both counters are only updated for `SELECT` queries which run with setting "use_query_cache = true". Other queries do not
|
"QueryCacheMisses" in system table [system.events](system-tables/events.md). Both counters are only updated for `SELECT` queries which run
|
||||||
affect the cache miss counter.
|
with setting `use_query_cache = true`, other queries do not affect "QueryCacheMisses". Field `query_log_usage` in system table
|
||||||
|
[system.query_log](system-tables/query_log.md) shows for each executed query whether the query result was written into or read from the
|
||||||
|
query cache. Asynchronous metrics "QueryCacheEntries" and "QueryCacheBytes" in system table
|
||||||
|
[system.asynchronous_metrics](system-tables/asynchronous_metrics.md) show how many entries / bytes the query cache currently contains.
|
||||||
|
|
||||||
The query cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can be
|
The query cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can be
|
||||||
changed (see below) but doing so is not recommended for security reasons.
|
changed (see below) but doing so is not recommended for security reasons.
|
||||||
|
@ -1,11 +1,11 @@
|
|||||||
---
|
---
|
||||||
slug: /en/operations/server-configuration-parameters/settings
|
slug: /en/operations/server-configuration-parameters/settings
|
||||||
sidebar_position: 57
|
sidebar_position: 57
|
||||||
sidebar_label: Server Settings
|
sidebar_label: Global Server Settings
|
||||||
description: This section contains descriptions of server settings that cannot be changed at the session or query level.
|
description: This section contains descriptions of server settings that cannot be changed at the session or query level.
|
||||||
---
|
---
|
||||||
|
|
||||||
# Server Settings
|
# Global Server Settings
|
||||||
|
|
||||||
This section contains descriptions of server settings that cannot be changed at the session or query level.
|
This section contains descriptions of server settings that cannot be changed at the session or query level.
|
||||||
|
|
||||||
@ -512,7 +512,7 @@ Both the cache for `local_disk`, and temporary data will be stored in `/tiny_loc
|
|||||||
<type>cache</type>
|
<type>cache</type>
|
||||||
<disk>local_disk</disk>
|
<disk>local_disk</disk>
|
||||||
<path>/tiny_local_cache/</path>
|
<path>/tiny_local_cache/</path>
|
||||||
<max_size>10M</max_size>
|
<max_size_rows>10M</max_size_rows>
|
||||||
<max_file_segment_size>1M</max_file_segment_size>
|
<max_file_segment_size>1M</max_file_segment_size>
|
||||||
<cache_on_write_operations>1</cache_on_write_operations>
|
<cache_on_write_operations>1</cache_on_write_operations>
|
||||||
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
|
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
|
||||||
@ -1201,13 +1201,58 @@ Keys:
|
|||||||
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
|
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
|
||||||
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
|
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
|
||||||
|
|
||||||
|
Both log and error log file names (only file names, not directories) support date and time format specifiers.
|
||||||
|
|
||||||
|
**Format specifiers**
|
||||||
|
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
|
||||||
|
|
||||||
|
| Specifier | Description | Example |
|
||||||
|
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
|
||||||
|
| %% | Literal % | % |
|
||||||
|
| %n | New-line character | |
|
||||||
|
| %t | Horizontal tab character | |
|
||||||
|
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
|
||||||
|
| %y | Last 2 digits of year as a decimal number (range [00,99]) | 23 |
|
||||||
|
| %C | First 2 digits of year as a decimal number (range [00,99]) | 20 |
|
||||||
|
| %G | Four-digit [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. Normally useful only with %V | 2023 |
|
||||||
|
| %g | Last 2 digits of [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. | 23 |
|
||||||
|
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
|
||||||
|
| %h | Synonym of %b | Jul |
|
||||||
|
| %B | Full month name, e.g. October (locale dependent) | July |
|
||||||
|
| %m | Month as a decimal number (range [01,12]) | 07 |
|
||||||
|
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
|
||||||
|
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
|
||||||
|
| %V | ISO 8601 week number (range [01,53]) | 27 |
|
||||||
|
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
|
||||||
|
| %d | Day of the month as a zero-padded decimal number (range [01,31]). Single digit is preceded by zero. | 06 |
|
||||||
|
| %e | Day of the month as a space-padded decimal number (range [1,31]). Single digit is preceded by a space. | 6 |
|
||||||
|
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
|
||||||
|
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
|
||||||
|
| %w | Weekday as a integer number with Sunday as 0 (range [0-6]) | 4 |
|
||||||
|
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
|
||||||
|
| %H | Hour as a decimal number, 24 hour clock (range [00-23]) | 18 |
|
||||||
|
| %I | Hour as a decimal number, 12 hour clock (range [01,12]) | 06 |
|
||||||
|
| %M | Minute as a decimal number (range [00,59]) | 32 |
|
||||||
|
| %S | Second as a decimal number (range [00,60]) | 07 |
|
||||||
|
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul 6 18:32:07 2023 |
|
||||||
|
| %x | Localized date representation (locale dependent) | 07/06/23 |
|
||||||
|
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
|
||||||
|
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 07/06/23 |
|
||||||
|
| %F | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2023-07-06 |
|
||||||
|
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
|
||||||
|
| %R | Equivalent to "%H:%M" | 18:32 |
|
||||||
|
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
|
||||||
|
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
|
||||||
|
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
|
||||||
|
| %Z | Locale-dependent time zone name or abbreviation, or no characters if the time zone information is not available | Z AWST |
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<logger>
|
<logger>
|
||||||
<level>trace</level>
|
<level>trace</level>
|
||||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
<log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
|
||||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
<errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
|
||||||
<size>1000M</size>
|
<size>1000M</size>
|
||||||
<count>10</count>
|
<count>10</count>
|
||||||
<stream_compress>true</stream_compress>
|
<stream_compress>true</stream_compress>
|
||||||
@ -1547,6 +1592,10 @@ To manually turn on metrics history collection [`system.metric_log`](../../opera
|
|||||||
<table>metric_log</table>
|
<table>metric_log</table>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
|
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</metric_log>
|
</metric_log>
|
||||||
</clickhouse>
|
</clickhouse>
|
||||||
```
|
```
|
||||||
@ -1650,6 +1699,14 @@ Use the following parameters to configure logging:
|
|||||||
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
@ -1661,6 +1718,10 @@ Use the following parameters to configure logging:
|
|||||||
<table>part_log</table>
|
<table>part_log</table>
|
||||||
<partition_by>toMonday(event_date)</partition_by>
|
<partition_by>toMonday(event_date)</partition_by>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</part_log>
|
</part_log>
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -1728,6 +1789,14 @@ Use the following parameters to configure logging:
|
|||||||
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
@ -1741,6 +1810,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
|
|||||||
<table>query_log</table>
|
<table>query_log</table>
|
||||||
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
|
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</query_log>
|
</query_log>
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -1786,6 +1859,14 @@ Use the following parameters to configure logging:
|
|||||||
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
@ -1799,6 +1880,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
|
|||||||
<table>query_thread_log</table>
|
<table>query_thread_log</table>
|
||||||
<partition_by>toMonday(event_date)</partition_by>
|
<partition_by>toMonday(event_date)</partition_by>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</query_thread_log>
|
</query_thread_log>
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -1816,6 +1901,14 @@ Use the following parameters to configure logging:
|
|||||||
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
@ -1829,6 +1922,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
|
|||||||
<table>query_views_log</table>
|
<table>query_views_log</table>
|
||||||
<partition_by>toYYYYMM(event_date)</partition_by>
|
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</query_views_log>
|
</query_views_log>
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -1845,6 +1942,14 @@ Parameters:
|
|||||||
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
@ -1856,13 +1961,16 @@ Parameters:
|
|||||||
<database>system</database>
|
<database>system</database>
|
||||||
<table>text_log</table>
|
<table>text_log</table>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
<!-- <partition_by>event_date</partition_by> -->
|
<!-- <partition_by>event_date</partition_by> -->
|
||||||
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
|
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
|
||||||
</text_log>
|
</text_log>
|
||||||
</clickhouse>
|
</clickhouse>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## trace_log {#server_configuration_parameters-trace_log}
|
## trace_log {#server_configuration_parameters-trace_log}
|
||||||
|
|
||||||
Settings for the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table operation.
|
Settings for the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table operation.
|
||||||
@ -1875,6 +1983,12 @@ Parameters:
|
|||||||
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
@ -1886,6 +2000,10 @@ The default server configuration file `config.xml` contains the following settin
|
|||||||
<table>trace_log</table>
|
<table>trace_log</table>
|
||||||
<partition_by>toYYYYMM(event_date)</partition_by>
|
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</trace_log>
|
</trace_log>
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -1900,9 +2018,18 @@ Parameters:
|
|||||||
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` defined.
|
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` defined.
|
||||||
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` defined.
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` defined.
|
||||||
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
- `storage_policy` – Name of storage policy to use for the table (optional)
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
<clickhouse>
|
<clickhouse>
|
||||||
<asynchronous_insert_log>
|
<asynchronous_insert_log>
|
||||||
@ -1910,11 +2037,53 @@ Parameters:
|
|||||||
<table>asynchronous_insert_log</table>
|
<table>asynchronous_insert_log</table>
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
<partition_by>toYYYYMM(event_date)</partition_by>
|
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
|
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
|
||||||
</asynchronous_insert_log>
|
</asynchronous_insert_log>
|
||||||
</clickhouse>
|
</clickhouse>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## crash_log {#server_configuration_parameters-crash_log}
|
||||||
|
|
||||||
|
Settings for the [crash_log](../../operations/system-tables/crash-log.md) system table operation.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
|
||||||
|
- `database` — Database for storing a table.
|
||||||
|
- `table` — Table name.
|
||||||
|
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` defined.
|
||||||
|
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` defined.
|
||||||
|
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` defined.
|
||||||
|
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
|
||||||
|
- `max_size_rows` – Maximal size in lines for the logs. When the amount of non-flushed logs reaches max_size_rows, logs are dumped to the disk.
|
||||||
|
Default: 1048576.
|
||||||
|
- `reserved_size_rows` – Pre-allocated memory size in lines for the logs.
|
||||||
|
Default: 8192.
|
||||||
|
- `buffer_size_rows_flush_threshold` – Lines amount threshold, reaching it launches flushing logs to the disk in background.
|
||||||
|
Default: `max_size_rows / 2`.
|
||||||
|
- `flush_on_crash` - Indication whether logs should be dumped to the disk in case of a crash.
|
||||||
|
Default: false.
|
||||||
|
- `storage_policy` – Name of storage policy to use for the table (optional)
|
||||||
|
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
|
||||||
|
|
||||||
|
The default server configuration file `config.xml` contains the following settings section:
|
||||||
|
|
||||||
|
``` xml
|
||||||
|
<crash_log>
|
||||||
|
<database>system</database>
|
||||||
|
<table>crash_log</table>
|
||||||
|
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||||
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1024</max_size_rows>
|
||||||
|
<reserved_size_rows>1024</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
|
</crash_log>
|
||||||
|
```
|
||||||
|
|
||||||
## query_masking_rules {#query-masking-rules}
|
## query_masking_rules {#query-masking-rules}
|
||||||
|
|
||||||
Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs,
|
Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs,
|
||||||
@ -2119,6 +2288,8 @@ This section contains the following parameters:
|
|||||||
- `session_timeout_ms` — Maximum timeout for the client session in milliseconds.
|
- `session_timeout_ms` — Maximum timeout for the client session in milliseconds.
|
||||||
- `operation_timeout_ms` — Maximum timeout for one operation in milliseconds.
|
- `operation_timeout_ms` — Maximum timeout for one operation in milliseconds.
|
||||||
- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
|
- `root` — The [znode](http://zookeeper.apache.org/doc/r3.5.5/zookeeperOver.html#Nodes+and+ephemeral+nodes) that is used as the root for znodes used by the ClickHouse server. Optional.
|
||||||
|
- `fallback_session_lifetime.min` - If the first zookeeper host resolved by zookeeper_load_balancing strategy is unavailable, limit the lifetime of a zookeeper session to the fallback node. This is done for load-balancing purposes to avoid excessive load on one of zookeeper hosts. This setting sets the minimal duration of the fallback session. Set in seconds. Optional. Default is 3 hours.
|
||||||
|
- `fallback_session_lifetime.max` - If the first zookeeper host resolved by zookeeper_load_balancing strategy is unavailable, limit the lifetime of a zookeeper session to the fallback node. This is done for load-balancing purposes to avoid excessive load on one of zookeeper hosts. This setting sets the maximum duration of the fallback session. Set in seconds. Optional. Default is 6 hours.
|
||||||
- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
|
- `identity` — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
|
||||||
- zookeeper_load_balancing - Specifies the algorithm of ZooKeeper node selection.
|
- zookeeper_load_balancing - Specifies the algorithm of ZooKeeper node selection.
|
||||||
* random - randomly selects one of ZooKeeper nodes.
|
* random - randomly selects one of ZooKeeper nodes.
|
||||||
|
@ -7,90 +7,16 @@ pagination_next: en/operations/settings/settings
|
|||||||
|
|
||||||
# Settings Overview
|
# Settings Overview
|
||||||
|
|
||||||
There are multiple ways to define ClickHouse settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.
|
There are two main groups of ClickHouse settings:
|
||||||
|
|
||||||
The order of priority for defining a setting is:
|
- Global server settings
|
||||||
|
- Query-level settings
|
||||||
|
|
||||||
1. Settings in the `users.xml` server configuration file
|
The main distinction between global server settings and query-level settings is that
|
||||||
|
global server settings must be set in configuration files while query-level settings
|
||||||
|
can be set in configuration files or with SQL queries.
|
||||||
|
|
||||||
- Set in the element `<profiles>`.
|
Read about [global server settings](/docs/en/operations/server-configuration-parameters/settings.md) to learn more about configuring your ClickHouse server at the global server level.
|
||||||
|
|
||||||
2. Session settings
|
Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query-level.
|
||||||
|
|
||||||
- Send `SET setting=value` from the ClickHouse console client in interactive mode.
|
|
||||||
Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter.
|
|
||||||
|
|
||||||
3. Query settings
|
|
||||||
|
|
||||||
- When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
|
|
||||||
- When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
|
|
||||||
- Define settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query) clause of the SELECT query. The setting value is applied only to that query and is reset to the default or previous value after the query is executed.
|
|
||||||
|
|
||||||
View the [Settings](./settings.md) page for a description of the ClickHouse settings.
|
|
||||||
|
|
||||||
## Converting a Setting to its Default Value
|
|
||||||
|
|
||||||
If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SET setting_name = DEFAULT
|
|
||||||
```
|
|
||||||
|
|
||||||
For example, the default value of `max_insert_block_size` is 1048449. Suppose you change its value to 100000:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SET max_insert_block_size=100000;
|
|
||||||
|
|
||||||
SELECT value FROM system.settings where name='max_insert_block_size';
|
|
||||||
```
|
|
||||||
|
|
||||||
The response is:
|
|
||||||
|
|
||||||
```response
|
|
||||||
┌─value──┐
|
|
||||||
│ 100000 │
|
|
||||||
└────────┘
|
|
||||||
```
|
|
||||||
|
|
||||||
The following command sets its value back to 1048449:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SET max_insert_block_size=DEFAULT;
|
|
||||||
|
|
||||||
SELECT value FROM system.settings where name='max_insert_block_size';
|
|
||||||
```
|
|
||||||
|
|
||||||
The setting is now back to its default:
|
|
||||||
|
|
||||||
```response
|
|
||||||
┌─value───┐
|
|
||||||
│ 1048449 │
|
|
||||||
└─────────┘
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Custom Settings {#custom_settings}
|
|
||||||
|
|
||||||
In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.
|
|
||||||
|
|
||||||
A custom setting name must begin with one of predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.
|
|
||||||
|
|
||||||
```xml
|
|
||||||
<custom_settings_prefixes>custom_</custom_settings_prefixes>
|
|
||||||
```
|
|
||||||
|
|
||||||
To define a custom setting use `SET` command:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SET custom_a = 123;
|
|
||||||
```
|
|
||||||
|
|
||||||
To get the current value of a custom setting use `getSetting()` function:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SELECT getSetting('custom_a');
|
|
||||||
```
|
|
||||||
|
|
||||||
**See Also**
|
|
||||||
|
|
||||||
- [Server Configuration Settings](../../operations/server-configuration-parameters/settings.md)
|
|
||||||
|
@ -327,3 +327,39 @@ The maximum amount of data consumed by temporary files on disk in bytes for all
|
|||||||
Zero means unlimited.
|
Zero means unlimited.
|
||||||
|
|
||||||
Default value: 0.
|
Default value: 0.
|
||||||
|
|
||||||
|
## max_sessions_for_user {#max-sessions-per-user}
|
||||||
|
|
||||||
|
Maximum number of simultaneous sessions per authenticated user to the ClickHouse server.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
``` xml
|
||||||
|
<profiles>
|
||||||
|
<single_session_profile>
|
||||||
|
<max_sessions_for_user>1</max_sessions_for_user>
|
||||||
|
</single_session_profile>
|
||||||
|
<two_sessions_profile>
|
||||||
|
<max_sessions_for_user>2</max_sessions_for_user>
|
||||||
|
</two_sessions_profile>
|
||||||
|
<unlimited_sessions_profile>
|
||||||
|
<max_sessions_for_user>0</max_sessions_for_user>
|
||||||
|
</unlimited_sessions_profile>
|
||||||
|
</profiles>
|
||||||
|
<users>
|
||||||
|
<!-- User Alice can connect to a ClickHouse server no more than once at a time. -->
|
||||||
|
<Alice>
|
||||||
|
<profile>single_session_profile</profile>
|
||||||
|
</Alice>
|
||||||
|
<!-- User Bob can use 2 simultaneous sessions. -->
|
||||||
|
<Bob>
|
||||||
|
<profile>two_sessions_profile</profile>
|
||||||
|
</Bob>
|
||||||
|
<!-- User Charles can use arbitrarily many simultaneous sessions. -->
|
||||||
|
<Charles>
|
||||||
|
<profile>unlimited_sessions_profile</profile>
|
||||||
|
</Charles>
|
||||||
|
</users>
|
||||||
|
```
|
||||||
|
|
||||||
|
Default value: 0 (Infinite count of simultaneous sessions).
|
||||||
|
@ -242,6 +242,26 @@ See also:
|
|||||||
- [DateTime data type.](../../sql-reference/data-types/datetime.md)
|
- [DateTime data type.](../../sql-reference/data-types/datetime.md)
|
||||||
- [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)
|
- [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)
|
||||||
|
|
||||||
|
## interval_output_format {#interval_output_format}
|
||||||
|
|
||||||
|
Allows choosing different output formats of the text representation of interval types.
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- `kusto` - KQL-style output format.
|
||||||
|
|
||||||
|
ClickHouse outputs intervals in [KQL format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-timespan-format-strings#the-constant-c-format-specifier). For example, `toIntervalDay(2)` would be formatted as `2.00:00:00`. Please note that for interval types of varying length (ie. `IntervalMonth` and `IntervalYear`) the average number of seconds per interval is taken into account.
|
||||||
|
|
||||||
|
- `numeric` - Numeric output format.
|
||||||
|
|
||||||
|
ClickHouse outputs intervals as their underlying numeric representation. For example, `toIntervalDay(2)` would be formatted as `2`.
|
||||||
|
|
||||||
|
Default value: `numeric`.
|
||||||
|
|
||||||
|
See also:
|
||||||
|
|
||||||
|
- [Interval](../../sql-reference/data-types/special-data-types/interval.md)
|
||||||
|
|
||||||
## input_format_ipv4_default_on_conversion_error {#input_format_ipv4_default_on_conversion_error}
|
## input_format_ipv4_default_on_conversion_error {#input_format_ipv4_default_on_conversion_error}
|
||||||
|
|
||||||
Deserialization of IPv4 will use default values instead of throwing exception on conversion error.
|
Deserialization of IPv4 will use default values instead of throwing exception on conversion error.
|
||||||
@ -931,6 +951,11 @@ Result
|
|||||||
```text
|
```text
|
||||||
" string "
|
" string "
|
||||||
```
|
```
|
||||||
|
### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}
|
||||||
|
|
||||||
|
Ignore extra columns in CSV input (if the file has more columns than expected) and treat missing fields in CSV input as default values.
|
||||||
|
|
||||||
|
Disabled by default.
|
||||||
|
|
||||||
### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
|
### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
|
||||||
|
|
||||||
@ -964,6 +989,28 @@ Result
|
|||||||
a b
|
a b
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### input_format_csv_use_default_on_bad_values {#input_format_csv_use_default_on_bad_values}
|
||||||
|
|
||||||
|
Allow to set default value to column when CSV field deserialization failed on bad value
|
||||||
|
|
||||||
|
Default value: `false`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
Query
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./clickhouse local -q "create table test_tbl (x String, y UInt32, z Date) engine=MergeTree order by x"
|
||||||
|
echo 'a,b,c' | ./clickhouse local -q "INSERT INTO test_tbl SETTINGS input_format_csv_use_default_on_bad_values=true FORMAT CSV"
|
||||||
|
./clickhouse local -q "select * from test_tbl"
|
||||||
|
```
|
||||||
|
|
||||||
|
Result
|
||||||
|
|
||||||
|
```text
|
||||||
|
a 0 1971-01-01
|
||||||
|
```
|
||||||
|
|
||||||
## Values format settings {#values-format-settings}
|
## Values format settings {#values-format-settings}
|
||||||
|
|
||||||
### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}
|
### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}
|
||||||
@ -1117,7 +1164,7 @@ Enabled by default.
|
|||||||
|
|
||||||
Compression method used in output Arrow format. Supported codecs: `lz4_frame`, `zstd`, `none` (uncompressed)
|
Compression method used in output Arrow format. Supported codecs: `lz4_frame`, `zstd`, `none` (uncompressed)
|
||||||
|
|
||||||
Default value: `none`.
|
Default value: `lz4_frame`.
|
||||||
|
|
||||||
## ORC format settings {#orc-format-settings}
|
## ORC format settings {#orc-format-settings}
|
||||||
|
|
||||||
@ -1300,6 +1347,17 @@ Default value: 0.
|
|||||||
|
|
||||||
Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format.
|
Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format.
|
||||||
|
|
||||||
|
Format:
|
||||||
|
``` text
|
||||||
|
http://[user:password@]machine[:port]
|
||||||
|
```
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
``` text
|
||||||
|
http://registry.example.com:8081
|
||||||
|
http://admin:secret@registry.example.com:8081
|
||||||
|
```
|
||||||
|
|
||||||
Default value: `Empty`.
|
Default value: `Empty`.
|
||||||
|
|
||||||
### output_format_avro_codec {#output_format_avro_codec}
|
### output_format_avro_codec {#output_format_avro_codec}
|
||||||
|
@ -39,7 +39,7 @@ Example:
|
|||||||
<max_threads>8</max_threads>
|
<max_threads>8</max_threads>
|
||||||
</default>
|
</default>
|
||||||
|
|
||||||
<!-- Settings for quries from the user interface -->
|
<!-- Settings for queries from the user interface -->
|
||||||
<web>
|
<web>
|
||||||
<max_rows_to_read>1000000000</max_rows_to_read>
|
<max_rows_to_read>1000000000</max_rows_to_read>
|
||||||
<max_bytes_to_read>100000000000</max_bytes_to_read>
|
<max_bytes_to_read>100000000000</max_bytes_to_read>
|
||||||
@ -67,6 +67,8 @@ Example:
|
|||||||
<max_ast_depth>50</max_ast_depth>
|
<max_ast_depth>50</max_ast_depth>
|
||||||
<max_ast_elements>100</max_ast_elements>
|
<max_ast_elements>100</max_ast_elements>
|
||||||
|
|
||||||
|
<max_sessions_for_user>4</max_sessions_for_user>
|
||||||
|
|
||||||
<readonly>1</readonly>
|
<readonly>1</readonly>
|
||||||
</web>
|
</web>
|
||||||
</profiles>
|
</profiles>
|
||||||
|
217
docs/en/operations/settings/settings-query-level.md
Normal file
217
docs/en/operations/settings/settings-query-level.md
Normal file
@ -0,0 +1,217 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: Query-level Settings
|
||||||
|
title: Query-level Settings
|
||||||
|
slug: /en/operations/settings/query-level
|
||||||
|
---
|
||||||
|
|
||||||
|
There are multiple ways to set ClickHouse query-level settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.
|
||||||
|
|
||||||
|
The order of priority for defining a setting is:
|
||||||
|
|
||||||
|
1. Applying a setting to a user directly, or within a settings profile
|
||||||
|
|
||||||
|
- SQL (recommended)
|
||||||
|
- adding one or more XML or YAML files to `/etc/clickhouse-server/users.d`
|
||||||
|
|
||||||
|
2. Session settings
|
||||||
|
|
||||||
|
- Send `SET setting=value` from the ClickHouse Cloud SQL console or
|
||||||
|
`clickhouse client` in interactive mode. Similarly, you can use ClickHouse
|
||||||
|
sessions in the HTTP protocol. To do this, you need to specify the
|
||||||
|
`session_id` HTTP parameter.
|
||||||
|
|
||||||
|
3. Query settings
|
||||||
|
|
||||||
|
- When starting `clickhouse client` in non-interactive mode, set the startup
|
||||||
|
parameter `--setting=value`.
|
||||||
|
- When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
|
||||||
|
- Define settings in the
|
||||||
|
[SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query)
|
||||||
|
clause of the SELECT query. The setting value is applied only to that query
|
||||||
|
and is reset to the default or previous value after the query is executed.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
These examples all set the value of the `async_insert` setting to `1`, and
|
||||||
|
show how to examine the settings in a running system.
|
||||||
|
|
||||||
|
### Using SQL to apply a setting to a user directly
|
||||||
|
|
||||||
|
This creates the user `ingester` with the setting `async_insert = 1`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER ingester
|
||||||
|
IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
|
||||||
|
# highlight-next-line
|
||||||
|
SETTINGS async_insert = 1
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Examine the settings profile and assignment
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW ACCESS
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ ... │
|
||||||
|
# highlight-next-line
|
||||||
|
│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS async_insert = true │
|
||||||
|
│ ... │
|
||||||
|
└────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
### Using SQL to create a settings profile and assign to a user
|
||||||
|
|
||||||
|
This creates the profile `log_ingest` with the setting `async_insert = 1`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE
|
||||||
|
SETTINGS PROFILE log_ingest SETTINGS async_insert = 1
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates the user `ingester` and assigns the user the settings profile `log_ingest`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER ingester
|
||||||
|
IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
|
||||||
|
# highlight-next-line
|
||||||
|
SETTINGS PROFILE log_ingest
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Using XML to create a settings profile and user
|
||||||
|
|
||||||
|
```xml title=/etc/clickhouse-server/users.d/users.xml
|
||||||
|
<clickhouse>
|
||||||
|
# highlight-start
|
||||||
|
<profiles>
|
||||||
|
<log_ingest>
|
||||||
|
<async_insert>1</async_insert>
|
||||||
|
</log_ingest>
|
||||||
|
</profiles>
|
||||||
|
# highlight-end
|
||||||
|
|
||||||
|
<users>
|
||||||
|
<ingester>
|
||||||
|
<password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
|
||||||
|
# highlight-start
|
||||||
|
<profile>log_ingest</profile>
|
||||||
|
# highlight-end
|
||||||
|
</ingester>
|
||||||
|
<default replace="true">
|
||||||
|
<password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
|
||||||
|
<access_management>1</access_management>
|
||||||
|
<named_collection_control>1</named_collection_control>
|
||||||
|
</default>
|
||||||
|
</users>
|
||||||
|
</clickhouse>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Examine the settings profile and assignment
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW ACCESS
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ CREATE USER default IDENTIFIED WITH sha256_password │
|
||||||
|
# highlight-next-line
|
||||||
|
│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS PROFILE log_ingest │
|
||||||
|
│ CREATE SETTINGS PROFILE default │
|
||||||
|
# highlight-next-line
|
||||||
|
│ CREATE SETTINGS PROFILE log_ingest SETTINGS async_insert = true │
|
||||||
|
│ CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1 │
|
||||||
|
│ ... │
|
||||||
|
└────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Assign a setting to a session
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET async_insert = 1;
|
||||||
|
SELECT value FROM system.settings where name='async_insert';
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─value──┐
|
||||||
|
│ 1 │
|
||||||
|
└────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Assign a setting during a query
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO YourTable
|
||||||
|
# highlight-next-line
|
||||||
|
SETTINGS async_insert=1
|
||||||
|
VALUES (...)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Converting a Setting to its Default Value
|
||||||
|
|
||||||
|
If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET setting_name = DEFAULT
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, the default value of `async_insert` is `0`. Suppose you change its value to `1`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET async_insert = 1;
|
||||||
|
|
||||||
|
SELECT value FROM system.settings where name='async_insert';
|
||||||
|
```
|
||||||
|
|
||||||
|
The response is:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─value──┐
|
||||||
|
│ 1 │
|
||||||
|
└────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
The following command sets its value back to 0:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET async_insert = DEFAULT;
|
||||||
|
|
||||||
|
SELECT value FROM system.settings where name='async_insert';
|
||||||
|
```
|
||||||
|
|
||||||
|
The setting is now back to its default:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─value───┐
|
||||||
|
│ 0 │
|
||||||
|
└─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Custom Settings {#custom_settings}
|
||||||
|
|
||||||
|
In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.
|
||||||
|
|
||||||
|
A custom setting name must begin with one of predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<custom_settings_prefixes>custom_</custom_settings_prefixes>
|
||||||
|
```
|
||||||
|
|
||||||
|
To define a custom setting use `SET` command:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET custom_a = 123;
|
||||||
|
```
|
||||||
|
|
||||||
|
To get the current value of a custom setting use `getSetting()` function:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT getSetting('custom_a');
|
||||||
|
```
|
||||||
|
|
||||||
|
**See Also**
|
||||||
|
|
||||||
|
- View the [Settings](./settings.md) page for a description of the ClickHouse settings.
|
||||||
|
- [Global server settings](../../operations/server-configuration-parameters/settings.md)
|
@ -17,7 +17,8 @@ Default value: 0.
|
|||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
|
INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
|
||||||
|
SELECT * FROM table_1;
|
||||||
```
|
```
|
||||||
```response
|
```response
|
||||||
┌─x─┬─y────┐
|
┌─x─┬─y────┐
|
||||||
@ -30,7 +31,7 @@ insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
|
|||||||
```sql
|
```sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM table_1
|
FROM table_1
|
||||||
SETTINGS additional_table_filters = (('table_1', 'x != 2'))
|
SETTINGS additional_table_filters = {'table_1': 'x != 2'}
|
||||||
```
|
```
|
||||||
```response
|
```response
|
||||||
┌─x─┬─y────┐
|
┌─x─┬─y────┐
|
||||||
@ -50,7 +51,8 @@ Default value: `''`.
|
|||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
|
INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
|
||||||
|
SELECT * FROM table_1;
|
||||||
```
|
```
|
||||||
```response
|
```response
|
||||||
┌─x─┬─y────┐
|
┌─x─┬─y────┐
|
||||||
@ -535,6 +537,8 @@ Possible values:
|
|||||||
|
|
||||||
The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and the assigned bucket for each row. Any rows which don’t belong to the current bucket are flushed and reassigned.
|
The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and the assigned bucket for each row is recalculated. Any rows which don’t belong to the current bucket are flushed and reassigned.
|
||||||
|
|
||||||
|
Supports `INNER/LEFT/RIGHT/FULL ALL/ANY JOIN`.
|
||||||
|
|
||||||
- hash
|
- hash
|
||||||
|
|
||||||
[Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.
|
[Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.
|
||||||
@ -3201,6 +3205,40 @@ ENGINE = Log
|
|||||||
└──────────────────────────────────────────────────────────────────────────┘
|
└──────────────────────────────────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## default_temporary_table_engine {#default_temporary_table_engine}
|
||||||
|
|
||||||
|
Same as [default_table_engine](#default_table_engine) but for temporary tables.
|
||||||
|
|
||||||
|
Default value: `Memory`.
|
||||||
|
|
||||||
|
In this example, any new temporary table that does not specify an `Engine` will use the `Log` table engine:
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET default_temporary_table_engine = 'Log';
|
||||||
|
|
||||||
|
CREATE TEMPORARY TABLE my_table (
|
||||||
|
x UInt32,
|
||||||
|
y UInt32
|
||||||
|
);
|
||||||
|
|
||||||
|
SHOW CREATE TEMPORARY TABLE my_table;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─statement────────────────────────────────────────────────────────────────┐
|
||||||
|
│ CREATE TEMPORARY TABLE default.my_table
|
||||||
|
(
|
||||||
|
`x` UInt32,
|
||||||
|
`y` UInt32
|
||||||
|
)
|
||||||
|
ENGINE = Log
|
||||||
|
└──────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## data_type_default_nullable {#data_type_default_nullable}
|
## data_type_default_nullable {#data_type_default_nullable}
|
||||||
|
|
||||||
Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in column definition will be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).
|
Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in column definition will be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).
|
||||||
@ -3430,6 +3468,12 @@ Possible values:
|
|||||||
|
|
||||||
Default value: `0`.
|
Default value: `0`.
|
||||||
|
|
||||||
|
## disable_url_encoding {#disable_url_encoding}
|
||||||
|
|
||||||
|
Allows to disable decoding/encoding path in uri in [URL](../../engines/table-engines/special/url.md) engine tables.
|
||||||
|
|
||||||
|
Disabled by default.
|
||||||
|
|
||||||
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
|
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
|
||||||
|
|
||||||
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
|
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
|
||||||
@ -3501,7 +3545,7 @@ Possible values:
|
|||||||
- Any positive integer.
|
- Any positive integer.
|
||||||
- 0 - Disabled (infinite timeout).
|
- 0 - Disabled (infinite timeout).
|
||||||
|
|
||||||
Default value: 180.
|
Default value: 30.
|
||||||
|
|
||||||
## http_receive_timeout {#http_receive_timeout}
|
## http_receive_timeout {#http_receive_timeout}
|
||||||
|
|
||||||
@ -3512,7 +3556,7 @@ Possible values:
|
|||||||
- Any positive integer.
|
- Any positive integer.
|
||||||
- 0 - Disabled (infinite timeout).
|
- 0 - Disabled (infinite timeout).
|
||||||
|
|
||||||
Default value: 180.
|
Default value: 30.
|
||||||
|
|
||||||
## check_query_single_value_result {#check_query_single_value_result}
|
## check_query_single_value_result {#check_query_single_value_result}
|
||||||
|
|
||||||
@ -4488,6 +4532,7 @@ This setting allows to specify renaming pattern for files processed by `file` ta
|
|||||||
|
|
||||||
### Placeholders
|
### Placeholders
|
||||||
|
|
||||||
|
- `%a` — Full original filename (e.g., "sample.csv").
|
||||||
- `%f` — Original filename without extension (e.g., "sample").
|
- `%f` — Original filename without extension (e.g., "sample").
|
||||||
- `%e` — Original file extension with dot (e.g., ".csv").
|
- `%e` — Original file extension with dot (e.g., ".csv").
|
||||||
- `%t` — Timestamp (in microseconds).
|
- `%t` — Timestamp (in microseconds).
|
||||||
|
@ -9,7 +9,6 @@ Columns:
|
|||||||
|
|
||||||
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
|
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
|
||||||
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
|
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
|
||||||
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time with microseconds resolution.
|
|
||||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
|
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
|
||||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
|
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
|
||||||
|
|
||||||
@ -20,18 +19,18 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
|
|||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
|
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0 │ 2120.9 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0 │ 2120.9 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy │ 743 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy │ 743 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty │ 26288 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty │ 26288 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │ 0 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │ 0 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs │ 0 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs │ 0 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained │ 60694528 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained │ 60694528 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped │ 303161344 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped │ 303161344 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident │ 260931584 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident │ 260931584 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata │ 12079488 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata │ 12079488 │
|
||||||
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated │ 133756128 │
|
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated │ 133756128 │
|
||||||
└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
|
└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
@ -32,6 +32,10 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
|
|||||||
└─────────────────────────────────────────┴────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
└─────────────────────────────────────────┴────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
<!--- Unlike with system.events and system.metrics, the asynchronous metrics are not gathered in a simple list in a source code file - they
|
||||||
|
are mixed with logic in src/Interpreters/ServerAsynchronousMetrics.cpp.
|
||||||
|
Listing them here explicitly for reader convenience. --->
|
||||||
|
|
||||||
## Metric descriptions
|
## Metric descriptions
|
||||||
|
|
||||||
|
|
||||||
@ -483,6 +487,14 @@ The value is similar to `OSUserTime` but divided to the number of CPU cores to b
|
|||||||
|
|
||||||
Number of threads in the server of the PostgreSQL compatibility protocol.
|
Number of threads in the server of the PostgreSQL compatibility protocol.
|
||||||
|
|
||||||
|
### QueryCacheBytes
|
||||||
|
|
||||||
|
Total size of the query cache in bytes.
|
||||||
|
|
||||||
|
### QueryCacheEntries
|
||||||
|
|
||||||
|
Total number of entries in the query cache.
|
||||||
|
|
||||||
### ReplicasMaxAbsoluteDelay
|
### ReplicasMaxAbsoluteDelay
|
||||||
|
|
||||||
Maximum difference in seconds between the most fresh replicated part and the most fresh data part still to be replicated, across Replicated tables. A very high value indicates a replica with no data.
|
Maximum difference in seconds between the most fresh replicated part and the most fresh data part still to be replicated, across Replicated tables. A very high value indicates a replica with no data.
|
||||||
|
@ -10,6 +10,9 @@ Columns:
|
|||||||
- `event` ([String](../../sql-reference/data-types/string.md)) — Event name.
|
- `event` ([String](../../sql-reference/data-types/string.md)) — Event name.
|
||||||
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of events occurred.
|
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of events occurred.
|
||||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Event description.
|
- `description` ([String](../../sql-reference/data-types/string.md)) — Event description.
|
||||||
|
- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `event`.
|
||||||
|
|
||||||
|
You can find all supported events in source file [src/Common/ProfileEvents.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/ProfileEvents.cpp).
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
|
@ -13,6 +13,7 @@ System tables provide information about:
|
|||||||
|
|
||||||
- Server states, processes, and environment.
|
- Server states, processes, and environment.
|
||||||
- Server’s internal processes.
|
- Server’s internal processes.
|
||||||
|
- Options used when the ClickHouse binary was built.
|
||||||
|
|
||||||
System tables:
|
System tables:
|
||||||
|
|
||||||
@ -46,6 +47,10 @@ An example:
|
|||||||
<engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
|
<engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
|
||||||
-->
|
-->
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||||
|
<max_size_rows>1048576</max_size_rows>
|
||||||
|
<reserved_size_rows>8192</reserved_size_rows>
|
||||||
|
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
|
||||||
|
<flush_on_crash>false</flush_on_crash>
|
||||||
</query_log>
|
</query_log>
|
||||||
</clickhouse>
|
</clickhouse>
|
||||||
```
|
```
|
||||||
|
45
docs/en/operations/system-tables/jemalloc_bins.md
Normal file
45
docs/en/operations/system-tables/jemalloc_bins.md
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
---
|
||||||
|
slug: /en/operations/system-tables/jemalloc_bins
|
||||||
|
---
|
||||||
|
# jemalloc_bins
|
||||||
|
|
||||||
|
Contains information about memory allocations done via jemalloc allocator in different size classes (bins) aggregated from all arenas.
|
||||||
|
These statistics might not be absolutely accurate because of thread local caching in jemalloc.
|
||||||
|
|
||||||
|
Columns:
|
||||||
|
|
||||||
|
- `index` (UInt64) — Index of the bin ordered by size
|
||||||
|
- `large` (Bool) — True for large allocations and False for small
|
||||||
|
- `size` (UInt64) — Size of allocations in this bin
|
||||||
|
- `allocations` (UInt64) — Number of allocations
|
||||||
|
- `deallocations` (UInt64) — Number of deallocations
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Find the sizes of allocations that contributed the most to the current overall memory usage.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT
|
||||||
|
*,
|
||||||
|
allocations - deallocations AS active_allocations,
|
||||||
|
size * active_allocations AS allocated_bytes
|
||||||
|
FROM system.jemalloc_bins
|
||||||
|
WHERE allocated_bytes > 0
|
||||||
|
ORDER BY allocated_bytes DESC
|
||||||
|
LIMIT 10
|
||||||
|
```
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─index─┬─large─┬─────size─┬──allocations─┬─deallocations─┬─active_allocations─┬─allocated_bytes─┐
|
||||||
|
│ 82 │ 1 │ 50331648 │ 1 │ 0 │ 1 │ 50331648 │
|
||||||
|
│ 10 │ 0 │ 192 │ 512336 │ 370710 │ 141626 │ 27192192 │
|
||||||
|
│ 69 │ 1 │ 5242880 │ 6 │ 2 │ 4 │ 20971520 │
|
||||||
|
│ 3 │ 0 │ 48 │ 16938224 │ 16559484 │ 378740 │ 18179520 │
|
||||||
|
│ 28 │ 0 │ 4096 │ 122924 │ 119142 │ 3782 │ 15491072 │
|
||||||
|
│ 61 │ 1 │ 1310720 │ 44569 │ 44558 │ 11 │ 14417920 │
|
||||||
|
│ 39 │ 1 │ 28672 │ 1285 │ 913 │ 372 │ 10665984 │
|
||||||
|
│ 4 │ 0 │ 64 │ 2837225 │ 2680568 │ 156657 │ 10026048 │
|
||||||
|
│ 6 │ 0 │ 96 │ 2617803 │ 2531435 │ 86368 │ 8291328 │
|
||||||
|
│ 36 │ 1 │ 16384 │ 22431 │ 21970 │ 461 │ 7553024 │
|
||||||
|
└───────┴───────┴──────────┴──────────────┴───────────────┴────────────────────┴─────────────────┘
|
||||||
|
```
|
@ -7,11 +7,17 @@ Contains information about settings for `MergeTree` tables.
|
|||||||
|
|
||||||
Columns:
|
Columns:
|
||||||
|
|
||||||
- `name` (String) — Setting name.
|
- `name` ([String](../../sql-reference/data-types/string.md)) — Setting name.
|
||||||
- `value` (String) — Setting value.
|
- `value` ([String](../../sql-reference/data-types/string.md)) — Setting value.
|
||||||
- `description` (String) — Setting description.
|
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Whether the setting was explicitly defined in the config or explicitly changed.
|
||||||
- `type` (String) — Setting type (implementation specific string value).
|
- `description` ([String](../../sql-reference/data-types/string.md)) — Setting description.
|
||||||
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
|
- `min` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Minimum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
|
||||||
|
- `max` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Maximum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
|
||||||
|
- `readonly` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the current user can change the setting:
|
||||||
|
- `0` — Current user can change the setting.
|
||||||
|
- `1` — Current user can’t change the setting.
|
||||||
|
- `type` ([String](../../sql-reference/data-types/string.md)) — Setting type (implementation specific string value).
|
||||||
|
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
```sql
|
```sql
|
||||||
@ -21,35 +27,51 @@ SELECT * FROM system.merge_tree_settings LIMIT 4 FORMAT Vertical;
|
|||||||
```response
|
```response
|
||||||
Row 1:
|
Row 1:
|
||||||
──────
|
──────
|
||||||
|
name: min_compress_block_size
|
||||||
|
value: 0
|
||||||
|
changed: 0
|
||||||
|
description: When granule is written, compress the data in buffer if the size of pending uncompressed data is larger or equal than the specified threshold. If this setting is not set, the corresponding global setting is used.
|
||||||
|
min: ᴺᵁᴸᴸ
|
||||||
|
max: ᴺᵁᴸᴸ
|
||||||
|
readonly: 0
|
||||||
|
type: UInt64
|
||||||
|
is_obsolete: 0
|
||||||
|
|
||||||
|
Row 2:
|
||||||
|
──────
|
||||||
|
name: max_compress_block_size
|
||||||
|
value: 0
|
||||||
|
changed: 0
|
||||||
|
description: Compress the pending uncompressed data in buffer if its size is larger or equal than the specified threshold. Block of data will be compressed even if the current granule is not finished. If this setting is not set, the corresponding global setting is used.
|
||||||
|
min: ᴺᵁᴸᴸ
|
||||||
|
max: ᴺᵁᴸᴸ
|
||||||
|
readonly: 0
|
||||||
|
type: UInt64
|
||||||
|
is_obsolete: 0
|
||||||
|
|
||||||
|
Row 3:
|
||||||
|
──────
|
||||||
name: index_granularity
|
name: index_granularity
|
||||||
value: 8192
|
value: 8192
|
||||||
changed: 0
|
changed: 0
|
||||||
description: How many rows correspond to one primary key value.
|
description: How many rows correspond to one primary key value.
|
||||||
type: SettingUInt64
|
min: ᴺᵁᴸᴸ
|
||||||
|
max: ᴺᵁᴸᴸ
|
||||||
Row 2:
|
readonly: 0
|
||||||
──────
|
type: UInt64
|
||||||
name: min_bytes_for_wide_part
|
is_obsolete: 0
|
||||||
value: 0
|
|
||||||
changed: 0
|
|
||||||
description: Minimal uncompressed size in bytes to create part in wide format instead of compact
|
|
||||||
type: SettingUInt64
|
|
||||||
|
|
||||||
Row 3:
|
|
||||||
──────
|
|
||||||
name: min_rows_for_wide_part
|
|
||||||
value: 0
|
|
||||||
changed: 0
|
|
||||||
description: Minimal number of rows to create part in wide format instead of compact
|
|
||||||
type: SettingUInt64
|
|
||||||
|
|
||||||
Row 4:
|
Row 4:
|
||||||
──────
|
──────
|
||||||
name: merge_max_block_size
|
name: max_digestion_size_per_segment
|
||||||
value: 8192
|
value: 268435456
|
||||||
changed: 0
|
changed: 0
|
||||||
description: How many rows in blocks should be formed for merge operations.
|
description: Max number of bytes to digest per segment to build GIN index.
|
||||||
type: SettingUInt64
|
min: ᴺᵁᴸᴸ
|
||||||
|
max: ᴺᵁᴸᴸ
|
||||||
|
readonly: 0
|
||||||
|
type: UInt64
|
||||||
|
is_obsolete: 0
|
||||||
|
|
||||||
4 rows in set. Elapsed: 0.001 sec.
|
4 rows in set. Elapsed: 0.009 sec.
|
||||||
```
|
```
|
||||||
|
@ -10,8 +10,9 @@ Columns:
|
|||||||
- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
|
- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
|
||||||
- `value` ([Int64](../../sql-reference/data-types/int-uint.md)) — Metric value.
|
- `value` ([Int64](../../sql-reference/data-types/int-uint.md)) — Metric value.
|
||||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.
|
- `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.
|
||||||
|
- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `metric`.
|
||||||
|
|
||||||
The list of supported metrics you can find in the [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp) source file of ClickHouse.
|
You can find all supported metrics in the source file [src/Common/CurrentMetrics.cpp](https://github.com/ClickHouse/ClickHouse/blob/master/src/Common/CurrentMetrics.cpp).
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
|
@ -39,6 +39,8 @@ Columns:
|
|||||||
|
|
||||||
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
|
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||||
|
|
||||||
|
- `primary_key_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) used by primary key values in the primary.idx/cidx file on disk.
|
||||||
|
|
||||||
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.
|
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.
|
||||||
|
|
||||||
- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
|
- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
|
||||||
|
@ -111,6 +111,11 @@ Columns:
|
|||||||
- `used_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `functions`, which were used during query execution.
|
- `used_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `functions`, which were used during query execution.
|
||||||
- `used_storages` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `storages`, which were used during query execution.
|
- `used_storages` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `storages`, which were used during query execution.
|
||||||
- `used_table_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `table functions`, which were used during query execution.
|
- `used_table_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `table functions`, which were used during query execution.
|
||||||
|
- `query_cache_usage` ([Enum8](../../sql-reference/data-types/enum.md)) — Usage of the [query cache](../query-cache.md) during query execution. Values:
|
||||||
|
- `'Unknown'` = Status unknown.
|
||||||
|
- `'None'` = The query result was neither written into nor read from the query cache.
|
||||||
|
- `'Write'` = The query result was written into the query cache.
|
||||||
|
- `'Read'` = The query result was read from the query cache.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
@ -186,6 +191,7 @@ used_formats: []
|
|||||||
used_functions: []
|
used_functions: []
|
||||||
used_storages: []
|
used_storages: []
|
||||||
used_table_functions: []
|
used_table_functions: []
|
||||||
|
query_cache_usage: None
|
||||||
```
|
```
|
||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
@ -14,6 +14,7 @@ Columns:
|
|||||||
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting was specified in `config.xml`
|
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting was specified in `config.xml`
|
||||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Short server setting description.
|
- `description` ([String](../../sql-reference/data-types/string.md)) — Short server setting description.
|
||||||
- `type` ([String](../../sql-reference/data-types/string.md)) — Server setting value type.
|
- `type` ([String](../../sql-reference/data-types/string.md)) — Server setting value type.
|
||||||
|
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
@ -26,14 +27,22 @@ WHERE name LIKE '%thread_pool%'
|
|||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
┌─name─────────────────────────┬─value─┬─default─┬─changed─┬─description─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┐
|
┌─name────────────────────────────────────────┬─value─┬─default─┬─changed─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────
|
||||||
│ max_thread_pool_size │ 5000 │ 10000 │ 1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │
|
───────────────────────────────────┬─type───┬─is_obsolete─┐
|
||||||
│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │
|
│ max_thread_pool_size │ 10000 │ 10000 │ 1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │ 0 │
|
||||||
│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │
|
│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │ 0 │
|
||||||
│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │
|
│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │ 0 │
|
||||||
│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │
|
│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │ 0 │
|
||||||
│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │
|
│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │ 0 │
|
||||||
└──────────────────────────────┴───────┴─────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┘
|
│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │ 0 │
|
||||||
|
│ max_active_parts_loading_thread_pool_size │ 64 │ 64 │ 0 │ The number of threads to load active set of data parts (Active ones) at startup. │ UInt64 │ 0 │
|
||||||
|
│ max_outdated_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup. │ UInt64 │ 0 │
|
||||||
|
│ max_parts_cleaning_thread_pool_size │ 128 │ 128 │ 0 │ The number of threads for concurrent removal of inactive data parts. │ UInt64 │ 0 │
|
||||||
|
│ max_backups_io_thread_pool_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that would be used for IO operations for BACKUP queries │ UInt64 │ 0 │
|
||||||
|
│ max_backups_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for backups IO thread pool. │ UInt64 │ 0 │
|
||||||
|
│ backups_io_thread_pool_queue_size │ 0 │ 0 │ 0 │ Queue size for backups IO thread pool. │ UInt64 │ 0 │
|
||||||
|
└─────────────────────────────────────────────┴───────┴─────────┴─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────
|
||||||
|
───────────────────────────────────┴────────┴─────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
Using `WHERE changed` can be useful, for example, when you want to check
|
Using `WHERE changed` can be useful, for example, when you want to check
|
||||||
|
@ -17,6 +17,7 @@ Columns:
|
|||||||
- `0` — Current user can change the setting.
|
- `0` — Current user can change the setting.
|
||||||
- `1` — Current user can’t change the setting.
|
- `1` — Current user can’t change the setting.
|
||||||
- `default` ([String](../../sql-reference/data-types/string.md)) — Setting default value.
|
- `default` ([String](../../sql-reference/data-types/string.md)) — Setting default value.
|
||||||
|
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
@ -29,11 +30,14 @@ WHERE name LIKE '%min_i%'
|
|||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
|
┌─name───────────────────────────────────────────────┬─value─────┬─changed─┬─description───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┬─type─────────┬─default───┬─alias_for─┬─is_obsolete─┐
|
||||||
│ min_insert_block_size_rows │ 1048576 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
|
│ min_insert_block_size_rows │ 1048449 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 1048449 │ │ 0 │
|
||||||
│ min_insert_block_size_bytes │ 268435456 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
|
│ min_insert_block_size_bytes │ 268402944 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 268402944 │ │ 0 │
|
||||||
│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │
|
│ min_insert_block_size_rows_for_materialized_views │ 0 │ 0 │ Like min_insert_block_size_rows, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_rows) │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 0 │ │ 0 │
|
||||||
└─────────────────────────────────────────────┴───────────┴─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
|
│ min_insert_block_size_bytes_for_materialized_views │ 0 │ 0 │ Like min_insert_block_size_bytes, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_bytes) │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 0 │ │ 0 │
|
||||||
|
│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ Milliseconds │ 1000 │ │ 0 │
|
||||||
|
└────────────────────────────────────────────────────┴───────────┴─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────
|
||||||
|
──────────────────────────────────────────────────────┴──────┴──────┴──────────┴──────────────┴───────────┴───────────┴─────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
Using `WHERE changed` can be useful, for example, when you want to check:
|
Using `WHERE changed` can be useful, for example, when you want to check:
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user