Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 16:12:01 +00:00)

Commit 6add283933: Merge branch 'master' into fix_insert_into_buffer_table
.github/workflows/master.yml (vendored, 259 changes)
@@ -850,6 +850,48 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
  BuilderBinRISCV64:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          BUILD_NAME=binary_riscv64
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v3
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          submodules: true
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Upload build URLs to artifacts
        if: ${{ success() || failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.BUILD_URLS }}
          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -932,6 +974,7 @@ jobs:
      - BuilderBinDarwinAarch64
      - BuilderBinFreeBSD
      - BuilderBinPPC64
+     - BuilderBinRISCV64
      - BuilderBinAmd64Compat
      - BuilderBinAarch64V80Compat
      - BuilderBinClangTidy
@@ -2827,6 +2870,216 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=0
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan1:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=1
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan2:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=2
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan3:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=3
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan4:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=4
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan5:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=5
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsTsan0:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, stress-tester]
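The six IntegrationTestsAnalyzerAsan jobs split one test suite across runners by hash: each job runs only the tests that fall into its RUN_BY_HASH_NUM bucket out of RUN_BY_HASH_TOTAL=6. A minimal C++ sketch of that bucketing rule, under the assumption that tests/ci/integration_test_check.py shards by hashing the test name (the function and hash choice here are illustrative, not the script's actual code):

    #include <functional>
    #include <string>

    // A test belongs to shard `num` iff hash(name) % total == num, so every
    // test runs on exactly one of the six runners.
    bool runsOnThisShard(const std::string & test_name, size_t num, size_t total)
    {
        return std::hash<std::string>{}(test_name) % total == num;
    }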
@@ -3920,6 +4173,12 @@ jobs:
      - IntegrationTestsAsan3
      - IntegrationTestsAsan4
      - IntegrationTestsAsan5
+     - IntegrationTestsAnalyzerAsan0
+     - IntegrationTestsAnalyzerAsan1
+     - IntegrationTestsAnalyzerAsan2
+     - IntegrationTestsAnalyzerAsan3
+     - IntegrationTestsAnalyzerAsan4
+     - IntegrationTestsAnalyzerAsan5
      - IntegrationTestsRelease0
      - IntegrationTestsRelease1
      - IntegrationTestsRelease2
.github/workflows/nightly.yml (vendored, 45 changes)
@@ -75,51 +75,6 @@ jobs:
  Codebrowser:
    needs: [DockerHubPush]
    uses: ./.github/workflows/woboq.yml
- BuilderCoverity:
-   needs: DockerHubPush
-   runs-on: [self-hosted, builder]
-   steps:
-     - name: Set envs
-       run: |
-         cat >> "$GITHUB_ENV" << 'EOF'
-         BUILD_NAME=coverity
-         CACHES_PATH=${{runner.temp}}/../ccaches
-         IMAGES_PATH=${{runner.temp}}/images_path
-         REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-         TEMP_PATH=${{runner.temp}}/build_check
-         EOF
-         echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
-     - name: Download changed images
-       uses: actions/download-artifact@v3
-       with:
-         name: changed_images
-         path: ${{ env.IMAGES_PATH }}
-     - name: Check out repository code
-       uses: ClickHouse/checkout@v1
-       with:
-         clear-repository: true
-         submodules: true
-     - name: Build
-       run: |
-         sudo rm -fr "$TEMP_PATH"
-         mkdir -p "$TEMP_PATH"
-         cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-         cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-     - name: Upload Coverity Analysis
-       if: ${{ success() || failure() }}
-       run: |
-         curl --form token="${COVERITY_TOKEN}" \
-           --form email='security+coverity@clickhouse.com' \
-           --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tar.gz" \
-           --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
-           --form description="Nightly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
-           https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
-     - name: Cleanup
-       if: always()
-       run: |
-         docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-         docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-         sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
  SonarCloud:
    runs-on: [self-hosted, builder]
    env:
.github/workflows/pull_request.yml (vendored, 258 changes)
@@ -911,6 +911,47 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
  BuilderBinRISCV64:
    needs: [DockerHubPush, FastTest, StyleCheck]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          BUILD_NAME=binary_riscv64
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v3
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          submodules: true
      - name: Build
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Upload build URLs to artifacts
        if: ${{ success() || failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.BUILD_URLS }}
          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -992,6 +1033,7 @@ jobs:
      - BuilderBinDarwinAarch64
      - BuilderBinFreeBSD
      - BuilderBinPPC64
+     - BuilderBinRISCV64
      - BuilderBinAmd64Compat
      - BuilderBinAarch64V80Compat
      - BuilderBinClangTidy
@@ -3861,6 +3903,216 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=0
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan1:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=1
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan2:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=2
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan3:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=3
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan4:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=4
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsAnalyzerAsan5:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/integration_tests_asan
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Integration tests (asan, analyzer)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=5
          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Integration test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  IntegrationTestsTsan0:
    needs: [BuilderDebTsan]
    runs-on: [self-hosted, stress-tester]
@@ -4847,6 +5099,12 @@ jobs:
      - IntegrationTestsAsan3
      - IntegrationTestsAsan4
      - IntegrationTestsAsan5
+     - IntegrationTestsAnalyzerAsan0
+     - IntegrationTestsAnalyzerAsan1
+     - IntegrationTestsAnalyzerAsan2
+     - IntegrationTestsAnalyzerAsan3
+     - IntegrationTestsAnalyzerAsan4
+     - IntegrationTestsAnalyzerAsan5
      - IntegrationTestsRelease0
      - IntegrationTestsRelease1
      - IntegrationTestsRelease2
.gitignore (vendored, 3 changes)
@@ -69,6 +69,7 @@ cmake-build-*
*.pyc
__pycache__
*.pytest_cache
+.mypy_cache

test.cpp
CPackConfig.cmake
@@ -161,8 +162,10 @@ tests/queries/0_stateless/test_*
tests/queries/0_stateless/*.binary
tests/queries/0_stateless/*.generated-expect
tests/queries/0_stateless/*.expect.history
tests/integration/**/_gen

# rust
/rust/**/target
# It is autogenerated from *.in
/rust/**/.cargo/config.toml
/rust/**/vendor
.gitmodules (vendored, 6 changes)
@@ -258,9 +258,6 @@
[submodule "contrib/wyhash"]
	path = contrib/wyhash
	url = https://github.com/wangyi-fudan/wyhash
-[submodule "contrib/hashidsxx"]
-	path = contrib/hashidsxx
-	url = https://github.com/schoentoon/hashidsxx
[submodule "contrib/nats-io"]
	path = contrib/nats-io
	url = https://github.com/ClickHouse/nats.c
@@ -343,3 +340,6 @@
[submodule "contrib/c-ares"]
	path = contrib/c-ares
	url = https://github.com/c-ares/c-ares.git
+[submodule "contrib/incbin"]
+	path = contrib/incbin
+	url = https://github.com/graphitemaster/incbin.git
@@ -87,7 +87,6 @@ if (ENABLE_FUZZING)
    set (ENABLE_CLICKHOUSE_ODBC_BRIDGE OFF)
    set (ENABLE_LIBRARIES 0)
    set (ENABLE_SSL 1)
-   set (USE_UNWIND ON)
    set (ENABLE_EMBEDDED_COMPILER 0)
    set (ENABLE_EXAMPLES 0)
    set (ENABLE_UTILS 0)
@@ -344,9 +343,9 @@ if (COMPILER_CLANG)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")

-   if (NOT ENABLE_TESTS AND NOT SANITIZE)
+   if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
        # https://clang.llvm.org/docs/ThinLTO.html
-       # Applies to clang only.
+       # Applies to clang and linux only.
        # Disabled when building with tests or sanitizers.
        option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
    endif()
@@ -23,7 +23,7 @@ curl https://clickhouse.com/ | sh

## Upcoming Events

-* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**v23.7 Release Webinar**](https://clickhouse.com/company/events/v23-7-community-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-07) - Jul 27 - 23.7 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
@@ -34,13 +34,13 @@ Also, keep an eye out for upcoming meetups around the world. Somewhere else you

## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) Faster Parquet Reading, Asynchronous Connections to Replicas, Trailing Comma before FROM, extractKeyValuePairs, integrations updates, and so much more! Watch it now!
+* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)


-## Interested in joining ClickHouse and making it your full time job?
+## Interested in joining ClickHouse and making it your full-time job?

-We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker as well as a doer - we’ll definitely click!
+We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting-edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker and a doer - we’ll definitely click!

Check out our **current openings** here: https://clickhouse.com/company/careers
@@ -448,7 +448,7 @@ inline char * find_last_not_symbols_or_null(char * begin, char * end)
/// See https://github.com/boostorg/algorithm/issues/63
/// And https://bugs.llvm.org/show_bug.cgi?id=41141
template <char... symbols, typename To>
-inline void splitInto(To & to, const std::string & what, bool token_compress = false)
+inline To & splitInto(To & to, std::string_view what, bool token_compress = false)
{
    const char * pos = what.data();
    const char * end = pos + what.size();
@@ -464,4 +464,6 @@ inline void splitInto(To & to, const std::string & what, bool token_compress = f
        else
            pos = delimiter_or_end;
    }

+   return to;
}
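Because splitInto now returns the target container instead of void, a call can sit inside a larger expression. A small usage sketch, assuming the header above is included (delimiters and input are chosen for illustration):

    #include <string>
    #include <string_view>
    #include <vector>

    std::vector<std::string> parts;
    // Split on ',' or ' ' and immediately consume the result: the returned
    // reference is the same `parts` container, so .size() can be chained.
    const size_t n = splitInto<',', ' '>(parts, std::string_view("a,b c"), /*token_compress=*/true).size();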
@@ -15,25 +15,34 @@


static thread_local uint64_t current_tid = 0;

+static void setCurrentThreadId()
+{
+#if defined(OS_ANDROID)
+    current_tid = gettid();
+#elif defined(OS_LINUX)
+    current_tid = static_cast<uint64_t>(syscall(SYS_gettid)); /// This call is always successful. - man gettid
+#elif defined(OS_FREEBSD)
+    current_tid = pthread_getthreadid_np();
+#elif defined(OS_SUNOS)
+    // On Solaris-derived systems, this returns the ID of the LWP, analogous
+    // to a thread.
+    current_tid = static_cast<uint64_t>(pthread_self());
+#else
+    if (0 != pthread_threadid_np(nullptr, &current_tid))
+        throw std::logic_error("pthread_threadid_np returned error");
+#endif
+}
+
uint64_t getThreadId()
{
    if (!current_tid)
-    {
-#if defined(OS_ANDROID)
-        current_tid = gettid();
-#elif defined(OS_LINUX)
-        current_tid = static_cast<uint64_t>(syscall(SYS_gettid)); /// This call is always successful. - man gettid
-#elif defined(OS_FREEBSD)
-        current_tid = pthread_getthreadid_np();
-#elif defined(OS_SUNOS)
-        // On Solaris-derived systems, this returns the ID of the LWP, analogous
-        // to a thread.
-        current_tid = static_cast<uint64_t>(pthread_self());
-#else
-        if (0 != pthread_threadid_np(nullptr, &current_tid))
-            throw std::logic_error("pthread_threadid_np returned error");
-#endif
-    }
+        setCurrentThreadId();

    return current_tid;
}
+
+void updateCurrentThreadIdAfterFork()
+{
+    setCurrentThreadId();
+}
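The new updateCurrentThreadIdAfterFork() exists because the cached id in `current_tid` goes stale in the child of fork(). A hedged sketch of how a caller could wire it up through pthread_atfork (the actual registration site is not part of this diff):

    #include <cstdint>
    #include <pthread.h>

    uint64_t getThreadId();
    void updateCurrentThreadIdAfterFork();

    // Run in the fork child so the cached id is re-read from the OS instead
    // of reporting the parent's thread id.
    static void registerForkHandler()
    {
        pthread_atfork(/*prepare=*/nullptr, /*parent=*/nullptr, /*child=*/updateCurrentThreadIdAfterFork);
    }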
@@ -3,3 +3,5 @@

/// Obtain thread id from OS. The value is cached in thread local variable.
uint64_t getThreadId();

+void updateCurrentThreadIdAfterFork();
base/base/move_extend.h (new file, 9 lines)
@@ -0,0 +1,9 @@
#pragma once

/// Extend @p to by moving elements from @p from to @p to end
/// @return @p to iterator to first of moved elements.
template <class To, class From>
typename To::iterator moveExtend(To & to, From && from)
{
    return to.insert(to.end(), std::make_move_iterator(from.begin()), std::make_move_iterator(from.end()));
}
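A short usage sketch of the new moveExtend helper (container contents chosen for illustration):

    #include <string>
    #include <vector>

    void example()
    {
        std::vector<std::string> dst{"a"};
        std::vector<std::string> src{"b", "c"};
        // dst becomes {"a", "b", "c"}; src's strings are moved, not copied.
        auto first_moved = moveExtend(dst, std::move(src));
        // first_moved points at "b", the first of the spliced-in elements.
    }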
@@ -57,7 +57,7 @@ public:
    URI();
        /// Creates an empty URI.

-   explicit URI(const std::string & uri);
+   explicit URI(const std::string & uri, bool disable_url_encoding = false);
        /// Parses an URI from the given string. Throws a
        /// SyntaxException if the uri is not valid.

@@ -350,6 +350,10 @@ protected:
    static const std::string ILLEGAL;

private:
+   void encodePath(std::string & encodedStr) const;
+   void decodePath(const std::string & encodedStr);
+
+
    std::string _scheme;
    std::string _userInfo;
    std::string _host;
@@ -357,6 +361,8 @@ private:
    std::string _path;
    std::string _query;
    std::string _fragment;

+   bool _disable_url_encoding = false;
};
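The new constructor flag tells this vendored Poco::URI to keep the path verbatim instead of percent-decoding and re-encoding it. A usage sketch (the URL is chosen for illustration):

    #include <Poco/URI.h>
    #include <iostream>

    int main()
    {
        // With the flag set, the pre-encoded "%2F" in the path survives a
        // parse/serialize round trip instead of being decoded to '/'.
        Poco::URI uri("https://example.com/a%2Fb?x=1", /*disable_url_encoding=*/true);
        std::cout << uri.toString() << '\n';
    }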
@@ -36,8 +36,8 @@ URI::URI():
}


-URI::URI(const std::string& uri):
-   _port(0)
+URI::URI(const std::string& uri, bool decode_and_encode_path):
+   _port(0), _disable_url_encoding(decode_and_encode_path)
{
    parse(uri);
}
@@ -107,7 +107,8 @@ URI::URI(const URI& uri):
    _port(uri._port),
    _path(uri._path),
    _query(uri._query),
-   _fragment(uri._fragment)
+   _fragment(uri._fragment),
+   _disable_url_encoding(uri._disable_url_encoding)
{
}

@@ -119,7 +120,8 @@ URI::URI(const URI& baseURI, const std::string& relativeURI):
    _port(baseURI._port),
    _path(baseURI._path),
    _query(baseURI._query),
-   _fragment(baseURI._fragment)
+   _fragment(baseURI._fragment),
+   _disable_url_encoding(baseURI._disable_url_encoding)
{
    resolve(relativeURI);
}
@@ -151,6 +153,7 @@ URI& URI::operator = (const URI& uri)
        _path = uri._path;
        _query = uri._query;
        _fragment = uri._fragment;
+       _disable_url_encoding = uri._disable_url_encoding;
    }
    return *this;
}
@@ -181,6 +184,7 @@ void URI::swap(URI& uri)
    std::swap(_path, uri._path);
    std::swap(_query, uri._query);
    std::swap(_fragment, uri._fragment);
+   std::swap(_disable_url_encoding, uri._disable_url_encoding);
}
@@ -201,7 +205,7 @@ std::string URI::toString() const
    std::string uri;
    if (isRelative())
    {
-       encode(_path, RESERVED_PATH, uri);
+       encodePath(uri);
    }
    else
    {
@@ -217,7 +221,7 @@ std::string URI::toString() const
    {
        if (!auth.empty() && _path[0] != '/')
            uri += '/';
-       encode(_path, RESERVED_PATH, uri);
+       encodePath(uri);
    }
    else if (!_query.empty() || !_fragment.empty())
    {
@@ -313,7 +317,7 @@ void URI::setAuthority(const std::string& authority)
void URI::setPath(const std::string& path)
{
    _path.clear();
-   decode(path, _path);
+   decodePath(path);
}


@@ -418,7 +422,7 @@ void URI::setPathEtc(const std::string& pathEtc)
std::string URI::getPathEtc() const
{
    std::string pathEtc;
-   encode(_path, RESERVED_PATH, pathEtc);
+   encodePath(pathEtc);
    if (!_query.empty())
    {
        pathEtc += '?';
@@ -436,7 +440,7 @@ std::string URI::getPathEtc() const
std::string URI::getPathAndQuery() const
{
    std::string pathAndQuery;
-   encode(_path, RESERVED_PATH, pathAndQuery);
+   encodePath(pathAndQuery);
    if (!_query.empty())
    {
        pathAndQuery += '?';
@@ -681,6 +685,21 @@ void URI::decode(const std::string& str, std::string& decodedStr, bool plusAsSpa
    }
}

+void URI::encodePath(std::string & encodedStr) const
+{
+    if (_disable_url_encoding)
+        encodedStr = _path;
+    else
+        encode(_path, RESERVED_PATH, encodedStr);
+}
+
+void URI::decodePath(const std::string & encodedStr)
+{
+    if (_disable_url_encoding)
+        _path = encodedStr;
+    else
+        decode(encodedStr, _path);
+}
+
bool URI::isWellKnownPort() const
{
@@ -820,7 +839,7 @@ void URI::parsePath(std::string::const_iterator& it, const std::string::const_it
{
    std::string path;
    while (it != end && *it != '?' && *it != '#') path += *it++;
-   decode(path, _path);
+   decodePath(path);
}
@@ -306,7 +306,7 @@ namespace Net
        DEFAULT_KEEP_ALIVE_TIMEOUT = 8
    };

-   void reconnect();
+   virtual void reconnect();
        /// Connects the underlying socket to the HTTP server.

    int write(const char * buffer, std::streamsize length);
@@ -15,6 +15,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

+include (cmake/unwind.cmake)
include (cmake/cxx.cmake)
link_libraries(global-group)
@@ -1,58 +0,0 @@
-# Embed a set of resource files into a resulting object file.
-#
-# Signature: `clickhouse_embed_binaries(TARGET <target> RESOURCE_DIR <dir> RESOURCES <resource> ...)
-#
-# This will generate a static library target named `<target>`, which contains the contents of
-# each `<resource>` file. The files should be located in `<dir>`. <dir> defaults to
-# ${CMAKE_CURRENT_SOURCE_DIR}, and the resources may not be empty.
-#
-# Each resource will result in three symbols in the final archive, based on the name `<resource>`.
-# These are:
-# 1. `_binary_<name>_start`: Points to the start of the binary data from `<resource>`.
-# 2. `_binary_<name>_end`: Points to the end of the binary data from `<resource>`.
-# 3. `_binary_<name>_size`: Points to the size of the binary data from `<resource>`.
-#
-# `<name>` is a normalized name derived from `<resource>`, by replacing the characters "./-" with
-# the character "_", and the character "+" with "_PLUS_". This scheme is similar to those generated
-# by `ld -r -b binary`, and matches the expectations in `./base/common/getResource.cpp`.
-macro(clickhouse_embed_binaries)
-    set(one_value_args TARGET RESOURCE_DIR)
-    set(resources RESOURCES)
-    cmake_parse_arguments(EMBED "" "${one_value_args}" ${resources} ${ARGN})
-
-    if (NOT DEFINED EMBED_TARGET)
-        message(FATAL_ERROR "A target name must be provided for embedding binary resources into")
-    endif()
-
-    if (NOT DEFINED EMBED_RESOURCE_DIR)
-        set(EMBED_RESOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
-    endif()
-
-    list(LENGTH EMBED_RESOURCES N_RESOURCES)
-    if (N_RESOURCES LESS 1)
-        message(FATAL_ERROR "The list of binary resources to embed may not be empty")
-    endif()
-
-    add_library("${EMBED_TARGET}" STATIC)
-    set_target_properties("${EMBED_TARGET}" PROPERTIES LINKER_LANGUAGE C)
-
-    set(EMBED_TEMPLATE_FILE "${PROJECT_SOURCE_DIR}/programs/embed_binary.S.in")
-
-    foreach(RESOURCE_FILE ${EMBED_RESOURCES})
-        set(ASSEMBLY_FILE_NAME "${RESOURCE_FILE}.S")
-        set(BINARY_FILE_NAME "${RESOURCE_FILE}")
-
-        # Normalize the name of the resource.
-        string(REGEX REPLACE "[\./-]" "_" SYMBOL_NAME "${RESOURCE_FILE}") # - must be last in regex
-        string(REPLACE "+" "_PLUS_" SYMBOL_NAME "${SYMBOL_NAME}")
-
-        # Generate the configured assembly file in the output directory.
-        configure_file("${EMBED_TEMPLATE_FILE}" "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" @ONLY)
-
-        # Set the include directory for relative paths specified for `.incbin` directive.
-        set_property(SOURCE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}" APPEND PROPERTY INCLUDE_DIRECTORIES "${EMBED_RESOURCE_DIR}")
-
-        target_sources("${EMBED_TARGET}" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/${ASSEMBLY_FILE_NAME}")
-        set_target_properties("${EMBED_TARGET}" PROPERTIES OBJECT_DEPENDS "${RESOURCE_FILE}")
-    endforeach()
-endmacro()
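For reference, consumers of the deleted macro reached each embedded blob through the symbols named in its comment block. A hedged C++ sketch for a resource file named config.xml ("." is normalized to "_"; the function name is illustrative):

    #include <cstddef>
    #include <string_view>

    // Provided by the generated assembly object, not by any header.
    extern "C" const char _binary_config_xml_start[];
    extern "C" const char _binary_config_xml_end[];

    std::string_view getEmbeddedConfig()
    {
        return {_binary_config_xml_start,
                static_cast<size_t>(_binary_config_xml_end - _binary_config_xml_start)};
    }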
@@ -1,38 +1,39 @@
-# Usage:
-#   set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # In megabytes
-#   set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "")
-#   include (cmake/limit_jobs.cmake)
+# Limit compiler/linker job concurrency to avoid OOMs on subtrees where compilation/linking is memory-intensive.
+#
+# Usage from CMake:
+#   set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # megabyte
+#   set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "") # megabyte
+#   include (cmake/limit_jobs.cmake)
+#
+# (bigger values mean fewer jobs)

-cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY) # Not available under freebsd
+cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY)
cmake_host_system_information(RESULT NUMBER_OF_LOGICAL_CORES QUERY NUMBER_OF_LOGICAL_CORES)

-# 1 if not set
-option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" "")
-
-# 1 if not set
-option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" "")
+# Set to disable the automatic job-limiting
+option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" OFF)
+option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" OFF)

-if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
+if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY)
    math(EXPR PARALLEL_COMPILE_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_COMPILER_MEMORY})
    if (NOT PARALLEL_COMPILE_JOBS)
        set (PARALLEL_COMPILE_JOBS 1)
    endif ()
+   if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+       message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
+   endif()
endif ()

-if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))
-    set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
-    string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
-    set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
-endif ()
-
-if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
+if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY)
    math(EXPR PARALLEL_LINK_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_LINKER_MEMORY})
    if (NOT PARALLEL_LINK_JOBS)
        set (PARALLEL_LINK_JOBS 1)
    endif ()
+   if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+       message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
+   endif()
endif ()

# ThinLTO provides its own parallel linking
@@ -46,14 +47,16 @@ if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLE
    set (PARALLEL_LINK_JOBS 2)
endif()

-if (PARALLEL_LINK_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES))
+message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB DRAM, 'OFF' means the native core count).")
+
+if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+   set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
+   string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
+   set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
+endif ()

+if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
    set(CMAKE_JOB_POOL_LINK link_job_pool${CMAKE_CURRENT_SOURCE_DIR})
    string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_LINK ${CMAKE_JOB_POOL_LINK})
    set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_LINK}=${PARALLEL_LINK_JOBS})
endif ()

-if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
-    message(STATUS
-        "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory.
-        Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
-endif ()
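A worked example of the auto-limit above: on a host with 131072 MB of RAM and MAX_LINKER_MEMORY=3500, math(EXPR ...) yields 131072/3500 = 37 concurrent link jobs (integer division), and the new warning fires only if 37 is below the logical core count.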
@@ -33,6 +33,18 @@ if (CMAKE_CROSSCOMPILING)
    elseif (ARCH_PPC64LE)
        set (ENABLE_GRPC OFF CACHE INTERNAL "")
        set (ENABLE_SENTRY OFF CACHE INTERNAL "")
+   elseif (ARCH_RISCV64)
+       # RISC-V support is preliminary
+       set (GLIBC_COMPATIBILITY OFF CACHE INTERNAL "")
+       set (ENABLE_LDAP OFF CACHE INTERNAL "")
+       set (OPENSSL_NO_ASM ON CACHE INTERNAL "")
+       set (ENABLE_JEMALLOC ON CACHE INTERNAL "")
+       set (ENABLE_PARQUET OFF CACHE INTERNAL "")
+       set (ENABLE_GRPC OFF CACHE INTERNAL "")
+       set (ENABLE_HDFS OFF CACHE INTERNAL "")
+       set (ENABLE_MYSQL OFF CACHE INTERNAL "")
+       # It might be ok, but we need to update 'sysroot'
+       set (ENABLE_RUST OFF CACHE INTERNAL "")
    elseif (ARCH_S390X)
        set (ENABLE_GRPC OFF CACHE INTERNAL "")
        set (ENABLE_SENTRY OFF CACHE INTERNAL "")
@@ -1,13 +1 @@
-option (USE_UNWIND "Enable libunwind (better stacktraces)" ${ENABLE_LIBRARIES})
-
-if (USE_UNWIND)
-    add_subdirectory(contrib/libunwind-cmake)
-    set (UNWIND_LIBRARIES unwind)
-    set (EXCEPTION_HANDLING_LIBRARY ${UNWIND_LIBRARIES})
-
-    message (STATUS "Using libunwind: ${UNWIND_LIBRARIES}")
-else ()
-    set (EXCEPTION_HANDLING_LIBRARY gcc_eh)
-endif ()
-
-message (STATUS "Using exception handler: ${EXCEPTION_HANDLING_LIBRARY}")
+add_subdirectory(contrib/libunwind-cmake)
contrib/CMakeLists.txt (vendored, 3 changes)
@@ -164,14 +164,13 @@ add_contrib (libpq-cmake libpq)
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (datasketches-cpp-cmake datasketches-cpp)
-add_contrib (hashidsxx-cmake hashidsxx)
+add_contrib (incbin-cmake incbin)

option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
if (ENABLE_NLP)
    add_contrib (libstemmer-c-cmake libstemmer_c)
    add_contrib (wordnet-blast-cmake wordnet-blast)
    add_contrib (lemmagen-c-cmake lemmagen-c)
    add_contrib (nlp-data-cmake nlp-data)
    add_contrib (cld2-cmake cld2)
endif()
contrib/NuRaft (vendored, 2 changes)
@@ -1 +1 @@
-Subproject commit 491eaf592d950e0e37accbe8b3f217e068c9fecf
+Subproject commit eb1572129c71beb2156dcdaadc3fb136954aed96
contrib/cctz (vendored, 2 changes)
@@ -1 +1 @@
-Subproject commit 5e05432420f9692418e2e12aff09859e420b14a2
+Subproject commit 8529bcef5cd996b7c0f4d7475286b76b5d126c4c
@@ -1,4 +1,3 @@
-include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/cctz")

set (SRCS
@@ -23,12 +22,10 @@ if (OS_FREEBSD)
endif ()

-# Related to time_zones table:
-# StorageSystemTimeZones.generated.cpp is autogenerated each time during a build
-# data in this file will be used to populate the system.time_zones table, this is specific to OS_LINUX
-# as the library that's built using embedded tzdata is also specific to OS_LINUX
-set(SYSTEM_STORAGE_TZ_FILE "${PROJECT_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
+# TimeZones.generated.cpp is autogenerated each time during a build
+set(TIMEZONES_FILE "${CMAKE_CURRENT_BINARY_DIR}/TimeZones.generated.cpp")
# remove existing copies so that it's generated fresh on each build.
-file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})
+file(REMOVE ${TIMEZONES_FILE})

# get the list of timezones from tzdata shipped with cctz
set(TZDIR "${LIBRARY_DIR}/testdata/zoneinfo")
@@ -36,28 +33,44 @@ file(STRINGS "${LIBRARY_DIR}/testdata/version" TZDATA_VERSION)
set_property(GLOBAL PROPERTY TZDATA_VERSION_PROP "${TZDATA_VERSION}")
message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")

-set(TIMEZONE_RESOURCE_FILES)
-
# each file in that dir (except *.tab and localtime) stores the info about a timezone
execute_process(COMMAND
    bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
    OUTPUT_STRIP_TRAILING_WHITESPACE
    OUTPUT_VARIABLE TIMEZONES)

-file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
-file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "const char * auto_time_zones[] {\n" )
+file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
+file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")
+
+set (COUNTER 1)
+foreach(TIMEZONE ${TIMEZONES})
+   file(APPEND ${TIMEZONES_FILE} "INCBIN(resource_timezone${COUNTER}, \"${TZDIR}/${TIMEZONE}\");\n")
+   MATH(EXPR COUNTER "${COUNTER}+1")
+endforeach(TIMEZONE)
+
+file(APPEND ${TIMEZONES_FILE} "const char * auto_time_zones[] {\n" )

foreach(TIMEZONE ${TIMEZONES})
-   file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "    \"${TIMEZONE}\",\n")
-   list(APPEND TIMEZONE_RESOURCE_FILES "${TIMEZONE}")
+   file(APPEND ${TIMEZONES_FILE} "    \"${TIMEZONE}\",\n")
    MATH(EXPR COUNTER "${COUNTER}+1")
endforeach(TIMEZONE)
-file(APPEND ${SYSTEM_STORAGE_TZ_FILE} "    nullptr};\n")
-clickhouse_embed_binaries(
-   TARGET tzdata
-   RESOURCE_DIR "${TZDIR}"
-   RESOURCES ${TIMEZONE_RESOURCE_FILES}
-)
-add_dependencies(_cctz tzdata)
-target_link_libraries(_cctz INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:tzdata> -Wl,${NO_WHOLE_ARCHIVE}")
+file(APPEND ${TIMEZONES_FILE} "    nullptr\n};\n\n")

+file(APPEND ${TIMEZONES_FILE} "#include <string_view>\n\n")
+file(APPEND ${TIMEZONES_FILE} "std::string_view getTimeZone(const char * name)\n{\n" )
+
+set (COUNTER 1)
+foreach(TIMEZONE ${TIMEZONES})
+   file(APPEND ${TIMEZONES_FILE} "    if (std::string_view(\"${TIMEZONE}\") == name) return { reinterpret_cast<const char *>(gresource_timezone${COUNTER}Data), gresource_timezone${COUNTER}Size };\n")
+   MATH(EXPR COUNTER "${COUNTER}+1")
+endforeach(TIMEZONE)
+
+file(APPEND ${TIMEZONES_FILE} "    return {};\n")
+file(APPEND ${TIMEZONES_FILE} "}\n")
+
+add_library (tzdata ${TIMEZONES_FILE})
+target_link_libraries(tzdata ch_contrib::incbin)
+target_link_libraries(_cctz tzdata)

add_library(ch_contrib::cctz ALIAS _cctz)
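Put together, the generator above emits a TimeZones.generated.cpp of roughly this shape (two zones shown; the zone names and path are illustrative, everything else follows the file(APPEND ...) calls verbatim):

    // autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt
    #include <incbin.h>
    INCBIN(resource_timezone1, "<TZDIR>/UTC");
    INCBIN(resource_timezone2, "<TZDIR>/Europe/Amsterdam");
    const char * auto_time_zones[] {
        "UTC",
        "Europe/Amsterdam",
        nullptr
    };

    #include <string_view>

    std::string_view getTimeZone(const char * name)
    {
        if (std::string_view("UTC") == name) return { reinterpret_cast<const char *>(gresource_timezone1Data), gresource_timezone1Size };
        if (std::string_view("Europe/Amsterdam") == name) return { reinterpret_cast<const char *>(gresource_timezone2Data), gresource_timezone2Size };
        return {};
    }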
contrib/hashidsxx (vendored, 1 change)
@@ -1 +0,0 @@
-Subproject commit 783f6911ccfdaca83e3cfac084c4aad888a80cee
@@ -1,14 +0,0 @@
-set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/hashidsxx")
-
-set (SRCS
-    "${LIBRARY_DIR}/hashids.cpp"
-)
-
-set (HDRS
-    "${LIBRARY_DIR}/hashids.h"
-)
-
-add_library(_hashidsxx ${SRCS} ${HDRS})
-target_include_directories(_hashidsxx SYSTEM PUBLIC "${LIBRARY_DIR}")
-
-add_library(ch_contrib::hashidsxx ALIAS _hashidsxx)
contrib/incbin (vendored, new submodule, 1 change)
@@ -0,0 +1 @@
+Subproject commit 6e576cae5ab5810f25e2631f2e0b80cbe7dc8cbf
contrib/incbin-cmake/CMakeLists.txt (new file, 8 lines)
@@ -0,0 +1,8 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/incbin")
add_library(_incbin INTERFACE)
target_include_directories(_incbin SYSTEM INTERFACE ${LIBRARY_DIR})
add_library(ch_contrib::incbin ALIAS _incbin)

# Warning "incbin is incompatible with bitcode. Using the library will break upload to App Store if you have bitcode enabled.
# Add `#define INCBIN_SILENCE_BITCODE_WARNING` before including this header to silence this warning."
target_compile_definitions(_incbin INTERFACE INCBIN_SILENCE_BITCODE_WARNING)
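incbin embeds a file into the translation unit at build time and exposes it as g<name>Data / g<name>End / g<name>Size symbols (the same naming the tzdata generator above relies on). A minimal usage sketch (file path illustrative):

    #include <incbin.h>
    #include <cstdio>

    // Defines gresource_schemaData, gresource_schemaEnd and gresource_schemaSize.
    INCBIN(resource_schema, "resources/schema.sql");

    int main()
    {
        std::fwrite(gresource_schemaData, 1, gresource_schemaSize, stdout);
    }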
@@ -1,5 +1,5 @@
if (SANITIZE OR NOT (
-   ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
+   ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64 OR ARCH_S390X)) OR
    (OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
))
if (ENABLE_JEMALLOC)
@@ -17,17 +17,17 @@ if (NOT ENABLE_JEMALLOC)
endif ()

if (NOT OS_LINUX)
-   message (WARNING "jemalloc support on non-linux is EXPERIMENTAL")
+   message (WARNING "jemalloc support on non-Linux is EXPERIMENTAL")
endif()

if (OS_LINUX)
-   # ThreadPool select job randomly, and there can be some threads that had been
-   # performed some memory heavy task before and will be inactive for some time,
-   # but until it will became active again, the memory will not be freed since by
-   # default each thread has it's own arena, but there should be not more then
+   # ThreadPool select job randomly, and there can be some threads that have been
+   # performed some memory-heavy tasks before and will be inactive for some time,
+   # but until it becomes active again, the memory will not be freed since, by
+   # default, each thread has its arena, but there should be no more than
    # 4*CPU arenas (see opt.nareans description).
    #
-   # By enabling percpu_arena number of arenas limited to number of CPUs and hence
+   # By enabling percpu_arena number of arenas is limited to the number of CPUs, and hence
    # this problem should go away.
    #
    # muzzy_decay_ms -- use MADV_FREE when available on newer Linuxes, to
@@ -38,7 +38,7 @@ if (OS_LINUX)
else()
    set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:5000,dirty_decay_ms:5000")
endif()
-# CACHE variable is empty, to allow changing defaults without necessity
+# CACHE variable is empty to allow changing defaults without the necessity
# to purge cache
set (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE "" CACHE STRING "Change default configuration string of JEMalloc" )
if (JEMALLOC_CONFIG_MALLOC_CONF_OVERRIDE)
@@ -148,6 +148,8 @@ elseif (ARCH_PPC64LE)
    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
+elseif (ARCH_RISCV64)
+   set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
elseif (ARCH_S390X)
    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_s390x")
else ()
    message (FATAL_ERROR "internal jemalloc: This arch is not supported")
endif ()
@@ -170,16 +172,13 @@ endif ()

target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)

-if (USE_UNWIND)
-   # jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
-   # The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
-   # At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracing.
-
-   # ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
-
-   target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
-   target_link_libraries (_jemalloc PRIVATE unwind)
-endif ()
+# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
+# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
+# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
+#
+# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
+target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
+target_link_libraries (_jemalloc PRIVATE unwind)

# for RTLD_NEXT
target_compile_options(_jemalloc PRIVATE -D_GNU_SOURCE)
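A hedged sketch of inspecting the arena policy discussed in the comments above through jemalloc's mallctl API (assuming the build leaves JEMALLOC_PREFIX unset, as the generated defs header below suggests, so the entry point is plain mallctl):

    #include <jemalloc/jemalloc.h>
    #include <cstdio>

    int main()
    {
        const char * policy = nullptr;
        size_t sz = sizeof(policy);
        // "opt.percpu_arena" reads back how arenas are bound to CPUs.
        if (mallctl("opt.percpu_arena", &policy, &sz, nullptr, 0) == 0)
            std::printf("percpu_arena: %s\n", policy);
    }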
@ -0,0 +1,435 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#define JEMALLOC_OVERRIDE___LIBC_PVALLOC
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses. This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 64

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
#define JEMALLOC_HAVE_SECURE_GETENV

/*
 * Defined if issetugid(2) is available.
 */
/* #undef JEMALLOC_HAVE_ISSETUGID */

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
 * Defined if mach_absolute_time() is available.
 */
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
 * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
 * Defined if _malloc_thread_cleanup() exists. At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library. Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
#define JEMALLOC_THREADED_INIT

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
/* #undef JEMALLOC_MUTEX_INIT_CB */

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/* JEMALLOC_PAGEID enabled page id */
/* #undef JEMALLOC_PAGEID */

/* JEMALLOC_HAVE_PRCTL checks prctl */
#define JEMALLOC_HAVE_PRCTL

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
#define JEMALLOC_DSS

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
 * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 20

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
#define JEMALLOC_RETAIN

/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping. Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities. We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
/* #undef JEMALLOC_ZONE */

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
#define JEMALLOC_HAVE_MADVISE_HUGE

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
#define JEMALLOC_MADVISE_DONTDUMP

/*
 * Defined if MADV_[NO]CORE is supported as an argument to madvise.
 */
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
 * Method for purging unused pages using posix_madvise.
 *
 *   posix_madvise(..., POSIX_MADV_DONTNEED)
 */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
 * Defined if memcntl page admin call is supported
 */
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
 * Defined if malloc_size is supported
 */
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
#define JEMALLOC_BIG_ENDIAN

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */

/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU

/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY

/*
 * If defined, all the features necessary for background threads are present.
 */
#define JEMALLOC_BACKGROUND_THREAD

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
#define JEMALLOC_ENABLE_CXX

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -61,9 +61,7 @@ target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)

# Enable capturing stack traces for all exceptions.
if (USE_UNWIND)
    target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
endif ()
target_compile_definitions(cxx PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)

if (USE_MUSL)
    target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1)
@ -35,12 +35,10 @@ target_include_directories(cxxabi SYSTEM BEFORE
)
target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY)
target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast.
target_link_libraries(cxxabi PUBLIC ${EXCEPTION_HANDLING_LIBRARY})
target_link_libraries(cxxabi PUBLIC unwind)

# Enable capturing stack traces for all exceptions.
if (USE_UNWIND)
    target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)
endif ()
target_compile_definitions(cxxabi PUBLIC -DSTD_EXCEPTION_HAS_STACK_TRACE=1)

install(
    TARGETS cxxabi
@ -1,15 +0,0 @@
include(${ClickHouse_SOURCE_DIR}/cmake/embed_binary.cmake)

set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/nlp-data")

add_library (_nlp_data INTERFACE)

clickhouse_embed_binaries(
    TARGET nlp_dictionaries
    RESOURCE_DIR "${LIBRARY_DIR}"
    RESOURCES charset.zst tonality_ru.zst programming.zst
)

add_dependencies(_nlp_data nlp_dictionaries)
target_link_libraries(_nlp_data INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:nlp_dictionaries> -Wl,${NO_WHOLE_ARCHIVE}")
add_library(ch_contrib::nlp_data ALIAS _nlp_data)
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.6.1.1524"
ARG VERSION="23.6.2.18"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.
@ -49,8 +49,8 @@ ENV CARGO_HOME=/rust/cargo
ENV PATH="/rust/cargo/bin:${PATH}"
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
    chmod 777 -R /rust && \
    rustup toolchain install nightly && \
    rustup default nightly && \
    rustup toolchain install nightly-2023-07-04 && \
    rustup default nightly-2023-07-04 && \
    rustup component add rust-src && \
    rustup target add aarch64-unknown-linux-gnu && \
    rustup target add x86_64-apple-darwin && \
@ -58,6 +58,33 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
    rustup target add aarch64-apple-darwin && \
    rustup target add powerpc64le-unknown-linux-gnu

# Create vendor cache for cargo.
#
# Note that the config.toml for the root is used, you will not be able to
# install any other crates, except those which had been vendored (since if
# there is "replace-with" for some source, then cargo will not look to other
# remotes except this).
#
# Notes for the command itself:
# - --chown is required to preserve the rights
# - unstable-options for -C
# - chmod is required to fix the permissions, since builds are running from a different user
# - copy of the Cargo.lock is required for proper dependencies versions
# - cargo vendor --sync is required to overcome [1] bug.
#
# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
COPY --chown=root:root /rust /rust/packages
RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
    cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
    cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
    rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
    sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
    cat $CARGO_HOME/config.toml && \
    mv /rust/packages/vendor /rust/vendor && \
    chmod -R o=r+X /rust/vendor && \
    ls -R -l /rust/packages && \
    rm -r /rust/packages

# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
1
docker/packager/binary/rust
Symbolic link
@ -0,0 +1 @@
../../../rust
@ -138,6 +138,7 @@ def parse_env_variables(
    ARM_V80COMPAT_SUFFIX = "-aarch64-v80compat"
    FREEBSD_SUFFIX = "-freebsd"
    PPC_SUFFIX = "-ppc64le"
    RISCV_SUFFIX = "-riscv64"
    AMD64_COMPAT_SUFFIX = "-amd64-compat"

    result = []
@ -150,6 +151,7 @@ def parse_env_variables(
    is_cross_arm = compiler.endswith(ARM_SUFFIX)
    is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
    is_cross_ppc = compiler.endswith(PPC_SUFFIX)
    is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
    is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
    is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)

@ -206,6 +208,11 @@ def parse_env_variables(
        cmake_flags.append(
            "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
        )
    elif is_cross_riscv:
        cc = compiler[: -len(RISCV_SUFFIX)]
        cmake_flags.append(
            "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
        )
    elif is_amd64_compat:
        cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
        result.append("DEB_ARCH=amd64")
@ -370,6 +377,7 @@ def parse_args() -> argparse.Namespace:
            "clang-16-aarch64",
            "clang-16-aarch64-v80compat",
            "clang-16-ppc64le",
            "clang-16-riscv64",
            "clang-16-amd64-compat",
            "clang-16-freebsd",
        ),
@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.6.1.1524"
ARG VERSION="23.6.2.18"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.
@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.6.1.1524"
ARG VERSION="23.6.2.18"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@ -97,8 +97,8 @@ docker run -d \

You may also want to mount:

* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustmenets
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustmenets
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
* `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).

### Linux capabilities
@ -141,13 +141,13 @@ function clone_submodules
        contrib/jemalloc
        contrib/replxx
        contrib/wyhash
        contrib/hashidsxx
        contrib/c-ares
        contrib/morton-nd
        contrib/xxHash
        contrib/simdjson
        contrib/liburing
        contrib/libfiu
        contrib/incbin
    )

    git submodule sync
@ -166,7 +166,6 @@ function run_cmake
        "-DENABLE_UTILS=0"
        "-DENABLE_EMBEDDED_COMPILER=0"
        "-DENABLE_THINLTO=0"
        "-DUSE_UNWIND=1"
        "-DENABLE_NURAFT=1"
        "-DENABLE_SIMDJSON=1"
        "-DENABLE_JEMALLOC=1"
@ -291,7 +291,7 @@ quit
    if [ "$server_died" == 1 ]
    then
        # The server has died.
        if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*' server.log > description.txt
        if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log > description.txt
        then
            echo "Lost connection to server. See the logs." > description.txt
        fi
@ -98,6 +98,7 @@ RUN python3 -m pip install --no-cache-dir \
        redis \
        requests-kerberos \
        tzlocal==2.1 \
        retry \
        urllib3

# Hudi supports only spark 3.3.*, not 3.4
@ -134,4 +135,5 @@ ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "pytest $PYTEST_OPTS"]
# To pass additional arguments (i.e. list of tests) use PYTEST_ADDOPTS
CMD ["sh", "-c", "pytest"]
@ -4,6 +4,8 @@ services:
  kafka_zookeeper:
    image: zookeeper:3.4.9
    hostname: kafka_zookeeper
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_PORT: 2181
@ -15,15 +17,14 @@ services:
    image: confluentinc/cp-kafka:5.2.0
    hostname: kafka1
    ports:
      - ${KAFKA_EXTERNAL_PORT:-8081}:${KAFKA_EXTERNAL_PORT:-8081}
      - ${KAFKA_EXTERNAL_PORT}:${KAFKA_EXTERNAL_PORT}
    environment:
      KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:${KAFKA_EXTERNAL_PORT},OUTSIDE://kafka1:19092
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_LISTENERS: INSIDE://0.0.0.0:${KAFKA_EXTERNAL_PORT},OUTSIDE://0.0.0.0:19092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: "kafka_zookeeper:2181"
      KAFKA_ZOOKEEPER_CONNECT: kafka_zookeeper:2181
      KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    depends_on:
@ -35,13 +36,38 @@ services:
    image: confluentinc/cp-schema-registry:5.2.0
    hostname: schema-registry
    ports:
      - ${SCHEMA_REGISTRY_EXTERNAL_PORT:-12313}:${SCHEMA_REGISTRY_INTERNAL_PORT:-12313}
      - ${SCHEMA_REGISTRY_EXTERNAL_PORT}:${SCHEMA_REGISTRY_EXTERNAL_PORT}
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:${SCHEMA_REGISTRY_EXTERNAL_PORT}
      SCHEMA_REGISTRY_SCHEMA_REGISTRY_GROUP_ID: noauth
    depends_on:
      - kafka_zookeeper
      - kafka1
    restart: always
    security_opt:
      - label:disable

  schema-registry-auth:
    image: confluentinc/cp-schema-registry:5.2.0
    hostname: schema-registry-auth
    ports:
      - ${SCHEMA_REGISTRY_AUTH_EXTERNAL_PORT}:${SCHEMA_REGISTRY_AUTH_EXTERNAL_PORT}
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry-auth
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:${SCHEMA_REGISTRY_AUTH_EXTERNAL_PORT}
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
      SCHEMA_REGISTRY_AUTHENTICATION_METHOD: BASIC
      SCHEMA_REGISTRY_AUTHENTICATION_ROLES: user
      SCHEMA_REGISTRY_AUTHENTICATION_REALM: RealmFooBar
      SCHEMA_REGISTRY_OPTS: "-Djava.security.auth.login.config=/etc/schema-registry/secrets/schema_registry_jaas.conf"
      SCHEMA_REGISTRY_SCHEMA_REGISTRY_GROUP_ID: auth
    volumes:
      - ${SCHEMA_REGISTRY_DIR:-}/secrets:/etc/schema-registry/secrets
    depends_on:
      - kafka_zookeeper
      - kafka1
    restart: always
    security_opt:
      - label:disable
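The `schema-registry-auth` service above lets integration tests exercise schema fetching over HTTP basic auth. A sketch of how a ClickHouse consumer might point at it through the `format_avro_schema_registry_url` setting (the table, topic, group, credentials, and port are hypothetical):

```sql
CREATE TABLE avro_events (value String) ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
         kafka_topic_list = 'avro_topic',
         kafka_group_name = 'test_group',
         kafka_format = 'AvroConfluent',
         format_avro_schema_registry_url = 'http://user:password@schema-registry-auth:13131';
```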
@ -92,8 +92,8 @@ sudo clickhouse stop ||:

for _ in $(seq 1 60); do if [[ $(wget --timeout=1 -q 'localhost:8123' -O-) == 'Ok.' ]]; then sleep 1 ; else break; fi ; done

grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
zstd < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &

# Compressed (FIXME: remove once only github actions will be left)
rm /var/log/clickhouse-server/clickhouse-server.log
@ -33,7 +33,6 @@ RUN apt-get update -y \
        qemu-user-static \
        sqlite3 \
        sudo \
        telnet \
        tree \
        unixodbc \
        wget \
@ -4,6 +4,9 @@
set -e -x -a

# Choose random timezone for this test run.
#
# NOTE: clickhouse-test will randomize session_timezone by itself as well
# (it will choose between default server timezone and something specific).
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Chosen random timezone $TZ"
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
@ -8,8 +8,6 @@ RUN apt-get update -y \
    apt-get install --yes --no-install-recommends \
        bash \
        tzdata \
        fakeroot \
        debhelper \
        parallel \
        expect \
        python3 \
@ -20,7 +18,6 @@ RUN apt-get update -y \
        sudo \
        openssl \
        netcat-openbsd \
        telnet \
        brotli \
    && apt-get clean

@ -8,8 +8,6 @@ RUN apt-get update -y \
    apt-get install --yes --no-install-recommends \
        bash \
        tzdata \
        fakeroot \
        debhelper \
        parallel \
        expect \
        python3 \
@ -20,7 +18,6 @@ RUN apt-get update -y \
        sudo \
        openssl \
        netcat-openbsd \
        telnet \
        brotli \
    && apt-get clean
@ -67,6 +67,13 @@ start
stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log

# Start server from previous release
# Let's enable S3 storage by default
export USE_S3_STORAGE_FOR_MERGE_TREE=1
# Previous version may not be ready for fault injections
export ZOOKEEPER_FAULT_INJECTION=0
configure

# force_sync=false doesn't work correctly on some older versions
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
    | sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
@ -81,13 +88,6 @@ mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/cli
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml

# Start server from previous release
# Let's enable S3 storage by default
export USE_S3_STORAGE_FOR_MERGE_TREE=1
# Previous version may not be ready for fault injections
export ZOOKEEPER_FAULT_INJECTION=0
configure

# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
@ -44,7 +44,6 @@ RUN apt-get update \
        clang-${LLVM_VERSION} \
        clang-tidy-${LLVM_VERSION} \
        cmake \
        fakeroot \
        gdb \
        git \
        gperf \
@ -94,7 +93,10 @@ RUN mkdir /tmp/ccache \
    && rm -rf /tmp/ccache

ARG TARGETARCH
ARG SCCACHE_VERSION=v0.4.1
ARG SCCACHE_VERSION=v0.5.4
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
# sccache requires a value for the region. So by default we use The Default Region
ENV SCCACHE_REGION=us-east-1
RUN arch=${TARGETARCH:-amd64} \
    && case $arch in \
        amd64) rarch=x86_64 ;; \
@ -33,6 +33,9 @@ then
    elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
    then
        DIR="powerpc64le"
    elif [ "${ARCH}" = "riscv64" ]
    then
        DIR="riscv64"
    fi
elif [ "${OS}" = "FreeBSD" ]
then
20
docs/changelogs/v22.8.20.11-lts.md
Normal file
@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.20.11-lts (c9ca79e24e8) FIXME as compared to v22.8.19.10-lts (989bc2fe8b0)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
* Fix incorrect constant folding [#50536](https://github.com/ClickHouse/ClickHouse/pull/50536) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix fuzzer failure in ActionsDAG [#51301](https://github.com/ClickHouse/ClickHouse/pull/51301) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
23
docs/changelogs/v23.3.8.21-lts.md
Normal file
@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.8.21-lts (1675f2264f3) FIXME as compared to v23.3.7.5-lts (bc683c11c92)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
26
docs/changelogs/v23.4.6.25-stable.md
Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.4.6.25-stable (a06848b1770) FIXME as compared to v23.4.5.22-stable (0ced5d6a8da)

#### Improvement
* Backported in [#51234](https://github.com/ClickHouse/ClickHouse/issues/51234): Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
25
docs/changelogs/v23.6.2.18-stable.md
Normal file
@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.6.2.18-stable (89f39a7ccfe) FIXME as compared to v23.6.1.1524-stable (d1c7e13d088)

#### Build/Testing/Packaging Improvement
* Backported in [#51888](https://github.com/ClickHouse/ClickHouse/issues/51888): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Remove the usage of Analyzer setting in the client [#51578](https://github.com/ClickHouse/ClickHouse/pull/51578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 02116_tuple_element with Analyzer [#51669](https://github.com/ClickHouse/ClickHouse/pull/51669) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix SQLLogic docker images [#51719](https://github.com/ClickHouse/ClickHouse/pull/51719) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Pin for docker-ce [#51743](https://github.com/ClickHouse/ClickHouse/pull/51743) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -23,7 +23,7 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
``` bash
cd ClickHouse
mkdir build-riscv64
CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_UNWIND=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
CC=clang-16 CXX=clang++-16 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DENABLE_GRPC=OFF -DENABLE_HDFS=OFF -DENABLE_MYSQL=OFF
ninja -C build-riscv64
```
@ -11,7 +11,8 @@ Supported platforms:

- x86_64
- AArch64
- Power9 (experimental)
- PowerPC 64 LE (experimental)
- RISC-V 64 (experimental)

## Building on Ubuntu

@ -42,7 +43,7 @@ sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test

For other Linux distributions, check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).

As of April 2023, any version of Clang >= 15 will work.
As of April 2023, clang-16 or higher will work.
GCC as a compiler is not supported.
To build with a specific Clang version:

@ -86,8 +87,8 @@ The build requires the following components:

- Git (used to check out the sources, not needed for the build)
- CMake 3.20 or newer
- Compiler: Clang 15 or newer
- Linker: lld 15 or newer
- Compiler: clang-16 or newer
- Linker: lld-16 or newer
- Ninja
- Yasm
- Gawk
@ -33,6 +33,15 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name

- `options` — MongoDB connection string options (optional parameter).

:::tip
If you are using the MongoDB Atlas cloud offering, please add these options:

```
'connectTimeoutMS=10000&ssl=true&authSource=admin'
```

:::

## Usage Example {#usage-example}

Create a table in ClickHouse that allows reading data from a MongoDB collection:
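The usage example itself lies outside this hunk; a minimal sketch of such a definition, with placeholder host, database, collection, and credentials, and the Atlas options from the tip passed as the optional last argument:

```sql
CREATE TABLE mongo_table
(
    key UInt64,
    data String
) ENGINE = MongoDB('mongo1:27017', 'testdb', 'simple_table', 'testuser', 'clickhouse',
                   'connectTimeoutMS=10000&ssl=true&authSource=admin');
```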
@ -37,8 +37,8 @@ The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr1] [TTL expr1] [CODEC(codec1)] [[NOT] NULL|PRIMARY KEY],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS|EPHEMERAL expr2] [TTL expr2] [CODEC(codec2)] [[NOT] NULL|PRIMARY KEY],
    ...
    INDEX index_name1 expr1 TYPE type1(...) [GRANULARITY value1],
    INDEX index_name2 expr2 TYPE type2(...) [GRANULARITY value2],
@ -439,41 +439,41 @@ Syntax: `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions,
- `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
- `random_seed` — The seed for Bloom filter hash functions.

Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameter set of `ngrambf_v1`. Query statements are as follows:

```sql
CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2));

CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));

CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions);

CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster]
AS
(number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions))))

```

To use those functions, we need to specify at least two parameters.
For example, if there are 4300 ngrams in the granule and we expect false positives to be less than 0.0001, the other parameters can be estimated by executing the following queries:

```sql
--- estimate number of bits in the filter
SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes;

┌─size_of_bloom_filter_in_bytes─┐
│                         10304 │
└───────────────────────────────┘

--- estimate number of hash functions
SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions

┌─number_of_hash_functions─┐
│                       13 │
└──────────────────────────┘
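--- cross-check (a sketch, not part of the original hunk): feeding the two
--- estimates back into bfEstimateFalsePositive should land near the 0.0001
--- target; the lambda above divides by the filter size in bits, hence the
--- byte estimate is multiplied by 8 here
SELECT bfEstimateFalsePositive(4300, 13, 10304 * 8) AS probability_of_false_positives;
```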
@ -991,7 +991,7 @@ use a local disk to cache data from a table stored at a URL. Neither the cache d
nor the web storage is configured in the ClickHouse configuration files; both are
configured in the CREATE/ATTACH query settings.

In the settings highlighted below, notice that the disk of `type=web` is nested within
the disk of `type=cache`.

```sql
@ -1308,7 +1308,7 @@ configuration file.
In this sample configuration:
- the disk is of type `web`
- the data is hosted at `http://nginx:80/test1/`
- a cache on local storage is used

```xml
<clickhouse>
@ -106,3 +106,4 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
## Storage Settings {#storage-settings}

- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows disabling decoding/encoding of the path in the URI. Disabled by default (see the sketch below).
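Both are query-level settings, so they can also be applied per query; a minimal sketch, assuming a hypothetical URL and schema:

```sql
SELECT * FROM url('http://example.com/data/some%20file.csv', 'CSV', 'c1 UInt32')
SETTINGS disable_url_encoding = 1, engine_url_skip_empty_files = 1;
```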
@ -76,6 +76,33 @@ The supported formats are:
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
@ -471,6 +472,8 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`.
- [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow setting a column to its default value when CSV field deserialization fails on a bad value (see the example below). Default value - `false`.

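A quick way to see `input_format_csv_use_default_on_bad_values` in action is the `format` table function; a sketch with made-up inline data, where `x` cannot be parsed and falls back to its type default of `0`:

```sql
SELECT * FROM format(CSV, 'x UInt32, s String', 'not_a_number,"hello"')
SETTINGS input_format_csv_use_default_on_bad_values = 1;

┌─x─┬─s─────┐
│ 0 │ hello │
└───┴───────┘
```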
## CSVWithNames {#csvwithnames}

@ -1514,6 +1517,23 @@ If setting [input_format_with_types_use_header](/docs/en/operations/settings/set
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
:::

## RowBinaryWithDefaults {#rowbinarywithdefaults}

Similar to [RowBinary](#rowbinary), but with an extra byte before each column that indicates whether the default value should be used.

Examples:

```sql
:) select * from format('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x'010001000000')

┌──x─┬─y─┐
│ 42 │ 1 │
└────┴───┘
```

For column `x` there is only one byte, `01`, which indicates that the default value should be used; no other data is provided after this byte.
For column `y`, the data starts with byte `00`, which indicates that the column has an actual value, which is read from the subsequent data `01000000`.

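For contrast, a sketch of the same query where both columns carry explicit values: each column is prefixed with byte `00` and followed by its little-endian payload (here `x` is 7 and `y` is 1):

```sql
:) select * from format('RowBinaryWithDefaults', 'x UInt32 default 42, y UInt32', x'00070000000001000000')

┌─x─┬─y─┐
│ 7 │ 1 │
└───┴───┘
```
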
## RowBinary format settings {#row-binary-format-settings}

- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
@ -30,7 +30,7 @@ description: In order to effectively mitigate possible human errors, you should
```

:::note ALL
`ALL` is only applicable to the `RESTORE` command prior to version 23.4 of Clickhouse.
Prior to version 23.4 of ClickHouse, `ALL` was only applicable to the `RESTORE` command.
:::

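From version 23.4 on, `ALL` applies to both commands; a minimal sketch, assuming a disk named `backups` is configured as a backup destination:

```sql
BACKUP ALL TO Disk('backups', 'everything.zip');
RESTORE ALL FROM Disk('backups', 'everything.zip');
```
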
## Background

@ -1,11 +1,11 @@
---
slug: /en/operations/server-configuration-parameters/settings
sidebar_position: 57
sidebar_label: Server Settings
sidebar_label: Global Server Settings
description: This section contains descriptions of server settings that cannot be changed at the session or query level.
---

# Server Settings
# Global Server Settings

This section contains descriptions of server settings that cannot be changed at the session or query level.

@ -1201,13 +1201,58 @@ Keys:
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.

Both log and error log file names (only file names, not directories) support date and time format specifiers.

**Format specifiers**
Using the following format specifiers, you can define a pattern for the resulting file name. The “Example” column shows possible results for `2023-07-06 18:32:07`.

| Specifier | Description | Example |
|-----------|-------------|---------|
| %% | Literal % | % |
| %n | New-line character | |
| %t | Horizontal tab character | |
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
| %y | Last 2 digits of year as a decimal number (range [00,99]) | 23 |
| %C | First 2 digits of year as a decimal number (range [00,99]) | 20 |
| %G | Four-digit [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. Normally useful only with %V | 2023 |
| %g | Last 2 digits of [ISO 8601 week-based year](https://en.wikipedia.org/wiki/ISO_8601#Week_dates), i.e. the year that contains the specified week. | 23 |
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
| %h | Synonym of %b | Jul |
| %B | Full month name, e.g. October (locale dependent) | July |
| %m | Month as a decimal number (range [01,12]) | 07 |
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
| %V | ISO 8601 week number (range [01,53]) | 27 |
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
| %d | Day of the month as a zero-padded decimal number (range [01,31]). Single digit is preceded by zero. | 06 |
| %e | Day of the month as a space-padded decimal number (range [1,31]). Single digit is preceded by a space. | 6 |
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
| %w | Weekday as an integer number with Sunday as 0 (range [0-6]) | 4 |
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
| %H | Hour as a decimal number, 24 hour clock (range [00-23]) | 18 |
| %I | Hour as a decimal number, 12 hour clock (range [01,12]) | 06 |
| %M | Minute as a decimal number (range [00,59]) | 32 |
| %S | Second as a decimal number (range [00,60]) | 07 |
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul 6 18:32:07 2023 |
| %x | Localized date representation (locale dependent) | 07/06/23 |
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 07/06/23 |
| %F | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2023-07-06 |
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
| %R | Equivalent to "%H:%M" | 18:32 |
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
| %Z | Locale-dependent time zone name or abbreviation, or no characters if the time zone information is not available | Z AWST |

**Example**

``` xml
<logger>
    <level>trace</level>
    <log>/var/log/clickhouse-server/clickhouse-server.log</log>
    <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
    <log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
    <errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
    <size>1000M</size>
    <count>10</count>
    <stream_compress>true</stream_compress>
@ -7,90 +7,16 @@ pagination_next: en/operations/settings/settings

# Settings Overview

There are multiple ways to define ClickHouse settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.
There are two main groups of ClickHouse settings:

The order of priority for defining a setting is:
- Global server settings
- Query-level settings

1. Settings in the `users.xml` server configuration file
The main distinction between global server settings and query-level settings is that
global server settings must be set in configuration files while query-level settings
can be set in configuration files or with SQL queries.

- Set in the element `<profiles>`.
Read about [global server settings](/docs/en/operations/server-configuration-parameters/settings.md) to learn more about configuring your ClickHouse server at the global server level.

2. Session settings
Read about [query-level settings](/docs/en/operations/settings/settings-query-level.md) to learn more about configuring your ClickHouse server at the query level.

- Send `SET setting=value` from the ClickHouse console client in interactive mode.
Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to specify the `session_id` HTTP parameter.

3. Query settings

- When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
- When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
- Define settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query) clause of the SELECT query. The setting value is applied only to that query and is reset to the default or previous value after the query is executed.

View the [Settings](./settings.md) page for a description of the ClickHouse settings.

## Converting a Setting to its Default Value

If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:

```sql
SET setting_name = DEFAULT
```

For example, the default value of `max_insert_block_size` is 1048449. Suppose you change its value to 100000:

```sql
SET max_insert_block_size=100000;

SELECT value FROM system.settings where name='max_insert_block_size';
```

The response is:

```response
┌─value──┐
│ 100000 │
└────────┘
```

The following command sets its value back to 1048449:

```sql
SET max_insert_block_size=DEFAULT;

SELECT value FROM system.settings where name='max_insert_block_size';
```

The setting is now back to its default:

```response
┌─value───┐
│ 1048449 │
└─────────┘
```

## Custom Settings {#custom_settings}

In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.

A custom setting name must begin with one of the predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.

```xml
<custom_settings_prefixes>custom_</custom_settings_prefixes>
```

To define a custom setting, use the `SET` command:

```sql
SET custom_a = 123;
```

To get the current value of a custom setting, use the `getSetting()` function:

```sql
SELECT getSetting('custom_a');
```

**See Also**

- [Server Configuration Settings](../../operations/server-configuration-parameters/settings.md)
@ -242,6 +242,26 @@ See also:
- [DateTime data type.](../../sql-reference/data-types/datetime.md)
- [Functions for working with dates and times.](../../sql-reference/functions/date-time-functions.md)

## interval_output_format {#interval_output_format}

Allows choosing different output formats of the text representation of interval types.

Possible values:

- `kusto` - KQL-style output format.

  ClickHouse outputs intervals in [KQL format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-timespan-format-strings#the-constant-c-format-specifier). For example, `toIntervalDay(2)` would be formatted as `2.00:00:00`. Please note that for interval types of varying length (i.e. `IntervalMonth` and `IntervalYear`) the average number of seconds per interval is taken into account.

- `numeric` - Numeric output format.

  ClickHouse outputs intervals as their underlying numeric representation. For example, `toIntervalDay(2)` would be formatted as `2`.

Default value: `numeric`.
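
A quick way to see both modes side by side (a sketch runnable in any client; the expected output follows from the descriptions above):

```sql
SET interval_output_format = 'kusto';
SELECT toIntervalDay(2);   -- 2.00:00:00

SET interval_output_format = 'numeric';
SELECT toIntervalDay(2);   -- 2
```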

See also:

- [Interval](../../sql-reference/data-types/special-data-types/interval.md)

## input_format_ipv4_default_on_conversion_error {#input_format_ipv4_default_on_conversion_error}

Deserialization of IPv4 will use default values instead of throwing an exception on conversion error.
@ -931,6 +951,11 @@ Result
```text
" string "
```

### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}

Ignore extra columns in CSV input (if the file has more columns than expected) and treat missing fields in CSV input as default values.

Disabled by default.
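
As a hedged sketch of the effect, modeled on the `clickhouse local` examples used for the neighboring CSV settings (the input rows are made up): row 2 carries an extra column and row 3 is missing one, and both are still parsed:

```bash
echo -e 'a,1\nb,2,extra\nc' | ./clickhouse local \
    --input-format CSV --structure 'x String, y UInt32' \
    --input_format_csv_allow_variable_number_of_columns=1 \
    -q "SELECT * FROM table"
```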

### input_format_csv_allow_whitespace_or_tab_as_delimiter {#input_format_csv_allow_whitespace_or_tab_as_delimiter}
@ -964,6 +989,28 @@ Result
a b
```

### input_format_csv_use_default_on_bad_values {#input_format_csv_use_default_on_bad_values}

Allows setting a column to its default value when deserialization of a CSV field failed on a bad value.

Default value: `false`.

**Examples**

Query

```bash
./clickhouse local -q "create table test_tbl (x String, y UInt32, z Date) engine=MergeTree order by x"
echo 'a,b,c' | ./clickhouse local -q "INSERT INTO test_tbl SETTINGS input_format_csv_use_default_on_bad_values=true FORMAT CSV"
./clickhouse local -q "select * from test_tbl"
```

Result

```text
a 0 1971-01-01
```

## Values format settings {#values-format-settings}

### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}
@ -1300,6 +1347,17 @@ Default value: 0.
Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format.

Format:

``` text
http://[user:password@]machine[:port]
```

Examples:

``` text
http://registry.example.com:8081
http://admin:secret@registry.example.com:8081
```

Default value: `Empty`.

### output_format_avro_codec {#output_format_avro_codec}
217
docs/en/operations/settings/settings-query-level.md
Normal file
@ -0,0 +1,217 @@
---
sidebar_label: Query-level Settings
title: Query-level Settings
slug: /en/operations/settings/query-level
---

There are multiple ways to set ClickHouse query-level settings. Settings are configured in layers, and each subsequent layer redefines the previous values of a setting.

The order of priority for defining a setting is:

1. Applying a setting to a user directly, or within a settings profile

   - SQL (recommended)
   - adding one or more XML or YAML files to `/etc/clickhouse-server/users.d`

2. Session settings

   - Send `SET setting=value` from the ClickHouse Cloud SQL console or
     `clickhouse client` in interactive mode. Similarly, you can use ClickHouse
     sessions in the HTTP protocol. To do this, you need to specify the
     `session_id` HTTP parameter.

3. Query settings

   - When starting `clickhouse client` in non-interactive mode, set the startup
     parameter `--setting=value`.
   - When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
   - Define settings in the
     [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query)
     clause of the SELECT query. The setting value is applied only to that query
     and is reset to the default or previous value after the query is executed.

## Examples

These examples all set the value of the `async_insert` setting to `1`, and
show how to examine the settings in a running system.

### Using SQL to apply a setting to a user directly

This creates the user `ingester` with the setting `async_insert = 1`:

```sql
CREATE USER ingester
IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
# highlight-next-line
SETTINGS async_insert = 1
```

#### Examine the settings profile and assignment

```sql
SHOW ACCESS
```

```response
┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
│ ...                                                                                │
# highlight-next-line
│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS async_insert = true │
│ ...                                                                                │
└────────────────────────────────────────────────────────────────────────────────────┘
```

### Using SQL to create a settings profile and assign to a user

This creates the profile `log_ingest` with the setting `async_insert = 1`:

```sql
CREATE
SETTINGS PROFILE log_ingest SETTINGS async_insert = 1
```

This creates the user `ingester` and assigns the user the settings profile `log_ingest`:

```sql
CREATE USER ingester
IDENTIFIED WITH sha256_hash BY '7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3'
# highlight-next-line
SETTINGS PROFILE log_ingest
```

### Using XML to create a settings profile and user

```xml title=/etc/clickhouse-server/users.d/users.xml
<clickhouse>
# highlight-start
    <profiles>
        <log_ingest>
            <async_insert>1</async_insert>
        </log_ingest>
    </profiles>
# highlight-end

    <users>
        <ingester>
            <password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
# highlight-start
            <profile>log_ingest</profile>
# highlight-end
        </ingester>
        <default replace="true">
            <password_sha256_hex>7e099f39b84ea79559b3e85ea046804e63725fd1f46b37f281276aae20f86dc3</password_sha256_hex>
            <access_management>1</access_management>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
```

#### Examine the settings profile and assignment

```sql
SHOW ACCESS
```

```response
┌─ACCESS─────────────────────────────────────────────────────────────────────────────┐
│ CREATE USER default IDENTIFIED WITH sha256_password                                │
# highlight-next-line
│ CREATE USER ingester IDENTIFIED WITH sha256_password SETTINGS PROFILE log_ingest   │
│ CREATE SETTINGS PROFILE default                                                    │
# highlight-next-line
│ CREATE SETTINGS PROFILE log_ingest SETTINGS async_insert = true                    │
│ CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1                             │
│ ...                                                                                │
└────────────────────────────────────────────────────────────────────────────────────┘
```

### Assign a setting to a session

```sql
SET async_insert = 1;
SELECT value FROM system.settings where name='async_insert';
```

```response
┌─value──┐
│ 1      │
└────────┘
```

### Assign a setting during a query

```sql
INSERT INTO YourTable
# highlight-next-line
SETTINGS async_insert=1
VALUES (...)
```

## Converting a Setting to its Default Value

If you change a setting and would like to revert it back to its default value, set the value to `DEFAULT`. The syntax looks like:

```sql
SET setting_name = DEFAULT
```

For example, the default value of `async_insert` is `0`. Suppose you change its value to `1`:

```sql
SET async_insert = 1;

SELECT value FROM system.settings where name='async_insert';
```

The response is:

```response
┌─value──┐
│ 1      │
└────────┘
```

The following command sets its value back to 0:

```sql
SET async_insert = DEFAULT;

SELECT value FROM system.settings where name='async_insert';
```

The setting is now back to its default:

```response
┌─value───┐
│ 0       │
└─────────┘
```

## Custom Settings {#custom_settings}

In addition to the common [settings](../../operations/settings/settings.md), users can define custom settings.

A custom setting name must begin with one of the predefined prefixes. The list of these prefixes must be declared in the [custom_settings_prefixes](../../operations/server-configuration-parameters/settings.md#custom_settings_prefixes) parameter in the server configuration file.

```xml
<custom_settings_prefixes>custom_</custom_settings_prefixes>
```

To define a custom setting, use the `SET` command:

```sql
SET custom_a = 123;
```

To get the current value of a custom setting, use the `getSetting()` function:

```sql
SELECT getSetting('custom_a');
```

**See Also**

- View the [Settings](./settings.md) page for a description of the ClickHouse settings.
- [Global server settings](../../operations/server-configuration-parameters/settings.md)
@ -17,7 +17,8 @@ Default value: 0.
**Example**

``` sql
insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
SELECT * FROM table_1;
```
```response
┌─x─┬─y────┐
@ -30,7 +31,7 @@ insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
```sql
SELECT *
FROM table_1
SETTINGS additional_table_filters = (('table_1', 'x != 2'))
SETTINGS additional_table_filters = {'table_1': 'x != 2'}
```
```response
┌─x─┬─y────┐
@ -50,7 +51,8 @@ Default value: `''`.
**Example**

``` sql
insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
INSERT INTO table_1 VALUES (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
SELECT * FROM table_1;
```
```response
┌─x─┬─y────┐
@ -535,6 +537,8 @@ Possible values:
The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and the assigned bucket for each row is recalculated. Any rows which don’t belong to the current bucket are flushed and reassigned.

Supports `INNER/LEFT/RIGHT/FULL ALL/ANY JOIN`.
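
A minimal sketch of enabling the algorithm for a single query (the table and column names are hypothetical):

```sql
SELECT count()
FROM large_left AS l
INNER JOIN large_right AS r ON l.key = r.key
SETTINGS join_algorithm = 'grace_hash', grace_hash_join_initial_buckets = 16;
```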

- hash

  [Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.
@ -3464,6 +3468,12 @@ Possible values:
Default value: `0`.

## disable_url_encoding {#disable_url_encoding}

Allows disabling decoding/encoding of the path in the URI in [URL](../../engines/table-engines/special/url.md)-engine tables.

Disabled by default.
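
A hedged sketch of the intended use (the endpoint is hypothetical); with the setting enabled, the percent-encoded path is passed to the server as-is instead of being decoded and re-encoded:

```sql
SELECT * FROM url('http://example.com/a%20b/data.csv', 'CSV', 'c1 String')
SETTINGS disable_url_encoding = 1;
```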

## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}

Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
@ -3535,7 +3545,7 @@ Possible values:
- Any positive integer.
- 0 - Disabled (infinite timeout).

Default value: 180.
Default value: 30.

## http_receive_timeout {#http_receive_timeout}
@ -3546,7 +3556,7 @@ Possible values:
- Any positive integer.
- 0 - Disabled (infinite timeout).

Default value: 180.
Default value: 30.

## check_query_single_value_result {#check_query_single_value_result}
@ -4522,6 +4532,7 @@ This setting allows to specify renaming pattern for files processed by `file` ta

### Placeholders

- `%a` — Full original filename (e.g., "sample.csv").
- `%f` — Original filename without extension (e.g., "sample").
- `%e` — Original file extension with dot (e.g., ".csv").
- `%t` — Timestamp (in microseconds).
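
As a hedged sketch — assuming this section documents the `rename_files_after_processing` setting and a hypothetical input file `sample.csv` — the placeholders combine like this:

```sql
-- renames the processed file to e.g. 'sample_processed_<timestamp>.csv'
SET rename_files_after_processing = '%f_processed_%t%e';
SELECT * FROM file('sample.csv');
```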
@ -9,7 +9,6 @@ Columns:

- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time with microseconds resolution.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
@ -20,18 +19,18 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
```

``` text
┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0                         │    2120.9 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy                │       743 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty                │     26288 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │         0 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs      │         0 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained                         │  60694528 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped                           │ 303161344 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident                         │ 260931584 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata                         │  12079488 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated                        │ 133756128 │
└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0                         │    2120.9 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy                │       743 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty                │     26288 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │         0 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs      │         0 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained                         │  60694528 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped                           │ 303161344 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident                         │ 260931584 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata                         │  12079488 │
│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated                        │ 133756128 │
└────────────┴─────────────────────┴───────────────────────────────────────────┴───────────┘
```

**See Also**
45
docs/en/operations/system-tables/jemalloc_bins.md
Normal file
@ -0,0 +1,45 @@
---
slug: /en/operations/system-tables/jemalloc_bins
---
# jemalloc_bins

Contains information about memory allocations done via jemalloc allocator in different size classes (bins) aggregated from all arenas.
These statistics might not be absolutely accurate because of thread local caching in jemalloc.

Columns:

- `index` (UInt64) — Index of the bin ordered by size
- `large` (Bool) — True for large allocations and False for small
- `size` (UInt64) — Size of allocations in this bin
- `allocations` (UInt64) — Number of allocations
- `deallocations` (UInt64) — Number of deallocations

**Example**

Find the sizes of allocations that contributed the most to the current overall memory usage.

``` sql
SELECT
    *,
    allocations - deallocations AS active_allocations,
    size * active_allocations AS allocated_bytes
FROM system.jemalloc_bins
WHERE allocated_bytes > 0
ORDER BY allocated_bytes DESC
LIMIT 10
```

``` text
┌─index─┬─large─┬─────size─┬─allocations─┬─deallocations─┬─active_allocations─┬─allocated_bytes─┐
│    82 │     1 │ 50331648 │           1 │             0 │                  1 │        50331648 │
│    10 │     0 │      192 │      512336 │        370710 │             141626 │        27192192 │
│    69 │     1 │  5242880 │           6 │             2 │                  4 │        20971520 │
│     3 │     0 │       48 │    16938224 │      16559484 │             378740 │        18179520 │
│    28 │     0 │     4096 │      122924 │        119142 │               3782 │        15491072 │
│    61 │     1 │  1310720 │       44569 │         44558 │                 11 │        14417920 │
│    39 │     1 │    28672 │        1285 │           913 │                372 │        10665984 │
│     4 │     0 │       64 │     2837225 │       2680568 │             156657 │        10026048 │
│     6 │     0 │       96 │     2617803 │       2531435 │              86368 │         8291328 │
│    36 │     1 │    16384 │       22431 │         21970 │                461 │         7553024 │
└───────┴───────┴──────────┴─────────────┴───────────────┴────────────────────┴─────────────────┘
```
@ -7,11 +7,17 @@ Contains information about settings for `MergeTree` tables.

Columns:

- `name` (String) — Setting name.
- `value` (String) — Setting value.
- `description` (String) — Setting description.
- `type` (String) — Setting type (implementation specific string value).
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
- `name` ([String](../../sql-reference/data-types/string.md)) — Setting name.
- `value` ([String](../../sql-reference/data-types/string.md)) — Setting value.
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Whether the setting was explicitly defined in the config or explicitly changed.
- `description` ([String](../../sql-reference/data-types/string.md)) — Setting description.
- `min` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Minimum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
- `max` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Maximum value of the setting, if any is set via [constraints](../../operations/settings/constraints-on-settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../../sql-reference/syntax.md#null-literal).
- `readonly` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether the current user can change the setting:
    - `0` — Current user can change the setting.
    - `1` — Current user can’t change the setting.
- `type` ([String](../../sql-reference/data-types/string.md)) — Setting type (implementation specific string value).
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.

**Example**
```sql
@ -21,35 +27,51 @@ SELECT * FROM system.merge_tree_settings LIMIT 4 FORMAT Vertical;
```response
Row 1:
──────
name:        min_compress_block_size
value:       0
changed:     0
description: When granule is written, compress the data in buffer if the size of pending uncompressed data is larger or equal than the specified threshold. If this setting is not set, the corresponding global setting is used.
min:         ᴺᵁᴸᴸ
max:         ᴺᵁᴸᴸ
readonly:    0
type:        UInt64
is_obsolete: 0

Row 2:
──────
name:        max_compress_block_size
value:       0
changed:     0
description: Compress the pending uncompressed data in buffer if its size is larger or equal than the specified threshold. Block of data will be compressed even if the current granule is not finished. If this setting is not set, the corresponding global setting is used.
min:         ᴺᵁᴸᴸ
max:         ᴺᵁᴸᴸ
readonly:    0
type:        UInt64
is_obsolete: 0

Row 3:
──────
name:        index_granularity
value:       8192
changed:     0
description: How many rows correspond to one primary key value.
type:        SettingUInt64

Row 2:
──────
name:        min_bytes_for_wide_part
value:       0
changed:     0
description: Minimal uncompressed size in bytes to create part in wide format instead of compact
type:        SettingUInt64

Row 3:
──────
name:        min_rows_for_wide_part
value:       0
changed:     0
description: Minimal number of rows to create part in wide format instead of compact
type:        SettingUInt64
min:         ᴺᵁᴸᴸ
max:         ᴺᵁᴸᴸ
readonly:    0
type:        UInt64
is_obsolete: 0

Row 4:
──────
name:        merge_max_block_size
value:       8192
name:        max_digestion_size_per_segment
value:       268435456
changed:     0
description: How many rows in blocks should be formed for merge operations.
type:        SettingUInt64
description: Max number of bytes to digest per segment to build GIN index.
min:         ᴺᵁᴸᴸ
max:         ᴺᵁᴸᴸ
readonly:    0
type:        UInt64
is_obsolete: 0

4 rows in set. Elapsed: 0.001 sec.
4 rows in set. Elapsed: 0.009 sec.
```
@ -39,6 +39,8 @@ Columns:

- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included.

- `primary_key_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) used by primary key values in the primary.idx/cidx file on disk.

- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The size of the file with marks.

- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Total size of compressed data for secondary indices in the data part. All the auxiliary files (for example, files with marks) are not included.
@ -14,6 +14,7 @@ Columns:
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting was specified in `config.xml`
- `description` ([String](../../sql-reference/data-types/string.md)) — Short server setting description.
- `type` ([String](../../sql-reference/data-types/string.md)) — Server setting value type.
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.

**Example**
@ -26,14 +27,22 @@ WHERE name LIKE '%thread_pool%'
```

``` text
┌─name─────────────────────────┬─value─┬─default─┬─changed─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┐
│ max_thread_pool_size         │ 5000  │ 10000   │       1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations.                           │ UInt64 │
│ max_thread_pool_free_size    │ 1000  │ 1000    │       0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │
│ thread_pool_queue_size       │ 10000 │ 10000   │       0 │ The maximum number of tasks that will be placed in a queue and wait for execution.                                                                  │ UInt64 │
│ max_io_thread_pool_size      │ 100   │ 100     │       0 │ The maximum number of threads that would be used for IO operations                                                                                  │ UInt64 │
│ max_io_thread_pool_free_size │ 0     │ 0       │       0 │ Max free size for IO thread pool.                                                                                                                   │ UInt64 │
│ io_thread_pool_queue_size    │ 10000 │ 10000   │       0 │ Queue size for IO thread pool.                                                                                                                      │ UInt64 │
└──────────────────────────────┴───────┴─────────┴─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┘
┌─name────────────────────────────────────────┬─value─┬─default─┬─changed─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┬─is_obsolete─┐
│ max_thread_pool_size                        │ 10000 │ 10000   │       1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations.                           │ UInt64 │           0 │
│ max_thread_pool_free_size                   │ 1000  │ 1000    │       0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │           0 │
│ thread_pool_queue_size                      │ 10000 │ 10000   │       0 │ The maximum number of tasks that will be placed in a queue and wait for execution.                                                                  │ UInt64 │           0 │
│ max_io_thread_pool_size                     │ 100   │ 100     │       0 │ The maximum number of threads that would be used for IO operations                                                                                  │ UInt64 │           0 │
│ max_io_thread_pool_free_size                │ 0     │ 0       │       0 │ Max free size for IO thread pool.                                                                                                                   │ UInt64 │           0 │
│ io_thread_pool_queue_size                   │ 10000 │ 10000   │       0 │ Queue size for IO thread pool.                                                                                                                      │ UInt64 │           0 │
│ max_active_parts_loading_thread_pool_size   │ 64    │ 64      │       0 │ The number of threads to load active set of data parts (Active ones) at startup.                                                                    │ UInt64 │           0 │
│ max_outdated_parts_loading_thread_pool_size │ 32    │ 32      │       0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup.                                                                │ UInt64 │           0 │
│ max_parts_cleaning_thread_pool_size         │ 128   │ 128     │       0 │ The number of threads for concurrent removal of inactive data parts.                                                                                │ UInt64 │           0 │
│ max_backups_io_thread_pool_size             │ 1000  │ 1000    │       0 │ The maximum number of threads that would be used for IO operations for BACKUP queries                                                               │ UInt64 │           0 │
│ max_backups_io_thread_pool_free_size        │ 0     │ 0       │       0 │ Max free size for backups IO thread pool.                                                                                                           │ UInt64 │           0 │
│ backups_io_thread_pool_queue_size           │ 0     │ 0       │       0 │ Queue size for backups IO thread pool.                                                                                                              │ UInt64 │           0 │
└─────────────────────────────────────────────┴───────┴─────────┴─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┴─────────────┘
```

Using `WHERE changed` can be useful, for example, when you want to check
@ -17,6 +17,7 @@ Columns:
    - `0` — Current user can change the setting.
    - `1` — Current user can’t change the setting.
- `default` ([String](../../sql-reference/data-types/string.md)) — Setting default value.
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting is obsolete.

**Example**
@ -29,11 +30,14 @@ WHERE name LIKE '%min_i%'
```

``` text
┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
│ min_insert_block_size_rows                  │ 1048576   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ min_insert_block_size_bytes                 │ 268435456 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                                        │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ read_backoff_min_interval_between_events_ms │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
└─────────────────────────────────────────────┴───────────┴─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
┌─name───────────────────────────────────────────────┬─value─────┬─changed─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┬─type─────────┬─default───┬─alias_for─┬─is_obsolete─┐
│ min_insert_block_size_rows                         │ 1048449   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │ UInt64       │ 1048449   │           │           0 │
│ min_insert_block_size_bytes                        │ 268402944 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                                        │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │ UInt64       │ 268402944 │           │           0 │
│ min_insert_block_size_rows_for_materialized_views  │ 0         │       0 │ Like min_insert_block_size_rows, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_rows)                                           │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │ UInt64       │ 0         │           │           0 │
│ min_insert_block_size_bytes_for_materialized_views │ 0         │       0 │ Like min_insert_block_size_bytes, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_bytes)                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │ UInt64       │ 0         │           │           0 │
│ read_backoff_min_interval_between_events_ms        │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │ Milliseconds │ 1000      │           │           0 │
└────────────────────────────────────────────────────┴───────────┴─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┴──────────────┴───────────┴───────────┴─────────────┘
```

Using `WHERE changed` can be useful, for example, when you want to check:
@ -300,7 +300,7 @@ SELECT groupArrayResample(30, 75, 30)(name, age) FROM people

Consider the results.

`Jonh` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.
`John` is out of the sample because he’s too young. Other people are distributed according to the specified age intervals.

Now let’s count the total number of people and their average wage in the specified age intervals.
@ -12,3 +12,5 @@ To get a determinate result, you can use the ‘min’ or ‘max’ function ins
In some cases, you can rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY.

When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function, as sketched below.

- Alias: `any_value`
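
A minimal sketch of the MySQL-style workaround described above (the table and column names are hypothetical):

```sql
SELECT
    user_id,
    any(user_name) AS user_name  -- not in GROUP BY, so wrap it in any()
FROM visits
GROUP BY user_id;
```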
@ -0,0 +1,32 @@
---
slug: /en/sql-reference/aggregate-functions/reference/array_concat_agg
sidebar_position: 110
---

# array_concat_agg
- Alias of `groupArrayArray`. The function is case insensitive.

**Example**

```text
SELECT *
FROM t

┌─a───────┐
│ [1,2,3] │
│ [4,5]   │
│ [6]     │
└─────────┘

```

Query:

```sql
SELECT array_concat_agg(a) AS a
FROM t

┌─a─────────────┐
│ [1,2,3,4,5,6] │
└───────────────┘
```
@ -44,3 +44,5 @@ Result:
```

The groupArray function removes ᴺᵁᴸᴸ values, as the results above show.

- Alias: `array_agg`.
@ -143,5 +143,6 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
- [The `date_time_input_format` setting](../../operations/settings/settings.md#settings-date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/settings.md#settings-date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
- [The `Date` data type](../../sql-reference/data-types/date.md)
@ -119,6 +119,7 @@ FROM dt;
- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-for-working-with-dates-and-times)
- [`Date` data type](../../sql-reference/data-types/date.md)
- [`DateTime` data type](../../sql-reference/data-types/datetime.md)
@ -6,9 +6,20 @@ sidebar_label: Arithmetic

# Arithmetic Functions

The result type of all arithmetic functions is the smallest type which can represent all possible results. Size promotion happens for integers up to 32 bit, e.g. `UInt8 + UInt16 = UInt32`. If one of the integers has 64 or more bits, the result is of the same type as the bigger of the input integers, e.g. `UInt16 + UInt128 = UInt128`. While this introduces a risk of overflows around the value range boundary, it ensures that calculations are performed quickly using the maximum native integer width of 64 bit.
Arithmetic functions work for any two operands of type `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64`.

The result of addition or multiplication of two integers is unsigned unless one of the integers is signed.
Before performing the operation, both operands are cast to the result type. The result type is determined as follows (unless specified
differently in the function documentation below):
- If both operands are up to 32 bits wide, the size of the result type will be the size of the next bigger type following the bigger of the
  two operands (integer size promotion). For example, `UInt8 + UInt16 = UInt32` or `Float32 * Float32 = Float64`.
- If one of the operands has 64 or more bits, the size of the result type will be the same size as the bigger of the two operands. For
  example, `UInt32 + UInt128 = UInt128` or `Float32 * Float64 = Float64`.
- If one of the operands is signed, the result type will also be signed, otherwise it will be unsigned. For example, `UInt32 * Int32 = Int64`.

These rules make sure that the result type will be the smallest type which can represent all possible results. While this introduces a risk
of overflows around the value range boundary, it ensures that calculations are performed quickly using the maximum native integer width of
64 bit. This behavior also guarantees compatibility with many other databases which provide 64 bit integers (BIGINT) as the biggest integer
type.
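
A quick check of these rules (a sketch; the expected types follow directly from the rules above):

```sql
SELECT
    toTypeName(toUInt8(1) + toUInt16(1)),   -- UInt32: promoted past the bigger operand
    toTypeName(toUInt32(1) * toInt32(1)),   -- Int64: one signed operand makes the result signed
    toTypeName(toUInt16(1) + toUInt128(1)); -- UInt128: a 64+ bit operand fixes the width
```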

Example:
@ -22,8 +33,6 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0
└───────────────┴────────────────────────┴─────────────────────────────────┴──────────────────────────────────────────┘
```

Arithmetic functions work for any pair of `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64` values.

Overflows are produced the same way as in C++.

## plus
@ -68,7 +77,7 @@ Alias: `a \* b` (operator)

## divide

Calculates the quotient of two values `a` and `b`. The result is always a floating-point value. If you need integer division, you can use the `intDiv` function.
Calculates the quotient of two values `a` and `b`. The result type is always [Float64](../../sql-reference/data-types/float.md). Integer division is provided by the `intDiv` function.

Division by 0 returns `inf`, `-inf`, or `nan`.
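
For instance (a sketch; both results follow from the rules above):

```sql
SELECT divide(10, 4), 10 / 0;  -- 2.5 (Float64), inf
```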
@ -84,7 +93,7 @@ Alias: `a / b` (operator)

Performs an integer division of two values `a` by `b`, i.e. computes the quotient rounded down to the next smallest integer.

The result has the same type as the dividend (the first parameter).
The result has the same width as the dividend (the first parameter).

An exception is thrown when dividing by zero, when the quotient does not fit in the range of the dividend, or when dividing a minimal negative number by minus one.
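
For instance (a sketch; `-4` follows from rounding down rather than truncating toward zero):

```sql
SELECT intDiv(7, 2), intDiv(-7, 2);  -- 3, -4
```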
@ -135,7 +144,7 @@ intDivOrZero(a, b)

Calculates the remainder of the division of two values `a` by `b`.

The result type is an integer if both inputs are integers. If one of the inputs is a floating-point number, the result is a floating-point number.
The result type is an integer if both inputs are integers. If one of the inputs is a floating-point number, the result type is [Float64](../../sql-reference/data-types/float.md).

The remainder is computed like in C++. Truncated division is used for negative numbers.
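
For instance (a sketch; with truncated division the sign of the result follows the dividend, and a float input widens the result):

```sql
SELECT modulo(-7, 3), modulo(7.5, 2);  -- -1, 1.5 (Float64)
```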
|
@ -102,6 +102,8 @@ The function also works for strings.
|
||||
|
||||
Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting. With `optimize_functions_to_subcolumns = 1` the function reads only [size0](../../sql-reference/data-types/array.md#array-size) subcolumn instead of reading and processing the whole array column. The query `SELECT length(arr) FROM table` transforms to `SELECT arr.size0 FROM TABLE`.
|
||||
|
||||
Alias: `OCTET_LENGTH`
|
||||
|
||||
## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64
|
||||
|
||||
## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64
|
||||
@ -142,6 +144,7 @@ range([start, ] end [, step])

- All arguments `start`, `end`, `step` must belong to one of the data types `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, as must the elements of the returned array, whose type is a supertype of all arguments.
- An exception is thrown if the query results in arrays with a total length of more than the number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block) setting.
- Returns Null if any argument has Nullable(Nothing) type. An exception is thrown if any argument has Null value (Nullable(T) type).

**Examples**
@ -878,7 +881,7 @@ A special function. See the section [“ArrayJoin function”](../../sql-referen

## arrayDifference

Calculates an array of differences between adjacent array elements. The first element of the result array will be 0, the second `a[1] - a[0]`, the third `a[2] - a[1]`, etc. The type of elements in the result array is determined by the type inference rules for subtraction (e.g. `UInt8` - `UInt8` = `Int16`).

**Syntax**
@ -996,6 +999,24 @@ SELECT
└──────────────┴───────────┘
```

## arrayJaccardIndex

Returns the [Jaccard index](https://en.wikipedia.org/wiki/Jaccard_index) of two arrays.

**Example**

Query:
``` sql
SELECT arrayJaccardIndex([1, 2], [2, 3]) AS res
```

Result:
``` text
┌─res────────────────┐
│ 0.3333333333333333 │
└────────────────────┘
```

## arrayReduce

Applies an aggregate function to array elements and returns its result. The name of the aggregation function is passed as a string in single quotes `'max'`, `'sum'`. When using parametric aggregate functions, the parameter is indicated after the function name in parentheses `'uniqUpTo(6)'`.
@ -694,10 +694,14 @@ SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS we

Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year.

The mode argument works exactly like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.
The mode argument works like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.

`toISOYear()` is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.

:::warning
The week number returned by `toYearWeek()` can differ from what `toWeek()` returns. `toWeek()` always returns the week number in the context of the given year, and when `toWeek()` returns `0`, `toYearWeek()` returns the value corresponding to the last week of the previous year. See `prev_yearWeek` in the example below.
:::

**Syntax**

``` sql
@ -707,18 +711,18 @@ toYearWeek(t[, mode[, timezone]])

**Example**

``` sql
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9, toYearWeek(toDate('2022-01-01')) AS prev_yearWeek;
```

``` text
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
│ 2016-12-27 │    201652 │    201652 │    201701 │
└────────────┴───────────┴───────────┴───────────┘
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┬─prev_yearWeek─┐
│ 2016-12-27 │    201652 │    201652 │    201701 │        202152 │
└────────────┴───────────┴───────────┴───────────┴───────────────┘
```

## age

Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 microsecond.
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, 0 years for the `year` unit.

For an alternative to `age`, see function `date_diff`.
@ -734,6 +738,8 @@ age('unit', startdate, enddate, [timezone])
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `microsecond` (possible abbreviations: `us`, `u`)
    - `millisecond` (possible abbreviations: `ms`)
    - `second` (possible abbreviations: `ss`, `s`)
    - `minute` (possible abbreviations: `mi`, `n`)
    - `hour` (possible abbreviations: `hh`, `h`)
@ -809,6 +815,8 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `microsecond` (possible abbreviations: `us`, `u`)
    - `millisecond` (possible abbreviations: `ms`)
    - `second` (possible abbreviations: `ss`, `s`)
    - `minute` (possible abbreviations: `mi`, `n`)
    - `hour` (possible abbreviations: `hh`, `h`)
@ -1130,6 +1138,8 @@ Result:

Returns the current date and time at the moment of query analysis. The function is a constant expression.

Alias: `current_timestamp`.

**Syntax**

``` sql
@ -1260,6 +1270,8 @@ Result:
Accepts zero arguments and returns the current date at one of the moments of query analysis.
The same as ‘toDate(now())’.

Aliases: `curdate`, `current_date`.

## yesterday

Accepts zero arguments and returns yesterday’s date at one of the moments of query analysis.
@ -1437,7 +1449,7 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %n | new-line character ('\n') | |
| %p | AM or PM designation | PM |
| %Q | Quarter (1-4) | 1 |
| %r | 12-hour HH:MM AM/PM time, equivalent to %H:%i %p | 10:30 PM |
| %r | 12-hour HH:MM AM/PM time, equivalent to %h:%i %p | 10:30 PM |
| %R | 24-hour HH:MM time, equivalent to %H:%i | 22:33 |
| %s | second (00-59) | 44 |
| %S | second (00-59) | 44 |
@ -90,6 +90,8 @@ Returns the length of a string in bytes (not: in characters or Unicode code poin

The function also works for arrays.

Alias: `OCTET_LENGTH`

## lengthUTF8

Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
@ -573,6 +575,42 @@ Alias:

Like `substring` but for Unicode code points. Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.

## substringIndex(s, delim, count)

Returns the substring of `s` before `count` occurrences of the delimiter `delim`, as in Spark or MySQL.

**Syntax**

```sql
substringIndex(s, delim, count)
```
Alias: `SUBSTRING_INDEX`

**Arguments**

- `s` — The string to extract the substring from. [String](../../sql-reference/data-types/string.md).
- `delim` — The character to split on. [String](../../sql-reference/data-types/string.md).
- `count` — The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)

**Example**

``` sql
SELECT substringIndex('www.clickhouse.com', '.', 2)
```

Result:
```
┌─substringIndex('www.clickhouse.com', '.', 2)─┐
│ www.clickhouse                               │
└──────────────────────────────────────────────┘
```
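
A negative count works from the right, per the argument description above (a quick sketch):

```sql
SELECT substringIndex('www.clickhouse.com', '.', -1);  -- 'com'
```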

## substringIndexUTF8(s, delim, count)

Like `substringIndex` but for Unicode code points. Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.

## appendTrailingCharIfAbsent

Appends character `c` to string `s` if `s` is non-empty and does not end with character `c`.
@ -1253,3 +1291,48 @@ Result:
│ A240             │
└──────────────────┘
```

## initcap

Converts the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.
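
For instance (a sketch; the result follows from the definition above):

```sql
SELECT initcap('hello WORLD of clickhouse');  -- 'Hello World Of Clickhouse'
```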

## initcapUTF8

Like [initcap](#initcap), assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.

Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).

If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.

## firstLine

Returns the first line from a multi-line string.

**Syntax**

```sql
firstLine(val)
```

**Arguments**

- `val` - Input value. [String](../data-types/string.md)

**Returned value**

- The first line of the input value, or the whole value if there are no line
  separators. [String](../data-types/string.md)

**Example**

```sql
select firstLine('foo\nbar\nbaz');
```

Result:

```result
┌─firstLine('foo\nbar\nbaz')─┐
│ foo                        │
└────────────────────────────┘
```
@ -631,3 +631,53 @@ Result:
│ 100 │ 200 │ 100-200 │ 100 │
└──────────────────────────────────────────────┴──────────────────────────────────────────────┴──────────────────────────────────────────────┴───────────────────────────────────────────┘
```

## hasSubsequence

Returns 1 if needle is a subsequence of haystack, or 0 otherwise.
A subsequence of a string is a sequence that can be derived from the given string by deleting zero or more elements without changing the order of the remaining elements.

**Syntax**

``` sql
hasSubsequence(haystack, needle)
```

**Arguments**

- `haystack` — String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Subsequence to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).

**Returned values**

- 1, if needle is a subsequence of haystack.
- 0, otherwise.

Type: `UInt8`.

**Examples**

``` sql
SELECT hasSubsequence('garbage', 'arg');
```

Result:

``` text
┌─hasSubsequence('garbage', 'arg')─┐
│                                1 │
└──────────────────────────────────┘
```
## hasSubsequenceCaseInsensitive

Like [hasSubsequence](#hasSubsequence) but searches case-insensitively.
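A usage sketch based on the example above:

```sql
-- The case differs ('ARG' vs. 'arg'), so hasSubsequence would return 0 here,
-- while the case-insensitive variant should return 1.
SELECT hasSubsequenceCaseInsensitive('garbage', 'ARG')
```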
## hasSubsequenceUTF8

Like [hasSubsequence](#hasSubsequence) but assumes `haystack` and `needle` are UTF-8 encoded strings.

## hasSubsequenceCaseInsensitiveUTF8

Like [hasSubsequenceUTF8](#hasSubsequenceUTF8) but searches case-insensitively.
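An illustrative sketch for the UTF-8 variants (the strings are arbitrary):

```sql
-- 'мр' can be derived from 'мир' by deleting 'и', so this should return 1.
SELECT hasSubsequenceUTF8('привет, мир', 'мр')
```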
@ -399,7 +399,11 @@ toDateTime(expr[, time_zone ])
- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [Int](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md).
- `time_zone` — Time zone. [String](/docs/en/sql-reference/data-types/string.md).

If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp).
:::note
If `expr` is a number, it is interpreted as the number of seconds since the beginning of the Unix Epoch (as Unix timestamp).
If `expr` is a [String](/docs/en/sql-reference/data-types/string.md), it may be interpreted as a Unix timestamp or as a string representation of date / date with time.
Thus, parsing of short numbers' string representations (up to 4 digits) is explicitly disabled due to ambiguity, e.g. a string `'1999'` may be both a year (an incomplete string representation of Date / DateTime) or a unix timestamp. Longer numeric strings are allowed.
:::
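A short sketch of both interpretations (the timestamp value is arbitrary; the stated result assumes the UTC time zone):

```sql
-- 1690000000 is taken as seconds since the Unix epoch: 2023-07-22 04:26:40 (UTC).
SELECT toDateTime(1690000000, 'UTC');

-- A full string representation is parsed as a date with time.
SELECT toDateTime('2023-07-22 04:26:40', 'UTC');
```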
**Returned value**
@ -941,44 +945,6 @@ Result:
└────────────┴───────┘
```

## toDecimalString

Converts a numeric value to String with the number of fractional digits in the output specified by the user.

**Syntax**

``` sql
toDecimalString(number, scale)
```

**Parameters**

- `number` — Value to be represented as String, [Int, UInt](/docs/en/sql-reference/data-types/int-uint.md), [Float](/docs/en/sql-reference/data-types/float.md), [Decimal](/docs/en/sql-reference/data-types/decimal.md).
- `scale` — Number of fractional digits, [UInt8](/docs/en/sql-reference/data-types/int-uint.md).
    * Maximum scale for [Decimal](/docs/en/sql-reference/data-types/decimal.md) and [Int, UInt](/docs/en/sql-reference/data-types/int-uint.md) types is 77 (it is the maximum possible number of significant digits for Decimal).
    * Maximum scale for [Float](/docs/en/sql-reference/data-types/float.md) is 60.

**Returned value**

- Input value represented as [String](/docs/en/sql-reference/data-types/string.md) with the given number of fractional digits (scale).
    The number is rounded up or down according to common arithmetic if the requested scale is smaller than the original number's scale.

**Example**

Query:

``` sql
SELECT toDecimalString(CAST('64.32', 'Float64'), 5);
```

Result:

```response
┌─toDecimalString(CAST('64.32', 'Float64'), 5)─┐
│ 64.32000                                     │
└──────────────────────────────────────────────┘
```

## reinterpretAsUInt(8\|16\|32\|64)

## reinterpretAsInt(8\|16\|32\|64)
@ -171,12 +171,13 @@ Result:
└──────────────────────────────┘
```

Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type).
Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type). It also requires the `execute_direct` option (to ensure no shell argument expansion vulnerability).
File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings).
```xml
<functions>
    <function>
        <type>executable</type>
        <execute_direct>true</execute_direct>
        <name>test_function_parameter_python</name>
        <return_type>String</return_type>
        <argument>
@ -5,15 +5,28 @@ sidebar_label: SAMPLE BY
title: "Manipulating Sampling-Key Expressions"
---

Syntax:
# Manipulating SAMPLE BY expression

The following operations are available:

## MODIFY

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression
```

The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions).
The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). The primary key must contain the new sample key.

The command is lightweight in the sense that it only changes metadata. The primary key must contain the new sample key.
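A hypothetical example, assuming a table `hits` whose primary key already contains `intHash32(UserID)`:

```sql
-- Changes only metadata; the expression must already be part of the primary key.
ALTER TABLE hits MODIFY SAMPLE BY intHash32(UserID)
```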
## REMOVE

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] REMOVE SAMPLE BY
```

The command removes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table.


The commands `MODIFY` and `REMOVE` are lightweight in the sense that they only change metadata or remove files.

:::note
It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
@ -97,7 +97,7 @@ This is an experimental feature that may change in backwards-incompatible ways i
:::

```sql
CREATE LIVE VIEW [IF NOT EXISTS] [db.]table_name [WITH [TIMEOUT [value_in_sec] [AND]] [REFRESH [value_in_sec]]] AS SELECT ...
CREATE LIVE VIEW [IF NOT EXISTS] [db.]table_name [WITH REFRESH [value_in_sec]] AS SELECT ...
```

Live views store the result of the corresponding [SELECT](../../../sql-reference/statements/select/index.md) query and are updated any time the result of the query changes. The query result, as well as the partial result needed to combine it with new data, is stored in memory, providing increased performance for repeated queries. Live views can provide push notifications when the query result changes using the [WATCH](../../../sql-reference/statements/watch.md) query.
@ -5,7 +5,27 @@ sidebar_label: WITH
# WITH Clause

ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is provides to use results of `WITH` clause in the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression.
ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)) and substitutes the code defined in the `WITH` clause in all places of use for the rest of the `SELECT` query. Named subqueries can be included in the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current-level CTEs from the WITH expression.

Please note that CTEs do not guarantee the same results in all places they are called, because the query is re-executed for each use.

An example of such behavior is below:
``` sql
with cte_numbers as
(
    select
        num
    from generateRandom('num UInt64', NULL)
    limit 1000000
)
select
    count()
from cte_numbers
where num in (select num from cte_numbers)
```
If CTEs passed on exactly their results rather than just a piece of code, you would always see `1000000`.

However, because we refer to `cte_numbers` twice, random numbers are generated each time, and accordingly we see different random results: `280501, 392454, 261636, 196227` and so on...

## Syntax
@ -205,7 +205,7 @@ The optional keyword `EXTENDED` currently has no effect, it only exists for MySQ
The optional keyword `FULL` causes the output to include the collation, comment and privilege columns.

`SHOW COLUMNS` produces a result table with the following structure:
The statement produces a result table with the following structure:
- field - The name of the column (String)
- type - The column data type (String)
- null - If the column data type is Nullable (UInt8)
@ -272,6 +272,10 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
Displays a list of primary and data skipping indexes of a table.

This statement mostly exists for compatibility with MySQL. System tables [system.tables](../../operations/system-tables/tables.md) (for primary keys) and [system.data_skipping_indices](../../operations/system-tables/data_skipping_indices.md) (for data skipping indices) provide equivalent information but in a fashion more native to ClickHouse.

```sql
SHOW [EXTENDED] {INDEX | INDEXES | INDICES | KEYS } {FROM | IN} <table> [{FROM | IN} <db>] [WHERE <expr>] [INTO OUTFILE <filename>] [FORMAT <format>]
```
@ -281,22 +285,22 @@ equivalent. If no database is specified, the query assumes the current database
The optional keyword `EXTENDED` currently has no effect, it only exists for MySQL compatibility.

`SHOW INDEX` produces a result table with the following structure:
- table - The name of the table (String)
- non_unique - 0 if the index cannot contain duplicates, 1 otherwise (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index (String)
- seq_in_index - Currently unused
- column_name - Currently unused
- collation - The sorting of the column in the index, `A` if ascending, `D` if descending, `NULL` if unsorted (Nullable(String))
- cardinality - Currently unused
- sub_part - Currently unused
- packed - Currently unused
The statement produces a result table with the following structure:
- table - The name of the table. (String)
- non_unique - Always `1` as ClickHouse does not support uniqueness constraints. (UInt8)
- key_name - The name of the index, `PRIMARY` if the index is a primary key index. (String)
- seq_in_index - For a primary key index, the position of the column starting from `1`. For a data skipping index: always `1`. (UInt8)
- column_name - For a primary key index, the name of the column. For a data skipping index: `''` (empty string), see field "expression". (String)
- collation - The sorting of the column in the index: `A` if ascending, `D` if descending, `NULL` if unsorted. (Nullable(String))
- cardinality - An estimation of the index cardinality (number of unique values in the index). Currently always 0. (UInt64)
- sub_part - Always `NULL` because ClickHouse does not support index prefixes like MySQL. (Nullable(String))
- packed - Always `NULL` because ClickHouse does not support packed indexes (like MySQL). (Nullable(String))
- null - Currently unused
- index_type - The index type, e.g. `primary`, `minmax`, `bloom_filter` etc. (String)
- comment - Currently unused
- index_comment - Currently unused
- visible - If the index is visible to the optimizer, always `YES` (String)
- expression - The index expression (String)
- index_type - The index type, e.g. `PRIMARY`, `MINMAX`, `BLOOM_FILTER` etc. (String)
- comment - Additional information about the index, currently always `''` (empty string). (String)
- index_comment - `''` (empty string) because indexes in ClickHouse cannot have a `COMMENT` field (like in MySQL). (String)
- visible - If the index is visible to the optimizer, always `YES`. (String)
- expression - For a data skipping index, the index expression. For a primary key index: `''` (empty string). (String)

**Examples**
@ -310,11 +314,12 @@ Result:
``` text
┌─table─┬─non_unique─┬─key_name─┬─seq_in_index─┬─column_name─┬─collation─┬─cardinality─┬─sub_part─┬─packed─┬─null─┬─index_type───┬─comment─┬─index_comment─┬─visible─┬─expression─┐
│ tbl │ 0 │ blf_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ bloom_filter │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ d, b │
│ tbl │ 0 │ mm1_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ minmax │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ a, c, d │
│ tbl │ 0 │ mm2_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ minmax │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ c, d, e │
│ tbl │ 0 │ PRIMARY │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ A │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ primary │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ c, a │
│ tbl │ 0 │ set_idx │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ set │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ YES │ e │
│ tbl │ 1 │ blf_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ BLOOM_FILTER │ │ │ YES │ d, b │
│ tbl │ 1 │ mm1_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ MINMAX │ │ │ YES │ a, c, d │
│ tbl │ 1 │ mm2_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ MINMAX │ │ │ YES │ c, d, e │
│ tbl │ 1 │ PRIMARY │ 1 │ c │ A │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ PRIMARY │ │ │ YES │ │
│ tbl │ 1 │ PRIMARY │ 2 │ a │ A │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ PRIMARY │ │ │ YES │ │
│ tbl │ 1 │ set_idx │ 1 │ 1 │ ᴺᵁᴸᴸ │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ SET │ │ │ YES │ e │
└───────┴────────────┴──────────┴──────────────┴─────────────┴───────────┴─────────────┴──────────┴────────┴──────┴──────────────┴─────────┴───────────────┴─────────┴────────────┘
```
@ -134,7 +134,7 @@ Multiple path components can have globs. For being processed file must exist and
- `*` — Substitutes any number of any characters except `/` including empty string.
- `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`, including `/`.
- `{N..M}` — Substitutes any number in range from N to M including both borders.
- `**` - Fetches all files inside the folder recursively (as shown in the sketch after this list).
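A sketch of how these globs combine (the path and schema below are hypothetical):

```sql
-- '**' recurses into subdirectories, '{0..9}' expands to the digits 0-9,
-- and '*' matches the rest of each file name.
SELECT count()
FROM file('data/**/part_{0..9}*.csv', 'CSV', 'id UInt64, s String')
```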
@ -30,6 +30,14 @@ mongodb(host:port, database, collection, user, password, structure [, options])
- `options` - MongoDB connection string options (optional parameter).

:::tip
If you are using the MongoDB Atlas cloud offering, please add these options:

```
'connectTimeoutMS=10000&ssl=true&authSource=admin'
```

:::
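Putting the signature and the Atlas options together, a hypothetical call might look like this (host, credentials, and structure are placeholders):

```sql
SELECT *
FROM mongodb('mongodb0.example.com:27017', 'test', 'my_collection',
             'test_user', 'password', 'log_type String, host String',
             'connectTimeoutMS=10000&ssl=true&authSource=admin')
LIMIT 3
```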
**Returned Value**
@ -56,6 +56,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
## Storage Settings {#storage-settings}

- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows skipping empty files while reading, as shown in the sketch below. Disabled by default.
- [disable_url_encoding](/docs/en/operations/settings/settings.md#disable_url_encoding) - allows disabling decoding/encoding of the path in the URI. Disabled by default.
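A sketch of applying one of these settings per query (the URL is hypothetical):

```sql
-- Empty files fetched from the URL pattern are skipped instead of raising an error.
SELECT count()
FROM url('https://example.com/data/file-{1..3}.csv', 'CSV', 's String, n UInt32')
SETTINGS engine_url_skip_empty_files = 1
```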
**See Also**
@ -3,23 +3,46 @@ slug: /en/guides/developer/transactional
---
# Transactional (ACID) support

INSERT into one partition* in one table* of MergeTree* family up to max_insert_block_size rows* is transactional (ACID):
- Atomic: INSERT is succeeded or rejected as a whole: if confirmation is sent to the client, all rows INSERTed; if error is sent to the client, no rows INSERTed.
## Case 1: INSERT into one partition, of one table, of the MergeTree* family

This is transactional (ACID) if the inserted rows are packed and inserted as a single block (see Notes):
- Atomic: an INSERT succeeds or is rejected as a whole: if a confirmation is sent to the client, then all rows were inserted; if an error is sent to the client, then no rows were inserted.
- Consistent: if there are no table constraints violated, then all rows in an INSERT are inserted and the INSERT succeeds; if constraints are violated, then no rows are inserted.
- Isolated: concurrent clients observe a consistent snapshot of the table–the state of the table either as if before INSERT or after successful INSERT; no partial state is seen;
- Durable: successful INSERT is written to the filesystem before answering to the client, on single replica or multiple replicas (controlled by the `insert_quorum` setting), and ClickHouse can ask the OS to sync the filesystem data on the storage media (controlled by the `fsync_after_insert` setting).
* If table has many partitions and INSERT covers many partitions–then insertion into every partition is transactional on its own;
* INSERT into multiple tables with one statement is possible if materialized views are involved;
* INSERT into Distributed table is not transactional as a whole, while insertion into every shard is transactional;
* another example: insert into Buffer tables is neither atomic nor isolated or consistent or durable;
* atomicity is ensured even if `async_insert` is enabled, but it can be turned off by the wait_for_async_insert setting;
* max_insert_block_size is 1 000 000 by default and can be adjusted as needed;
* if client did not receive the answer from the server, the client does not know if transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties;
* ClickHouse is using MVCC with snapshot isolation internally;
* all ACID properties are valid even in case of server kill / crash;
* either insert_quorum into different AZ or fsync should be enabled to ensure durable inserts in typical setup;
* "consistency" in ACID terms does not cover the semantics of distributed systems, see https://jepsen.io/consistency which is controlled by different settings (select_sequential_consistency)
* this explanation does not cover a new transactions feature that allow to have full-featured transactions over multiple tables, materialized views, for multiple SELECTs, etc.
- Isolated: concurrent clients observe a consistent snapshot of the table–the state of the table either as it was before the INSERT attempt, or after the successful INSERT; no partial state is seen
- Durable: a successful INSERT is written to the filesystem before answering to the client, on a single replica or multiple replicas (controlled by the `insert_quorum` setting), and ClickHouse can ask the OS to sync the filesystem data on the storage media (controlled by the `fsync_after_insert` setting).
- INSERT into multiple tables with one statement is possible if materialized views are involved (the INSERT from the client is to a table which has associated materialized views).
## Case 2: INSERT into multiple partitions, of one table, of the MergeTree* family

Same as Case 1 above, with this detail:
- If the table has many partitions and the INSERT covers many partitions, then insertion into every partition is transactional on its own

## Case 3: INSERT into one distributed table of the MergeTree* family

Same as Case 1 above, with this detail:
- INSERT into a Distributed table is not transactional as a whole, while insertion into every shard is transactional

## Case 4: Using a Buffer table

- insert into Buffer tables is neither atomic nor isolated nor consistent nor durable

## Case 5: Using async_insert

Same as Case 1 above, with this detail:
- atomicity is ensured even if `async_insert` is enabled and `wait_for_async_insert` is set to 1 (the default), but if `wait_for_async_insert` is set to 0, then atomicity is not ensured (see the sketch below)
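A minimal sketch of the settings involved (the table and values are illustrative only):

```sql
-- With wait_for_async_insert = 1 the client gets an answer only after the
-- buffered block is flushed, so the atomicity guarantee of Case 1 holds.
INSERT INTO t SETTINGS async_insert = 1, wait_for_async_insert = 1 VALUES (1), (2), (3)
```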
## Notes
- rows inserted from the client in some data format are packed into a single block when:
  - the insert format is row-based (like CSV, TSV, Values, JSONEachRow, etc) and the data contains less than `max_insert_block_size` rows (~1 000 000 by default) or less than `min_chunk_bytes_for_parallel_parsing` bytes (10 MB by default) when parallel parsing is used (enabled by default)
  - the insert format is column-based (like Native, Parquet, ORC, etc) and the data contains only one block of data
- the size of the inserted block in general may depend on many settings (for example: `max_block_size`, `max_insert_block_size`, `min_insert_block_size_rows`, `min_insert_block_size_bytes`, `preferred_block_size_bytes`, etc)
- if the client did not receive an answer from the server, the client does not know if the transaction succeeded, and it can repeat the transaction, using exactly-once insertion properties
- ClickHouse is using MVCC with snapshot isolation internally
- all ACID properties are valid even in the case of server kill/crash
- either insert_quorum into different AZ or fsync should be enabled to ensure durable inserts in the typical setup
- "consistency" in ACID terms does not cover the semantics of distributed systems, see https://jepsen.io/consistency which is controlled by different settings (select_sequential_consistency)
- this explanation does not cover a new transactions feature that allows full-featured transactions over multiple tables, materialized views, for multiple SELECTs, etc. (see the next section on Transactions, Commit, and Rollback)
## Transactions, Commit, and Rollback
@ -68,7 +68,7 @@ $ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/
$ rm -rf build
$ mkdir build
$ cd build
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER==$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_JEMALLOC=OFF ..
$ cmake --build . --config RelWithDebInfo
$ cd ..
@ -401,8 +401,8 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR
- [output_format_csv_crlf_end_of_line](../operations/settings/settings.md#output_format_csv_crlf_end_of_line) - if set to true, the line ending in the CSV output format will be `\r\n` instead of `\n`. Default value: `false`.
- [input_format_csv_skip_first_lines](../operations/settings/settings.md#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of the data. Default value: `0`.
- [input_format_csv_detect_header](../operations/settings/settings.md#input_format_csv_detect_header) - detect the header with names and types in the CSV format. Default value: `true`.
- [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - trim spaces and tab characters in unquoted strings.
Default value: `true`.
- [input_format_csv_trim_whitespaces](../operations/settings/settings.md#input_format_csv_trim_whitespaces) - trim spaces and tab characters in unquoted strings. Default value: `true`.
- [input_format_csv_allow_variable_number_of_columns](../operations/settings/settings.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns (if the file contains more columns than expected) and treat missing fields in CSV as default values. Default value: `false`.

## CSVWithNames {#csvwithnames}
@ -575,14 +575,60 @@ ClickHouse поддерживает динамическое изменение
- `errorlog` - The error log file.
- `size` - The file size. Applies to `log` and `errorlog`. As soon as the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` - The number of archived log files that ClickHouse keeps.
- `stream_compress` - Compress `log` and `errorlog` using the `lz4` algorithm. To enable, set to `1` or `true`.

The `log` and `errorlog` file names (only the file name, not the directory) support date and time format specifiers.

**Format specifiers**
Using the following specifiers, you can define a pattern for the resulting file name. The "Example" column shows possible values at the time `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|--------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
| %% | A literal % | % |
| %n | Newline character | |
| %t | Horizontal tab character | |
| %Y | Year as a decimal number, e.g. 2017 | 2023 |
| %y | Last 2 digits of the year as a decimal number (range [00,99]) | 23 |
| %C | First 2 digits of the year as a decimal number (range [00,99]) | 20 |
| %G | Four-digit [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates) week-based year, i.e. the year that contains the specified week. Normally used together with %V. | 2023 |
| %g | Last 2 digits of the [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates) week-based year, i.e. the year that contains the specified week (range [00,99]). | 23 |
| %b | Abbreviated month name, e.g. Oct (locale dependent) | Jul |
| %h | Synonym of %b | Jul |
| %B | Full month name, e.g. October (locale dependent) | July |
| %m | Month as a decimal number (range [01,12]) | 07 |
| %U | Week of the year as a decimal number (Sunday is the first day of the week) (range [00,53]) | 27 |
| %W | Week of the year as a decimal number (Monday is the first day of the week) (range [00,53]) | 27 |
| %V | ISO 8601 week of the year (range [01,53]) | 27 |
| %j | Day of the year as a decimal number (range [001,366]) | 187 |
| %d | Day of the month as a zero-padded decimal number (range [01,31]) | 06 |
| %e | Day of the month as a space-padded decimal number (range [1,31]) | &nbsp;6 |
| %a | Abbreviated weekday name, e.g. Fri (locale dependent) | Thu |
| %A | Full weekday name, e.g. Friday (locale dependent) | Thursday |
| %w | Weekday as a decimal number, where Sunday is 0 (range [0-6]) | 4 |
| %u | Weekday as a decimal number, where Monday is 1 (ISO 8601 format) (range [1-7]) | 4 |
| %H | Hour as a decimal number, 24-hour clock (range [00-23]) | 18 |
| %I | Hour as a decimal number, 12-hour clock (range [01,12]) | 06 |
| %M | Minute as a decimal number (range [00,59]) | 32 |
| %S | Second as a decimal number (range [00,60]) | 07 |
| %c | Standard date and time string, e.g. Sun Oct 17 04:41:13 2010 (locale dependent) | Thu Jul  6 18:32:07 2023 |
| %x | Localized date representation (locale dependent) | 07/06/23 |
| %X | Localized time representation, e.g. 18:40:20 or 6:40:20 PM (locale dependent) | 18:32:07 |
| %D | Equivalent to "%m/%d/%y" | 07/06/23 |
| %F | Equivalent to "%Y-%m-%d" (the ISO 8601 date format) | 2023-07-06 |
| %r | Localized 12-hour clock time (locale dependent) | 06:32:07 PM |
| %R | Equivalent to "%H:%M" | 18:32 |
| %T | Equivalent to "%H:%M:%S" (the ISO 8601 time format) | 18:32:07 |
| %p | Localized a.m. or p.m. designation (locale dependent) | PM |
| %z | Offset from UTC in the ISO 8601 format (e.g. -0430), or no characters if the time zone information is not available | +0800 |
| %Z | Locale-dependent time zone name or abbreviation, if the time zone information is available | Z AWST |
**Example**

``` xml
<logger>
    <level>trace</level>
    <log>/var/log/clickhouse-server/clickhouse-server.log</log>
    <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
    <log>/var/log/clickhouse-server/clickhouse-server-%F-%T.log</log>
    <errorlog>/var/log/clickhouse-server/clickhouse-server-%F-%T.err.log</errorlog>
    <size>1000M</size>
    <count>10</count>
</logger>
```
@ -1686,7 +1686,7 @@ SELECT * FROM table_with_enum_column_for_csv_insert;
## input_format_csv_detect_header {#input_format_csv_detect_header}

Detect the header with names and types in the CSV format.

Default value: `true`.

## input_format_csv_skip_first_lines {#input_format_csv_skip_first_lines}
@ -1727,6 +1727,12 @@ echo ' string ' | ./clickhouse local -q "select * from table FORMAT CSV" --in
" string "
```

## input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}

Ignore extra columns (if the file contains more columns than expected) and treat missing fields in CSV as default values.

Disabled by default.
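An illustrative sketch (the file name and schema are hypothetical):

```sql
-- Rows with fewer than three columns get defaults for the missing fields;
-- extra columns beyond the three declared ones are ignored.
SELECT *
FROM file('data.csv', 'CSV', 'c1 String, c2 String, c3 String')
SETTINGS input_format_csv_allow_variable_number_of_columns = 1
```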
## output_format_tsv_crlf_end_of_line {#settings-output-format-tsv-crlf-end-of-line}

Use CRLF (DOS/Windows style) instead of LF (Unix style) as the line separator for the TSV format.
@ -4195,6 +4201,7 @@ SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS sessi
### Template
The template supports the following placeholders:

- `%a` — The full original file name (e.g. "sample.csv").
- `%f` — The original file name without the extension (e.g. "sample").
- `%e` — The original file extension with the dot (e.g. ".csv").
- `%t` — The current time (in microseconds).
@ -8,7 +8,6 @@ slug: /ru/operations/system-tables/asynchronous_metric_log
Columns:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — event time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — event time in microseconds.
- `name` ([String](../../sql-reference/data-types/string.md)) — metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — metric value.

A sample query is shown below.
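For illustration, a query sketch over this table:

```sql
-- The most recent measurements, newest first.
SELECT event_time, name, value
FROM system.asynchronous_metric_log
ORDER BY event_time DESC
LIMIT 10
```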
@ -122,6 +122,7 @@ FROM dt
- [The `date_time_input_format` setting](../../operations/settings/index.md#settings-date_time_input_format)
- [The `date_time_output_format` setting](../../operations/settings/index.md)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
- [The `Date` data type](date.md)
- [The `DateTime64` data type](datetime64.md)