mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 07:31:57 +00:00

Merge remote-tracking branch 'upstream/master' into cache-better-locks

This commit is contained in commit a405d06e8a.

14  .clang-tidy
@@ -110,6 +110,7 @@ Checks: '*,
    -misc-const-correctness,
    -misc-no-recursion,
    -misc-non-private-member-variables-in-classes,
+    -misc-confusable-identifiers, # useful but slooow

    -modernize-avoid-c-arrays,
    -modernize-concat-nested-namespaces,

@@ -148,19 +149,6 @@ Checks: '*,
    -readability-use-anyofallof,

    -zirkon-*,

-    -misc-*, # temporarily disabled due to being too slow
-    # also disable checks in other categories which are aliases of checks in misc-*:
-    # https://releases.llvm.org/15.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
-    -cert-dcl54-cpp, # alias of misc-new-delete-overloads
-    -hicpp-new-delete-operators, # alias of misc-new-delete-overloads
-    -cert-fio38-c, # alias of misc-non-copyable-objects
-    -cert-dcl03-c, # alias of misc-static-assert
-    -hicpp-static-assert, # alias of misc-static-assert
-    -cert-err09-cpp, # alias of misc-throw-by-value-catch-by-reference
-    -cert-err61-cpp, # alias of misc-throw-by-value-catch-by-reference
-    -cppcoreguidelines-c-copy-assignment-signature, # alias of misc-unconventional-assign-operator
-    -cppcoreguidelines-non-private-member-variables-in-classes, # alias of misc-non-private-member-variables-in-classes
'

WarningsAsErrors: '*'
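One detail worth keeping in mind when editing this file: clang-tidy registers many checks under several names, so a canonical `misc-*` check and its aliases from other categories have to be enabled or disabled together, which is why the removed block listed them pairwise. A small illustrative sketch of that mapping, with the pairs taken directly from the comments above (the dict itself is not part of the repository):

```python
# Alias pairs from the removed block above: each key is an alias that
# clang-tidy registers for a canonical check. Disabling only the canonical
# name still leaves the alias active, so both must be listed together.
ALIASES = {
    "cert-dcl54-cpp": "misc-new-delete-overloads",
    "hicpp-new-delete-operators": "misc-new-delete-overloads",
    "cert-fio38-c": "misc-non-copyable-objects",
    "cert-dcl03-c": "misc-static-assert",
    "hicpp-static-assert": "misc-static-assert",
    "cert-err09-cpp": "misc-throw-by-value-catch-by-reference",
    "cert-err61-cpp": "misc-throw-by-value-catch-by-reference",
    "cppcoreguidelines-c-copy-assignment-signature": "misc-unconventional-assign-operator",
    "cppcoreguidelines-non-private-member-variables-in-classes": "misc-non-private-member-variables-in-classes",
}

def canonical(check: str) -> str:
    """Resolve an alias to the canonical check name; non-aliases map to themselves."""
    return ALIASES.get(check, check)
```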
2  .github/workflows/backport_branches.yml (vendored)

@@ -470,7 +470,7 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
-          python3 docker_server.py --release-type head --no-push --no-ubuntu \
+          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
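The only change in this hunk is the dropped `--no-ubuntu` flag on the keeper invocation, so the Ubuntu-based image is now built for clickhouse-keeper the same way it is for clickhouse-server. As a rough, hypothetical sketch of what such a flag would gate inside a script like `docker_server.py` (the real script lives in `tests/ci/` and its internals may differ):

```python
# Hypothetical sketch only: how a --no-ubuntu flag could gate which image
# variants get built. The actual docker_server.py logic is not shown in this
# diff and may differ.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--release-type", required=True)
parser.add_argument("--no-push", action="store_true")
parser.add_argument("--no-ubuntu", action="store_true")  # the flag removed from the keeper build above
parser.add_argument("--image-repo", required=True)
parser.add_argument("--image-path", required=True)
args = parser.parse_args()

# Assumed variant split for illustration; only the flag's on/off effect matters.
variants = ["alpine"] if args.no_ubuntu else ["ubuntu", "alpine"]
for variant in variants:
    print(f"would build {args.image_repo} from {args.image_path} ({variant})")
```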
789  .github/workflows/master.yml (vendored)

@@ -862,7 +862,7 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type head \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
-          python3 docker_server.py --release-type head --no-ubuntu \
+          python3 docker_server.py --release-type head \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()

@@ -1131,7 +1131,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
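The `RUN_BY_HASH_NUM`/`RUN_BY_HASH_TOTAL` pair set in these env blocks is what shards one logical check across several runner jobs: each job executes only the tests whose names hash onto its shard number. A minimal sketch of that selection, assuming a CRC-style hash (the exact hash used by the ClickHouse test runner is not shown in this diff and may differ):

```python
# Minimal sketch of hash-based test sharding driven by the env vars set above.
# The concrete hash function is an assumption; only the modulo scheme matters.
import os
import zlib

def runs_on_this_shard(test_name: str) -> bool:
    num = int(os.environ.get("RUN_BY_HASH_NUM", "0"))
    total = int(os.environ.get("RUN_BY_HASH_TOTAL", "1"))
    return zlib.crc32(test_name.encode()) % total == num
```

Raising the total from 2 to 4 for the DatabaseReplicated check, as this hunk does, only works if a job exists for every shard number 0 through 3; the next hunk adds the missing jobs.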
@@ -1167,6 +1167,114 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseDatabaseReplicated2:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_database_replicated
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, DatabaseReplicated)
+          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseDatabaseReplicated3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_database_replicated
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, DatabaseReplicated)
+          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseS3_0:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, s3 storage)
+          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=0
          RUN_BY_HASH_TOTAL=2
          EOF
      - name: Download json reports
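A useful way to read the block above: after resharding, every shard number below the new total must be claimed by exactly one job, otherwise the tests that hash onto an unclaimed shard are silently never run. An illustrative check of that invariant (a pure sketch; the names are not from the workflow):

```python
# Illustrative invariant check: with RUN_BY_HASH_TOTAL=4, jobs must exist for
# RUN_BY_HASH_NUM 0, 1, 2 and 3, which is exactly the DatabaseReplicated0..3
# set after this hunk.
def shards_covered(job_nums: set[int], total: int) -> bool:
    return job_nums == set(range(total))

assert shards_covered({0, 1, 2, 3}, 4)   # DatabaseReplicated after this change
assert not shards_covered({0, 1}, 4)     # jobs left at the old total would skip tests
```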
@@ -1190,7 +1298,7 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseS3:
+  FunctionalStatelessTestReleaseS3_1:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:

@@ -1202,6 +1310,8 @@ jobs:
          CHECK_NAME=Stateless tests (release, s3 storage)
          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=2
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1271,7 +1381,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -1307,7 +1417,79 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (asan)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (asan)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1343,7 +1525,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -1379,7 +1561,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -1415,7 +1597,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1438,7 +1620,79 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestUBsan:
+  FunctionalStatelessTestTsan3:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (tsan)
+          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestTsan4:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (tsan)
+          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestUBsan0:
    needs: [BuilderDebUBsan]
    runs-on: [self-hosted, func-tester]
    steps:
@@ -1450,6 +1704,44 @@ jobs:
          CHECK_NAME=Stateless tests (ubsan)
          REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=2
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestUBsan1:
+    needs: [BuilderDebUBsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_ubsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (ubsan)
+          REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=2
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1485,7 +1777,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -1521,7 +1813,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1557,7 +1849,115 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestMsan3:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_memory
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (msan)
+          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestMsan4:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_memory
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (msan)
+          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestMsan5:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_memory
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (msan)
+          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1593,7 +1993,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -1629,7 +2029,7 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -1665,7 +2065,79 @@ jobs:
          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT=10800
          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestDebug3:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (debug)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestDebug4:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (debug)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=5
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
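A side effect of these modulus changes (3 to 5 for the debug shards here, with similar bumps elsewhere in this file) is that most tests land on a different shard than before; modular assignment is not stable across totals. A small self-contained demonstration, again with a stand-in hash:

```python
# Demonstrates shard reassignment when the shard count changes: a test keeps
# its shard only when hash % 3 == hash % 5, i.e. for 3 of the 15 possible
# residues mod 15, so roughly 80% of tests move to a new shard.
import zlib

names = [f"0{i:04d}_some_test" for i in range(1000)]
moved = sum(
    zlib.crc32(n.encode()) % 3 != zlib.crc32(n.encode()) % 5
    for n in names
)
print(f"{moved}/1000 tests change shard")  # typically the large majority
```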
@@ -2116,7 +2588,7 @@ jobs:
          CHECK_NAME=Integration tests (asan)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -2151,7 +2623,7 @@ jobs:
          CHECK_NAME=Integration tests (asan)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -2186,7 +2658,112 @@ jobs:
          CHECK_NAME=Integration tests (asan)
          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAsan4:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAsan5:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -2221,7 +2798,7 @@ jobs:
          CHECK_NAME=Integration tests (tsan)
          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -2256,7 +2833,7 @@ jobs:
          CHECK_NAME=Integration tests (tsan)
          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3

@@ -2291,7 +2868,7 @@ jobs:
          CHECK_NAME=Integration tests (tsan)
          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -2326,7 +2903,77 @@ jobs:
          CHECK_NAME=Integration tests (tsan)
          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
          RUN_BY_HASH_NUM=3
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsTsan4:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (tsan)
+          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsTsan5:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (tsan)
+          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -2361,7 +3008,7 @@ jobs:
          CHECK_NAME=Integration tests (release)
          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -2396,7 +3043,77 @@ jobs:
          CHECK_NAME=Integration tests (release)
          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsRelease2:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_release
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (release)
+          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsRelease3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_release
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (release)
+          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
@@ -3116,23 +3833,36 @@ jobs:
      - FunctionalStatelessTestDebug0
      - FunctionalStatelessTestDebug1
      - FunctionalStatelessTestDebug2
+      - FunctionalStatelessTestDebug3
+      - FunctionalStatelessTestDebug4
      - FunctionalStatelessTestRelease
      - FunctionalStatelessTestReleaseDatabaseOrdinary
      - FunctionalStatelessTestReleaseDatabaseReplicated0
      - FunctionalStatelessTestReleaseDatabaseReplicated1
+      - FunctionalStatelessTestReleaseDatabaseReplicated2
+      - FunctionalStatelessTestReleaseDatabaseReplicated3
      - FunctionalStatelessTestAarch64
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
+      - FunctionalStatelessTestAsan2
+      - FunctionalStatelessTestAsan3
      - FunctionalStatelessTestTsan0
      - FunctionalStatelessTestTsan1
      - FunctionalStatelessTestTsan2
+      - FunctionalStatelessTestTsan3
+      - FunctionalStatelessTestTsan4
      - FunctionalStatelessTestMsan0
      - FunctionalStatelessTestMsan1
      - FunctionalStatelessTestMsan2
-      - FunctionalStatelessTestUBsan
+      - FunctionalStatelessTestMsan3
+      - FunctionalStatelessTestMsan4
+      - FunctionalStatelessTestMsan5
+      - FunctionalStatelessTestUBsan0
+      - FunctionalStatelessTestUBsan1
      - FunctionalStatefulTestDebug
      - FunctionalStatefulTestRelease
-      - FunctionalStatelessTestReleaseS3
+      - FunctionalStatelessTestReleaseS3_0
+      - FunctionalStatelessTestReleaseS3_1
      - FunctionalStatefulTestAarch64
      - FunctionalStatefulTestAsan
      - FunctionalStatefulTestTsan
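Because every shard is its own named job, the aggregation job's `needs:` list above has to be updated by hand on every resharding, which is what this hunk does. An illustrative generator for such a list, using the job-name scheme and the new shard totals visible above:

```python
# Illustrative: derive the per-shard job names that an aggregation job's
# `needs:` list must contain, matching the naming scheme seen in this hunk.
SHARDED_CHECKS = {
    "FunctionalStatelessTestDebug": 5,
    "FunctionalStatelessTestReleaseDatabaseReplicated": 4,
    "FunctionalStatelessTestAsan": 4,
    "FunctionalStatelessTestTsan": 5,
    "FunctionalStatelessTestMsan": 6,
    "FunctionalStatelessTestUBsan": 2,
}

needs = [f"{check}{num}" for check, total in SHARDED_CHECKS.items()
         for num in range(total)]
print("\n".join(f"- {name}" for name in needs))
```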
@@ -3146,12 +3876,19 @@ jobs:
      - IntegrationTestsAsan0
      - IntegrationTestsAsan1
      - IntegrationTestsAsan2
+      - IntegrationTestsAsan3
+      - IntegrationTestsAsan4
+      - IntegrationTestsAsan5
      - IntegrationTestsRelease0
      - IntegrationTestsRelease1
+      - IntegrationTestsRelease2
+      - IntegrationTestsRelease3
      - IntegrationTestsTsan0
      - IntegrationTestsTsan1
      - IntegrationTestsTsan2
      - IntegrationTestsTsan3
+      - IntegrationTestsTsan4
+      - IntegrationTestsTsan5
      - PerformanceComparisonX86-0
      - PerformanceComparisonX86-1
      - PerformanceComparisonX86-2
2  .github/workflows/pull_request.yml (vendored)

@@ -918,7 +918,7 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
-          python3 docker_server.py --release-type head --no-push --no-ubuntu \
+          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
2  .github/workflows/release.yml (vendored)

@@ -55,7 +55,7 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
-          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --no-ubuntu \
+          python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
2  .github/workflows/release_branches.yml (vendored)

@@ -527,7 +527,7 @@ jobs:
          cd "$GITHUB_WORKSPACE/tests/ci"
          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-server --image-path docker/server
-          python3 docker_server.py --release-type head --no-push --no-ubuntu \
+          python3 docker_server.py --release-type head --no-push \
            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
      - name: Cleanup
        if: always()
72
CHANGELOG.md
72
CHANGELOG.md
@ -18,60 +18,60 @@
|
||||
* Do not allow const and non-deterministic secondary indices [#46839](https://github.com/ClickHouse/ClickHouse/pull/46839) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
|
||||
#### New Feature
|
||||
* Add new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Add a new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* An option to display partial result on cancel: Added query setting `partial_result_on_first_cancel` allowing the canceled query (e.g. due to Ctrl-C) to return a partial result. [#45689](https://github.com/ClickHouse/ClickHouse/pull/45689) ([Alexey Perevyshin](https://github.com/alexX512)).
|
||||
* Added support of arbitrary tables engines for temporary tables (except for Replicated and KeeperMap engines). Close [#31497](https://github.com/ClickHouse/ClickHouse/issues/31497). [#46071](https://github.com/ClickHouse/ClickHouse/pull/46071) ([Roman Vasin](https://github.com/rvasin)).
|
||||
* Add support for replication of user-defined SQL functions using a centralized storage in Keeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Add support for replication of user-defined SQL functions using centralized storage in Keeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Implement `system.server_settings` (similar to `system.settings`), which will contain server configurations. [#46550](https://github.com/ClickHouse/ClickHouse/pull/46550) ([pufit](https://github.com/pufit)).
|
||||
* Support for `UNDROP TABLE` query. Closes [#46811](https://github.com/ClickHouse/ClickHouse/issues/46811). [#47241](https://github.com/ClickHouse/ClickHouse/pull/47241) ([chen](https://github.com/xiedeyantu)).
|
||||
* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add new access type `NAMED_COLLECTION_CONTROL` which is not given to default user unless explicitly added to user config (is required to be able to do `GRANT ALL`), also `show_named_collections` is no longer obligatory to be manually specified for default user to be able to have full access rights as was in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add new access type `NAMED_COLLECTION_CONTROL` which is not given to user default unless explicitly added to the user config (is required to be able to do `GRANT ALL`), also `show_named_collections` is no longer obligatory to be manually specified for user default to be able to have full access rights as was in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Allow nested custom disks. Previously custom disks supported only flat disk structure. [#47106](https://github.com/ClickHouse/ClickHouse/pull/47106) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Intruduce a function `widthBucket` (with a `WIDTH_BUCKET` alias for compatibility). [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
|
||||
* Add new function `parseDateTime`/`parseDateTimeInJodaSyntax` according to specified format string. parseDateTime parses string to datetime in MySQL syntax, parseDateTimeInJodaSyntax parses in Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
|
||||
* Use `dummy UInt8` for default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
|
||||
* Introduce a function `widthBucket` (with a `WIDTH_BUCKET` alias for compatibility). [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
|
||||
* Add new function `parseDateTime`/`parseDateTimeInJodaSyntax` according to the specified format string. parseDateTime parses String to DateTime in MySQL syntax, parseDateTimeInJodaSyntax parses in Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
|
||||
* Use `dummy UInt8` for the default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
|
||||
* Support for date format with a comma, like `Dec 15, 2021` in the `parseDateTimeBestEffort` function. Closes [#46816](https://github.com/ClickHouse/ClickHouse/issues/46816). [#47071](https://github.com/ClickHouse/ClickHouse/pull/47071) ([chen](https://github.com/xiedeyantu)).
|
||||
* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that corresponds to URL params `wait_end_of_query` and `buffer_size` for HTTP interface. This allows to change these settings in the profiles. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that corresponds to URL params `wait_end_of_query` and `buffer_size` for the HTTP interface. This allows changing these settings in the profiles. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Add `system.dropped_tables` table that shows tables that were dropped from `Atomic` databases but were not completely removed yet. [#47364](https://github.com/ClickHouse/ClickHouse/pull/47364) ([chen](https://github.com/xiedeyantu)).
|
||||
* Add `INSTR` as alias of `positionCaseInsensitive` for MySQL compatibility. Closes [#47529](https://github.com/ClickHouse/ClickHouse/issues/47529). [#47535](https://github.com/ClickHouse/ClickHouse/pull/47535) ([flynn](https://github.com/ucasfl)).
|
||||
* Added `toDecimalString` function allowing to convert numbers to string with fixed precision. [#47838](https://github.com/ClickHouse/ClickHouse/pull/47838) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||
* Add a merge tree setting `max_number_of_mutations_for_replica`. It limits the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings). [#48047](https://github.com/ClickHouse/ClickHouse/pull/48047) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Add Map-related function `mapFromArrays`, which allows us to create map from a pair of arrays. [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
|
||||
* Allow control compression in Parquet/ORC/Arrow output formats, support more compression for input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add the Map-related function `mapFromArrays`, which allows the creation of a map from a pair of arrays. [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
|
||||
* Allow control of compression in Parquet/ORC/Arrow output formats, adds support for more compression input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add SSL User Certificate authentication to the native protocol. Closes [#47077](https://github.com/ClickHouse/ClickHouse/issues/47077). [#47596](https://github.com/ClickHouse/ClickHouse/pull/47596) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Add *OrNull() and *OrZero() variants for `parseDateTime`, add alias `str_to_date` for MySQL parity. [#48000](https://github.com/ClickHouse/ClickHouse/pull/48000) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Added operator `REGEXP` (similar to operators "LIKE", "IN", "MOD" etc.) for better compatibility with MySQL [#47869](https://github.com/ClickHouse/ClickHouse/pull/47869) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
|
||||
#### Performance Improvement
|
||||
* Marks in memory are now compressed, using 3-6x less memory. [#47290](https://github.com/ClickHouse/ClickHouse/pull/47290) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Backups for large numbers of files were unbelievably slow in previous versions. Not anymore. Now they are unbelievably fast. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Introduced a separate thread pool for backup's IO operations. This will allow to scale it independently of other pools and increase performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Use MultiRead request and retries for collecting metadata at final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). If a backup and restoring data are both in S3 then server-side copy should be used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Backups for large numbers of files were unbelievably slow in previous versions. Not anymore. Now they are unbelievably fast. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Introduced a separate thread pool for backup's IO operations. This will allow scaling it independently of other pools and increase performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Use MultiRead request and retries for collecting metadata at the final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). If a backup and restoring data are both in S3 then server-side copy should be used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Setting `max_final_threads` would be set to number of cores at server startup (by the same algorithm as we use for `max_threads`). This improves concurrency of `final` execution on servers with high number of CPUs. [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Setting `max_final_threads` would be set to the number of cores at server startup (by the same algorithm as used for `max_threads`). This improves the concurrency of `final` execution on servers with high number of CPUs. [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Allow executing reading pipeline for DIRECT dictionary with CLICKHOUSE source in multiple threads. To enable set `dictionary_use_async_executor=1` in `SETTINGS` section for source in `CREATE DICTIONARY` statement. [#47986](https://github.com/ClickHouse/ClickHouse/pull/47986) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Optimize one nullable key aggregate performance. [#45772](https://github.com/ClickHouse/ClickHouse/pull/45772) ([LiuNeng](https://github.com/liuneng1994)).
|
||||
* Implemented lowercase `tokenbf_v1` index utilization for `hasTokenOrNull`, `hasTokenCaseInsensitive` and `hasTokenCaseInsensitiveOrNull`. [#46252](https://github.com/ClickHouse/ClickHouse/pull/46252) ([ltrk2](https://github.com/ltrk2)).
|
||||
* Optimize functions `position` and `LIKE` by searching the first two chars using SIMD. [#46289](https://github.com/ClickHouse/ClickHouse/pull/46289) ([Jiebin Sun](https://github.com/jiebinn)).
|
||||
* Optimize queries from the `system.detached_parts`, which could be significantly large. Added several sources with respect to the block size limitation; in each block an IO thread pool is used to calculate the part size, i.e. to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Optimize queries from the `system.detached_parts`, which could be significantly large. Added several sources with respect to the block size limitation; in each block, an IO thread pool is used to calculate the part size, i.e. to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Increase the default value of `max_replicated_merges_in_queue` for ReplicatedMergeTree tables from 16 to 1000. It allows faster background merge operation on clusters with a very large number of replicas, such as clusters with shared storage in ClickHouse Cloud. [#47050](https://github.com/ClickHouse/ClickHouse/pull/47050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Updated `clickhouse-copier` to use `GROUP BY` instead of `DISTINCT` to get list of partitions. For large tables this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
|
||||
* Updated `clickhouse-copier` to use `GROUP BY` instead of `DISTINCT` to get the list of partitions. For large tables, this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
|
||||
* Fix performance degradation in `ASOF JOIN`. [#47544](https://github.com/ClickHouse/ClickHouse/pull/47544) ([Ongkong](https://github.com/ongkong)).
|
||||
* Even more batching in Keeper. Avoid breaking batches on read requests to improve performance. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Allow PREWHERE for Merge with different DEFAULT expression for column. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Even more batching in Keeper. Improve performance by avoiding breaking batches on read requests. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Allow PREWHERE for Merge with different DEFAULT expressions for columns. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
|
||||
|
||||
#### Experimental Feature

* Parallel replicas: improved the overall performance by better utilizing the local replica, and forbid reading with parallel replicas from non-replicated MergeTree by default. [#47858](https://github.com/ClickHouse/ClickHouse/pull/47858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support filter push down to the left table for JOIN with `Join`, `Dictionary` and `EmbeddedRocksDB` tables if the experimental Analyzer is enabled. [#47280](https://github.com/ClickHouse/ClickHouse/pull/47280) ([Maksim Kita](https://github.com/kitaisreal)).
* ReplicatedMergeTree with zero-copy replication now puts less load on Keeper. [#47676](https://github.com/ClickHouse/ClickHouse/pull/47676) ([alesapin](https://github.com/alesapin)).
* Fix creating a materialized view with MaterializedPostgreSQL. [#40807](https://github.com/ClickHouse/ClickHouse/pull/40807) ([Maksim Buren](https://github.com/maks-buren630501)).

#### Improvement

* Enable `input_format_json_ignore_unknown_keys_in_named_tuple` by default. [#46742](https://github.com/ClickHouse/ClickHouse/pull/46742) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow errors to be ignored while pushing to MATERIALIZED VIEW (add new setting `materialized_views_ignore_errors`, disabled by default, but set to `true` for flushing logs to `system.*_log` tables unconditionally). [#46658](https://github.com/ClickHouse/ClickHouse/pull/46658) ([Azat Khuzhin](https://github.com/azat)).
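A minimal sketch of the new setting in action; the table name `dst` is hypothetical:

```sql
-- With the setting enabled, the INSERT into `dst` succeeds even if one of
-- its materialized views throws; the error is logged instead of propagated.
SET materialized_views_ignore_errors = 1;
INSERT INTO dst VALUES (1), (2), (3);
```
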
* Track the file queue of distributed sends in memory. [#45491](https://github.com/ClickHouse/ClickHouse/pull/45491) ([Azat Khuzhin](https://github.com/azat)).
* Now `X-ClickHouse-Query-Id` and `X-ClickHouse-Timezone` headers are added to responses in all queries via the HTTP protocol. Previously it was done only for `SELECT` queries. [#46364](https://github.com/ClickHouse/ClickHouse/pull/46364) ([Anton Popov](https://github.com/CurtizJ)).
* External tables from `MongoDB`: support for connection to a replica set via a URI with a host:port enum, and support for the `readPreference` option in MongoDB dictionaries. Example URI: `mongodb://db0.example.com:27017,db1.example.com:27017,db2.example.com:27017/?replicaSet=myRepl&readPreference=primary`. [#46524](https://github.com/ClickHouse/ClickHouse/pull/46524) ([artem-yadr](https://github.com/artem-yadr)).
* This improvement should be invisible for users. Re-implement projection analysis on top of the query plan. Added setting `query_plan_optimize_projection=1` to switch between the old and new versions. Fixes [#44963](https://github.com/ClickHouse/ClickHouse/issues/44963). [#46537](https://github.com/ClickHouse/ClickHouse/pull/46537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use Parquet format v2 instead of v1 in the output format by default. Add setting `output_format_parquet_version` to control the Parquet version; possible values are `1.0`, `2.4`, `2.6`, `2.latest` (default). [#46617](https://github.com/ClickHouse/ClickHouse/pull/46617) ([Kruglov Pavel](https://github.com/Avogar)).
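For instance, to keep producing files readable by legacy Parquet v1 consumers (the output file name is arbitrary):

```sql
SET output_format_parquet_version = '1.0';
SELECT number FROM system.numbers LIMIT 10
INTO OUTFILE 'numbers.parquet' FORMAT Parquet;
```
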
* It is now possible to use the new configuration syntax to configure Kafka topics with periods (`.`) in their name. [#46752](https://github.com/ClickHouse/ClickHouse/pull/46752) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix heuristics that check hyperscan patterns for problematic repeats. [#46819](https://github.com/ClickHouse/ClickHouse/pull/46819) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't report "ZK node exists" to `system.errors` when a block was created concurrently by a different replica. [#46820](https://github.com/ClickHouse/ClickHouse/pull/46820) ([Raúl Marín](https://github.com/Algunenano)).
* Increase the limit for opened files in `clickhouse-local`. It will be able to read from `web` tables on servers with a huge number of CPU cores. Do not back off reading from the URL table engine in case of too many opened files. This closes [#46852](https://github.com/ClickHouse/ClickHouse/issues/46852). [#46853](https://github.com/ClickHouse/ClickHouse/pull/46853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -79,7 +79,7 @@
* Added an update of `system.backups` after every processed task, to track the progress of backups. [#46989](https://github.com/ClickHouse/ClickHouse/pull/46989) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Allow type conversion in the Native input format. Add setting `input_format_native_allow_types_conversion` that controls it (enabled by default). [#46990](https://github.com/ClickHouse/ClickHouse/pull/46990) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow IPv4 in the `range` function to generate IP ranges. [#46995](https://github.com/ClickHouse/ClickHouse/pull/46995) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
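A sketch of what this enables, assuming `range` treats IPv4 bounds the same way it treats its integer bounds (a half-open `[start, end)` interval); the addresses are made up:

```sql
SELECT range(toIPv4('10.0.0.1'), toIPv4('10.0.0.4'));
```
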
* Improve the exception message when it's impossible to move a part from one volume/disk to another. [#47032](https://github.com/ClickHouse/ClickHouse/pull/47032) ([alesapin](https://github.com/alesapin)).
* Support the `Bool` type in the `JSONType` function. Previously the `Null` type was mistakenly returned for boolean values. [#47046](https://github.com/ClickHouse/ClickHouse/pull/47046) ([Anton Popov](https://github.com/CurtizJ)).
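For example:

```sql
-- Previously this mistakenly returned 'Null' for the boolean field.
SELECT JSONType('{"flag": true}', 'flag');  -- 'Bool'
```
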
* Use the `_request_body` parameter to configure predefined HTTP queries. [#47086](https://github.com/ClickHouse/ClickHouse/pull/47086) ([Constantine Peresypkin](https://github.com/pkit)).
* Automatic indentation in the built-in UI SQL editor when Enter is pressed. [#47113](https://github.com/ClickHouse/ClickHouse/pull/47113) ([Alexey Korepanov](https://github.com/alexkorep)).
@ -87,27 +87,27 @@
* Previously, the `repeat` function's second argument only accepted an unsigned integer type, which meant it could not accept values such as -1. This behavior differed from that of the Spark function. In this update, the repeat function has been modified to match the behavior of the Spark function. It now accepts the same types of inputs, including negative integers. Extensive testing has been performed to verify the correctness of the updated implementation. [#47134](https://github.com/ClickHouse/ClickHouse/pull/47134) ([KevinyhZou](https://github.com/KevinyhZou)). Note: the changelog entry was rewritten by ChatGPT.
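For example (the negative-count result shown is Spark's behavior, which this change presumably now mirrors):

```sql
SELECT repeat('ab', 3);   -- 'ababab'
SELECT repeat('ab', -1);  -- '' (empty string, as in Spark)
```
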
* Remove the `::__1` part from stacktraces. Display `std::basic_string<char, ...` as `String` in stacktraces. [#47171](https://github.com/ClickHouse/ClickHouse/pull/47171) ([Mike Kot](https://github.com/myrrc)).
* Reimplement interserver mode to avoid replay attacks (note that this change is backward compatible with older servers). [#47213](https://github.com/ClickHouse/ClickHouse/pull/47213) ([Azat Khuzhin](https://github.com/azat)).
* Improve recognition of regular expression groups and refine the regexp_tree dictionary. [#47218](https://github.com/ClickHouse/ClickHouse/pull/47218) ([Han Fei](https://github.com/hanfei1991)).
* Keeper improvement: add new 4LW `clrs` to clean resources used by Keeper (e.g. release unused memory). [#47256](https://github.com/ClickHouse/ClickHouse/pull/47256) ([Antonio Andelic](https://github.com/antonio2368)).
* Add optional arguments to codecs `DoubleDelta(bytes_size)`, `Gorilla(bytes_size)`, `FPC(level, float_size)`; this allows using these codecs without a column type in `clickhouse-compressor`. Fix possible aborts and arithmetic errors in `clickhouse-compressor` with these codecs. Fixes: https://github.com/ClickHouse/ClickHouse/discussions/47262. [#47271](https://github.com/ClickHouse/ClickHouse/pull/47271) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for big integer types to the `runningDifference` function. Closes [#47194](https://github.com/ClickHouse/ClickHouse/issues/47194). [#47322](https://github.com/ClickHouse/ClickHouse/pull/47322) ([Nikolay Degterinsky](https://github.com/evillique)).
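For example, with `Int128` values:

```sql
SELECT x, runningDifference(x) AS d
FROM (SELECT arrayJoin([toInt128(1), toInt128(5), toInt128(3)]) AS x);
-- d: 0, 4, -2
```
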
* Add an expiration window for S3 credentials that have an expiration time, to avoid `ExpiredToken` errors in some edge cases. It can be controlled with the `expiration_window_seconds` config; the default is 120 seconds. [#47423](https://github.com/ClickHouse/ClickHouse/pull/47423) ([Antonio Andelic](https://github.com/antonio2368)).
* Support Decimals and Date32 in the `Avro` format. [#47434](https://github.com/ClickHouse/ClickHouse/pull/47434) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not start the server if an interrupted conversion from `Ordinary` to `Atomic` was detected; print a better error message with troubleshooting instructions. [#47487](https://github.com/ClickHouse/ClickHouse/pull/47487) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a new column `kind` to `system.opentelemetry_span_log`. This column holds the value of [SpanKind](https://opentelemetry.io/docs/reference/specification/trace/api/#spankind) defined in OpenTelemetry. [#47499](https://github.com/ClickHouse/ClickHouse/pull/47499) ([Frank Chen](https://github.com/FrankChen021)).
* Allow reading/writing nested arrays in `Protobuf` format with only the root field name as the column name. Previously the column name had to contain all nested field names (like `a.b.c Array(Array(Array(UInt32)))`); now you can use just `a Array(Array(Array(UInt32)))`. [#47650](https://github.com/ClickHouse/ClickHouse/pull/47650) ([Kruglov Pavel](https://github.com/Avogar)).
* Added an optional `STRICT` modifier for `SYSTEM SYNC REPLICA` which makes the query wait for the replication queue to become empty (just like it worked before https://github.com/ClickHouse/ClickHouse/pull/45648). [#47659](https://github.com/ClickHouse/ClickHouse/pull/47659) ([Alexander Tokmakov](https://github.com/tavplubix)).
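For example (the table name is hypothetical):

```sql
-- Returns only once the replication queue of this replica is empty.
SYSTEM SYNC REPLICA db.replicated_table STRICT;
```
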
* Improve the naming of some OpenTelemetry span logs. [#47667](https://github.com/ClickHouse/ClickHouse/pull/47667) ([Frank Chen](https://github.com/FrankChen021)).
* Prevent using too long chains of aggregate function combinators (they can lead to slow queries in the analysis stage). This closes [#47715](https://github.com/ClickHouse/ClickHouse/issues/47715). [#47716](https://github.com/ClickHouse/ClickHouse/pull/47716) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support for subquery in parameterized views; resolves [#46741](https://github.com/ClickHouse/ClickHouse/issues/46741) [#47725](https://github.com/ClickHouse/ClickHouse/pull/47725) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
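A minimal sketch of what now works; the table, view, and parameter names are hypothetical:

```sql
CREATE VIEW recent_events AS
SELECT event, count() AS c
FROM events
WHERE user_id IN (SELECT id FROM users WHERE signup_date >= {cutoff:Date})
GROUP BY event;

SELECT * FROM recent_events(cutoff = '2023-01-01');
```
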
* Fix a memory leak in the MySQL integration (reproduces with `connection_auto_close=1`). [#47732](https://github.com/ClickHouse/ClickHouse/pull/47732) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improved error handling in the code related to Decimal parameters, resulting in more informative error messages. Previously, when incorrect Decimal parameters were supplied, the error message generated was unclear or unhelpful. With this update, the error message printed has been fixed to provide more detailed and useful information, making it easier to identify and correct issues related to Decimal parameters. [#47812](https://github.com/ClickHouse/ClickHouse/pull/47812) ([Yu Feng](https://github.com/Vigor-jpg)). Note: this changelog entry is rewritten by ChatGPT.
* The setting `exact_rows_before_limit` makes `rows_before_limit_at_least` accurately reflect the number of rows returned before the limit was reached. This pull request addresses issues encountered when the query involves distributed processing across multiple shards or sorting operations. Prior to this update, these scenarios were not functioning as intended. [#47874](https://github.com/ClickHouse/ClickHouse/pull/47874) ([Amos Bird](https://github.com/amosbird)).
* Added introspection of thread pool metrics. [#47880](https://github.com/ClickHouse/ClickHouse/pull/47880) ([Azat Khuzhin](https://github.com/azat)).
* Add `WriteBufferFromS3Microseconds` and `WriteBufferFromS3RequestsErrors` profile events. [#47885](https://github.com/ClickHouse/ClickHouse/pull/47885) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `--link` and `--noninteractive` (`-y`) options to `clickhouse install`. Closes [#47750](https://github.com/ClickHouse/ClickHouse/issues/47750). [#47887](https://github.com/ClickHouse/ClickHouse/pull/47887) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fixed an `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix the case when the (optional) path is not added to an encrypted disk configuration. [#47981](https://github.com/ClickHouse/ClickHouse/pull/47981) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support for CTEs in parameterized views. Implementation: updated to allow query parameters while evaluating scalar subqueries. [#48065](https://github.com/ClickHouse/ClickHouse/pull/48065) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Support big integers `(U)Int128`/`(U)Int256`, `Map` with any key type, and `DateTime64` with any precision (not only 3 and 6). [#48119](https://github.com/ClickHouse/ClickHouse/pull/48119) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow skipping errors related to unknown enum values in row input formats. [#48133](https://github.com/ClickHouse/ClickHouse/pull/48133) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

@ -117,7 +117,7 @@
* Fuzz `EXPLAIN` queries in the AST Fuzzer. [#47803](https://github.com/ClickHouse/ClickHouse/pull/47803) [#47852](https://github.com/ClickHouse/ClickHouse/pull/47852) ([flynn](https://github.com/ucasfl)).
* Split the stress test and the automated backward compatibility check (now the Upgrade check). [#44879](https://github.com/ClickHouse/ClickHouse/pull/44879) ([Kruglov Pavel](https://github.com/Avogar)).
* Updated the Ubuntu image for Docker to calm down some bogus security reports. [#46784](https://github.com/ClickHouse/ClickHouse/pull/46784) ([Julio Jimenez](https://github.com/juliojimenez)). Please note that ClickHouse has no dependencies and does not require Docker.
* Adds a prompt to allow the removal of an existing `clickhouse` download when using the "curl | sh" download of ClickHouse. The prompt is "ClickHouse binary clickhouse already exists. Overwrite? \[y/N\]". [#46859](https://github.com/ClickHouse/ClickHouse/pull/46859) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Fix an error during server startup on old distros (e.g. Amazon Linux 2) and on ARM where glibc 2.28 symbols are not found. [#47008](https://github.com/ClickHouse/ClickHouse/pull/47008) ([Robert Schulze](https://github.com/rschu1ze)).
* Prepare for clang 16. [#47027](https://github.com/ClickHouse/ClickHouse/pull/47027) ([Amos Bird](https://github.com/amosbird)).
* Added a CI check which ensures ClickHouse can run with an old glibc on ARM. [#47063](https://github.com/ClickHouse/ClickHouse/pull/47063) ([Robert Schulze](https://github.com/rschu1ze)).
@ -125,20 +125,20 @@
* Speed up the build a little. [#47714](https://github.com/ClickHouse/ClickHouse/pull/47714) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump `vectorscan` to 5.4.9. [#47955](https://github.com/ClickHouse/ClickHouse/pull/47955) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a unit test to assert that Apache Arrow's fatal logging does not abort. It covers the changes in [ClickHouse/arrow#16](https://github.com/ClickHouse/arrow/pull/16). [#47958](https://github.com/ClickHouse/ClickHouse/pull/47958) ([Arthur Passos](https://github.com/arthurpassos)).
* Restore the ability of the native macOS debug server build to start. [#48050](https://github.com/ClickHouse/ClickHouse/pull/48050) ([Robert Schulze](https://github.com/rschu1ze)). Note: this change is only relevant for development, as the ClickHouse official builds are done with cross-compilation.

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix formats parser resetting; test processing of bad messages in `Kafka` [#45693](https://github.com/ClickHouse/ClickHouse/pull/45693) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix data size calculation in Keeper [#46086](https://github.com/ClickHouse/ClickHouse/pull/46086) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixed a bug in automatic retries of the `DROP TABLE` query with `ReplicatedMergeTree` tables and `Atomic` databases. In rare cases it could lead to `Can't get data for node /zk_path/log_pointer` and `The specified key does not exist` errors if the ZooKeeper session expired during DROP and a new replicated table with the same path in ZooKeeper was created in parallel. [#46384](https://github.com/ClickHouse/ClickHouse/pull/46384) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect alias recursion while normalizing queries that prevented some queries from running. [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Fix IPv4/IPv6 serialization/deserialization in binary formats [#46616](https://github.com/ClickHouse/ClickHouse/pull/46616) ([Kruglov Pavel](https://github.com/Avogar)).
* ActionsDAG: do not change the result of `and` during optimization [#46653](https://github.com/ClickHouse/ClickHouse/pull/46653) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Improve query cancellation when a client dies [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible `clickhouse-local` abort on JSONEachRow schema inference [#46731](https://github.com/ClickHouse/ClickHouse/pull/46731) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix combined PREWHERE column accumulation from multiple steps [#46785](https://github.com/ClickHouse/ClickHouse/pull/46785) ([Alexander Gololobov](https://github.com/davenger)).
* Use the initial range for fetching the file size in the HTTP read buffer. Without this change, some remote files couldn't be processed. [#46824](https://github.com/ClickHouse/ClickHouse/pull/46824) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the incorrect progress bar while using URL tables [#46830](https://github.com/ClickHouse/ClickHouse/pull/46830) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix MSan report in the `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -186,7 +186,7 @@
* Fix for "index file `cidx` is unexpectedly long" [#48010](https://github.com/ClickHouse/ClickHouse/pull/48010) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix the MaterializedPostgreSQL query to get attributes (replica-identity) [#48015](https://github.com/ClickHouse/ClickHouse/pull/48015) ([Solomatov Sergei](https://github.com/solomatovs)).
* parseDateTime(): fix UB (signed integer overflow) [#48019](https://github.com/ClickHouse/ClickHouse/pull/48019) ([Robert Schulze](https://github.com/rschu1ze)).
* Use unique names for Records in Avro to avoid reusing their schema [#48057](https://github.com/ClickHouse/ClickHouse/pull/48057) ([Kruglov Pavel](https://github.com/Avogar)).
* Correctly set TCP/HTTP socket timeouts in Keeper [#48108](https://github.com/ClickHouse/ClickHouse/pull/48108) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible member call on null pointer in the `Avro` format [#48184](https://github.com/ClickHouse/ClickHouse/pull/48184) ([Kruglov Pavel](https://github.com/Avogar)).

@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s

| Version | Supported |
|:-|:-|
| 23.3 | ✔️ |
| 23.2 | ✔️ |
| 23.1 | ✔️ |
| 22.12 | ❌ |
| 22.11 | ❌ |
| 22.10 | ❌ |
| 22.9 | ❌ |
@ -24,7 +25,7 @@ The following versions of ClickHouse server are currently being supported with s
| 22.6 | ❌ |
| 22.5 | ❌ |
| 22.4 | ❌ |
| 22.3 | ❌ |
| 22.2 | ❌ |
| 22.1 | ❌ |
| 21.* | ❌ |

@ -36,7 +36,7 @@

namespace detail
{
template <char ...chars> constexpr bool is_in(char x) { return ((x == chars) || ...); } // NOLINT(misc-redundant-expression)

#if defined(__SSE2__)
template <char s0>

@ -35,7 +35,7 @@ public:
    Self & operator=(T && rhs) { t = std::move(rhs); return *this;}

    // NOLINTBEGIN(google-explicit-constructor)
    constexpr operator const T & () const { return t; }
    operator T & () { return t; }
    // NOLINTEND(google-explicit-constructor)

81
base/glibc-compatibility/musl/expf.c
Normal file
@ -0,0 +1,81 @@
/* origin: FreeBSD /usr/src/lib/msun/src/e_expf.c */
/*
 * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
 */
/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

#include "libm.h"

static const float
half[2] = {0.5,-0.5},
ln2hi = 6.9314575195e-1f,  /* 0x3f317200 */
ln2lo = 1.4286067653e-6f,  /* 0x35bfbe8e */
invln2 = 1.4426950216e+0f, /* 0x3fb8aa3b */
/*
 * Domain [-0.34568, 0.34568], range ~[-4.278e-9, 4.447e-9]:
 * |x*(exp(x)+1)/(exp(x)-1) - p(x)| < 2**-27.74
 */
P1 = 1.6666625440e-1f,  /*  0xaaaa8f.0p-26 */
P2 = -2.7667332906e-3f; /* -0xb55215.0p-32 */

float expf(float x)
{
    float_t hi, lo, c, xx, y;
    int k, sign;
    uint32_t hx;

    GET_FLOAT_WORD(hx, x);
    sign = hx >> 31;   /* sign bit of x */
    hx &= 0x7fffffff;  /* high word of |x| */

    /* special cases */
    if (hx >= 0x42aeac50) {  /* if |x| >= -87.33655f or NaN */
        if (hx >= 0x42b17218 && !sign) {  /* x >= 88.722839f */
            /* overflow */
            x *= 0x1p127f;
            return x;
        }
        if (sign) {
            /* underflow */
            FORCE_EVAL(-0x1p-149f/x);
            if (hx >= 0x42cff1b5)  /* x <= -103.972084f */
                return 0;
        }
    }

    /* argument reduction */
    if (hx > 0x3eb17218) {  /* if |x| > 0.5 ln2 */
        if (hx > 0x3f851592)  /* if |x| > 1.5 ln2 */
            k = invln2*x + half[sign];
        else
            k = 1 - sign - sign;
        hi = x - k*ln2hi;  /* k*ln2hi is exact here */
        lo = k*ln2lo;
        x = hi - lo;
    } else if (hx > 0x39000000) {  /* |x| > 2**-14 */
        k = 0;
        hi = x;
        lo = 0;
    } else {
        /* raise inexact */
        FORCE_EVAL(0x1p127f + x);
        return 1 + x;
    }

    /* x is now in primary range */
    xx = x*x;
    c = x - xx*(P1+xx*P2);
    y = 1 + (x*c/(2-c) - lo + hi);
    if (k == 0)
        return y;
    return scalbnf(y, k);
}
31
base/glibc-compatibility/musl/scalbnf.c
Normal file
@ -0,0 +1,31 @@
#include <math.h>
#include <stdint.h>

float scalbnf(float x, int n)
{
    union {float f; uint32_t i;} u;
    float_t y = x;

    if (n > 127) {
        y *= 0x1p127f;
        n -= 127;
        if (n > 127) {
            y *= 0x1p127f;
            n -= 127;
            if (n > 127)
                n = 127;
        }
    } else if (n < -126) {
        y *= 0x1p-126f;
        n += 126;
        if (n < -126) {
            y *= 0x1p-126f;
            n += 126;
            if (n < -126)
                n = -126;
        }
    }
    u.i = (uint32_t)(0x7f+n)<<23;
    x = y * u.f;
    return x;
}
@ -2,11 +2,11 @@

# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54473)
SET(VERSION_MAJOR 23)
SET(VERSION_MINOR 4)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 46e85357ce2da2a99f56ee83a079e892d7ec3726)
SET(VERSION_DESCRIBE v23.4.1.1-testing)
SET(VERSION_STRING 23.4.1.1)
# end of autochange

2
contrib/arrow
vendored
@ -1 +1 @@
Subproject commit 1f1b3d35fb6eb73e6492d3afd8a85cde848d174f
@ -202,6 +202,7 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/builder.cc"
    "${LIBRARY_DIR}/buffer.cc"
    "${LIBRARY_DIR}/chunked_array.cc"
    "${LIBRARY_DIR}/chunk_resolver.cc"
    "${LIBRARY_DIR}/compare.cc"
    "${LIBRARY_DIR}/config.cc"
    "${LIBRARY_DIR}/datum.cc"
@ -268,6 +269,10 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/util/uri.cc"
    "${LIBRARY_DIR}/util/utf8.cc"
    "${LIBRARY_DIR}/util/value_parsing.cc"
    "${LIBRARY_DIR}/util/byte_size.cc"
    "${LIBRARY_DIR}/util/debug.cc"
    "${LIBRARY_DIR}/util/tracing.cc"
    "${LIBRARY_DIR}/util/atfork_internal.cc"
    "${LIBRARY_DIR}/vendored/base64.cpp"
    "${LIBRARY_DIR}/vendored/datetime/tz.cpp"

@ -301,9 +306,11 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/compute/exec/source_node.cc"
    "${LIBRARY_DIR}/compute/exec/sink_node.cc"
    "${LIBRARY_DIR}/compute/exec/order_by_impl.cc"
    "${LIBRARY_DIR}/compute/exec/partition_util.cc"
    "${LIBRARY_DIR}/compute/function.cc"
    "${LIBRARY_DIR}/compute/function_internal.cc"
    "${LIBRARY_DIR}/compute/kernel.cc"
    "${LIBRARY_DIR}/compute/light_array.cc"
    "${LIBRARY_DIR}/compute/registry.cc"
    "${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc"
    "${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc"
@ -317,21 +324,28 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_dictionary.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_extension.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_compare.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_nested.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_random.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_round.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_temporal_binary.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_temporal_unary.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_validity.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_if_else.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string_ascii.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string_utf8.cc"
    "${LIBRARY_DIR}/compute/kernels/util_internal.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_array_sort.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_cumulative_ops.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_hash.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_rank.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_select_k.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_nested.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_replace.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_selection.cc"
@ -340,13 +354,15 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/compute/exec/union_node.cc"
    "${LIBRARY_DIR}/compute/exec/key_hash.cc"
    "${LIBRARY_DIR}/compute/exec/key_map.cc"
    "${LIBRARY_DIR}/compute/exec/key_compare.cc"
    "${LIBRARY_DIR}/compute/exec/key_encode.cc"
    "${LIBRARY_DIR}/compute/exec/util.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join_dict.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join_node.cc"
    "${LIBRARY_DIR}/compute/exec/task_util.cc"
    "${LIBRARY_DIR}/compute/row/encode_internal.cc"
    "${LIBRARY_DIR}/compute/row/grouper.cc"
    "${LIBRARY_DIR}/compute/row/compare_internal.cc"
    "${LIBRARY_DIR}/compute/row/row_internal.cc"

    "${LIBRARY_DIR}/ipc/dictionary.cc"
    "${LIBRARY_DIR}/ipc/feather.cc"
@ -357,7 +373,8 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/ipc/writer.cc"

    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
    "${ARROW_SRC_DIR}/arrow/adapters/orc/util.cc"
    "${ARROW_SRC_DIR}/arrow/adapters/orc/options.cc"
)

add_definitions(-DARROW_WITH_LZ4)

@ -1,3 +1,6 @@

# The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script
# If the image is built from Dockerfile.alpine, then the `-alpine` suffix is added automatically,
# so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes
FROM ubuntu:20.04 AS glibc-donor

ARG TARGETARCH
@ -29,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.3.1.2823"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.

1
docker/keeper/Dockerfile.ubuntu
Symbolic link
@ -0,0 +1 @@
Dockerfile
@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \

# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.3.1.2823"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.

@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.3.1.2823"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image

26
docs/changelogs/v22.12.6.22-stable.md
Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.12.6.22-stable (10d87f90261) FIXME as compared to v22.12.5.34-stable (b82d6401ca1)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).

29
docs/changelogs/v22.3.20.29-lts.md
Normal file
@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.3.20.29-lts (297b4dd5e55) FIXME as compared to v22.3.19.6-lts (467e0a7bd77)

#### Improvement

* Backported in [#46979](https://github.com/ClickHouse/ClickHouse/issues/46979): Apply `ALTER TABLE table_name ON CLUSTER cluster MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'` to all replicas. Because `ALTER TABLE t MOVE` is not replicated. [#46402](https://github.com/ClickHouse/ClickHouse/pull/46402) ([lizhuoyu5](https://github.com/lzydmxy)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix incorrect alias recursion in QueryNormalizer [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Fix MSan report in `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong results of some LIKE searches when the LIKE pattern contains quoted non-quotable characters [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix possible deadlock in QueryStatus [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).

29
docs/changelogs/v22.8.16.32-lts.md
Normal file
@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.16.32-lts (7c4be737bd0) FIXME as compared to v22.8.15.23-lts (d36fa168bbf)

#### Build/Testing/Packaging Improvement

* Backported in [#48344](https://github.com/ClickHouse/ClickHouse/issues/48344): Use sccache as a replacement for ccache and using S3 as cache backend. [#46240](https://github.com/ClickHouse/ClickHouse/pull/46240) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#48250](https://github.com/ClickHouse/ClickHouse/issues/48250): The `clickhouse/clickhouse-keeper` image used to be pushed only with tags `-alpine`, e.g. `latest-alpine`. As it was suggested in https://github.com/ClickHouse/examples/pull/2, now it will be pushed as suffixless too. [#48236](https://github.com/ClickHouse/ClickHouse/pull/48236) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wait for zero copy lock during move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Fix crash in polygonsSymDifferenceCartesian [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Backport to 22.8: Fix moving broken parts to the detached for the object storage disk on startup [#48273](https://github.com/ClickHouse/ClickHouse/pull/48273) ([Aleksei Filatov](https://github.com/aalexfvk)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).

34
docs/changelogs/v23.1.6.42-stable.md
Normal file
@ -0,0 +1,34 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.1.6.42-stable (783ddf67991) FIXME as compared to v23.1.5.24-stable (0e51b53ba99)

#### Build/Testing/Packaging Improvement

* Backported in [#48215](https://github.com/ClickHouse/ClickHouse/issues/48215): Use sccache as a replacement for ccache and using S3 as cache backend. [#46240](https://github.com/ClickHouse/ClickHouse/pull/46240) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#48254](https://github.com/ClickHouse/ClickHouse/issues/48254): The `clickhouse/clickhouse-keeper` image used to be pushed only with tags `-alpine`, e.g. `latest-alpine`. As it was suggested in https://github.com/ClickHouse/examples/pull/2, now it will be pushed as suffixless too. [#48236](https://github.com/ClickHouse/ClickHouse/pull/48236) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Disable logical expression optimizer for expression with aliases. [#47451](https://github.com/ClickHouse/ClickHouse/pull/47451) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parameterized view bug fix 47287 47247 [#47495](https://github.com/ClickHouse/ClickHouse/pull/47495) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix wait for zero copy lock during move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `00933_test_fix_extra_seek_on_compressed_cache` in releases. [#47490](https://github.com/ClickHouse/ClickHouse/pull/47490) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).

40
docs/changelogs/v23.2.5.46-stable.md
Normal file
@ -0,0 +1,40 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.2.5.46-stable (b50faecbb12) FIXME as compared to v23.2.4.12-stable (8fe866cb035)

#### Improvement

* Backported in [#48164](https://github.com/ClickHouse/ClickHouse/issues/48164): Fixed `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).

#### Build/Testing/Packaging Improvement

* Backported in [#48216](https://github.com/ClickHouse/ClickHouse/issues/48216): Use sccache as a replacement for ccache and using S3 as cache backend. [#46240](https://github.com/ClickHouse/ClickHouse/pull/46240) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#48256](https://github.com/ClickHouse/ClickHouse/issues/48256): The `clickhouse/clickhouse-keeper` image used to be pushed only with tags `-alpine`, e.g. `latest-alpine`. As it was suggested in https://github.com/ClickHouse/examples/pull/2, now it will be pushed as suffixless too. [#48236](https://github.com/ClickHouse/ClickHouse/pull/48236) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Disable logical expression optimizer for expression with aliases. [#47451](https://github.com/ClickHouse/ClickHouse/pull/47451) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parameterized view bug fix 47287 47247 [#47495](https://github.com/ClickHouse/ClickHouse/pull/47495) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Proper fix for bug in parquet, revert reverted [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) [#47538](https://github.com/ClickHouse/ClickHouse/pull/47538) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wait for zero copy lock during move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* fix: keeper systemd service file include invalid inline comment [#47105](https://github.com/ClickHouse/ClickHouse/pull/47105) ([SuperDJY](https://github.com/cmsxbc)).
* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `00933_test_fix_extra_seek_on_compressed_cache` in releases. [#47490](https://github.com/ClickHouse/ClickHouse/pull/47490) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix startup on older systemd versions [#47689](https://github.com/ClickHouse/ClickHouse/pull/47689) ([Thomas Casteleyn](https://github.com/Hipska)).
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).

545
docs/changelogs/v23.3.1.2823-lts.md
Normal file
@ -0,0 +1,545 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.1.2823-lts (46e85357ce2) FIXME as compared to v23.2.1.2537-stable (52bf836e03a)

#### Backward Incompatible Change

* Relax the symbols that are allowed in URL authority in *domain*RFC()/netloc(). [#46841](https://github.com/ClickHouse/ClickHouse/pull/46841) ([Azat Khuzhin](https://github.com/azat)).
* Prohibit creating tables based on KafkaEngine with DEFAULT/EPHEMERAL/ALIAS/MATERIALIZED statements for columns. [#47138](https://github.com/ClickHouse/ClickHouse/pull/47138) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* The "asynchronous connection drain" feature is removed. Related settings and metrics are removed as well. It was an internal feature, so the removal should not affect users who had never heard about that feature. [#47486](https://github.com/ClickHouse/ClickHouse/pull/47486) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support the 256-bit Decimal data type (more than 38 digits) in `arraySum`/`Min`/`Max`/`Avg`/`Product`, `arrayCumSum`/`CumSumNonNegative`, `arrayDifference`, array construction, the IN operator, query parameters, `groupArrayMovingSum`, statistical functions, `min`/`max`/`any`/`argMin`/`argMax`, the PostgreSQL wire protocol, the MySQL table engine and function, `sumMap`, `mapAdd`, `mapSubtract`, `arrayIntersect`. Add support for big integers in `arrayIntersect`. Statistical aggregate functions involving moments (such as `corr` or the various `TTest`s) will use `Float64` as their internal representation (they were using `Decimal128` before this change, but it was pointless), and these functions can return `nan` instead of `inf` in case of infinite variance. Some functions were allowed on `Decimal256` data types but returned `Decimal128` in previous versions - now this is fixed. This closes [#47569](https://github.com/ClickHouse/ClickHouse/issues/47569). This closes [#44864](https://github.com/ClickHouse/ClickHouse/issues/44864). This closes [#28335](https://github.com/ClickHouse/ClickHouse/issues/28335). [#47594](https://github.com/ClickHouse/ClickHouse/pull/47594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make `backup_threads`/`restore_threads` server settings. [#47881](https://github.com/ClickHouse/ClickHouse/pull/47881) ([Azat Khuzhin](https://github.com/azat)).
* Fix the `isIPv6String` function, which could output a false positive result for an incorrect IPv6 address. For example, `1234::1234:` was considered a valid IPv6 address. [#47895](https://github.com/ClickHouse/ClickHouse/pull/47895) ([Nikolay Degterinsky](https://github.com/evillique)).

#### New Feature

* Add new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
* Added query setting `partial_result_on_first_cancel` allowing the canceled query (e.g. due to Ctrl-C) to return a partial result. [#45689](https://github.com/ClickHouse/ClickHouse/pull/45689) ([Alexey Perevyshin](https://github.com/alexX512)).
|
||||
* Added support of arbitrary tables engines for temporary tables except for Replicated and KeeperMap engines. Partially close [#31497](https://github.com/ClickHouse/ClickHouse/issues/31497). [#46071](https://github.com/ClickHouse/ClickHouse/pull/46071) ([Roman Vasin](https://github.com/rvasin)).
|
||||
* Add replication of user-defined SQL functions using ZooKeeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Implement `system.server_settings` (similar to `system.settings`), which will contain server configurations. [#46550](https://github.com/ClickHouse/ClickHouse/pull/46550) ([pufit](https://github.com/pufit)).
|
||||
* Intruduce a function `WIDTH_BUCKET`. [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
|
||||
* Add new function parseDateTime/parseDateTimeInJodaSyntax according to specified format string. parseDateTime parses string to datetime in MySQL syntax, parseDateTimeInJodaSyntax parses in Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
|
||||
* Use `dummy UInt8` for default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
|
||||
* Dec 15, 2021 support for parseDateTimeBestEffort function. closes [#46816](https://github.com/ClickHouse/ClickHouse/issues/46816). [#47071](https://github.com/ClickHouse/ClickHouse/pull/47071) ([chen](https://github.com/xiedeyantu)).
|
||||
* Add function ULIDStringToDateTime(). Closes [#46945](https://github.com/ClickHouse/ClickHouse/issues/46945). [#47087](https://github.com/ClickHouse/ClickHouse/pull/47087) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that corresponds to URL params `wait_end_of_query` and `buffer_size` for HTTP interface. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Support for `UNDROP TABLE` query. Closes [#46811](https://github.com/ClickHouse/ClickHouse/issues/46811). [#47241](https://github.com/ClickHouse/ClickHouse/pull/47241) ([chen](https://github.com/xiedeyantu)).
|
||||
* Add `system.marked_dropped_tables` table that shows tables that were dropped from `Atomic` databases but were not completely removed yet. [#47364](https://github.com/ClickHouse/ClickHouse/pull/47364) ([chen](https://github.com/xiedeyantu)).
|
||||
* Add `INSTR` as alias of `positionCaseInsensitive` for MySQL compatibility. Closes [#47529](https://github.com/ClickHouse/ClickHouse/issues/47529). [#47535](https://github.com/ClickHouse/ClickHouse/pull/47535) ([flynn](https://github.com/ucasfl)).
|
||||
* Added `toDecimalString` function allowing to convert numbers to string with fixed precision. [#47838](https://github.com/ClickHouse/ClickHouse/pull/47838) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||
* Added operator "REGEXP" (similar to operators "LIKE", "IN", "MOD" etc.) for better compatibility with MySQL. [#47869](https://github.com/ClickHouse/ClickHouse/pull/47869) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Allow executing reading pipeline for DIRECT dictionary with CLICKHOUSE source in multiple threads. To enable set `dictionary_use_async_executor=1` in `SETTINGS` section for source in `CREATE DICTIONARY` statement. [#47986](https://github.com/ClickHouse/ClickHouse/pull/47986) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Add merge tree setting `max_number_of_mutatuins_for_replica`. It limit the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings). [#48047](https://github.com/ClickHouse/ClickHouse/pull/48047) ([Vladimir C](https://github.com/vdimir)).
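
Below is a quick look at the new `system.server_settings` table; the `name`/`value` columns are assumed by analogy with `system.settings`:

```sql
-- Inspect server-level configuration values.
SELECT name, value
FROM system.server_settings
WHERE name ILIKE '%thread%'
LIMIT 5;
```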
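
A minimal sketch of `WIDTH_BUCKET(operand, low, high, count)`, which returns the 1-based index of the equal-width bucket that `operand` falls into:

```sql
-- Four buckets of width 5 over [0, 20); 10.15 falls into the third bucket.
SELECT WIDTH_BUCKET(10.15, 0, 20, 4) AS bucket;  -- 3
```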
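
A sketch of the two new parsing functions; the format specifiers follow MySQL (`%Y`, `%m`, `%d`, `%H`, `%i`, `%s`) and Joda (`yyyy-MM-dd HH:mm:ss`) conventions respectively:

```sql
-- MySQL-style format string.
SELECT parseDateTime('2023-04-01 12:34:56', '%Y-%m-%d %H:%i:%s') AS mysql_style;
-- Joda-style pattern.
SELECT parseDateTimeInJodaSyntax('2023-04-01 12:34:56', 'yyyy-MM-dd HH:mm:ss') AS joda_style;
```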
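
A sketch of the `UNDROP TABLE` flow together with `system.marked_dropped_tables`; it assumes the drop has not yet been finalized in the background (and, in some versions, that the query is enabled via an experimental setting):

```sql
CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x;
DROP TABLE t;
-- While the table is pending final removal it is listed here
-- (assuming a 'table' column in system.marked_dropped_tables):
SELECT table FROM system.marked_dropped_tables;
-- ...and it can be brought back:
UNDROP TABLE t;
```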
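
A sketch of the MySQL-compatibility additions from this list, the `REGEXP` operator and the `INSTR` alias:

```sql
-- REGEXP behaves like a regular-expression match.
SELECT 'ClickHouse' REGEXP '^Click' AS is_match;  -- 1
-- INSTR is a case-insensitive position search (alias of positionCaseInsensitive).
SELECT INSTR('ClickHouse', 'house') AS pos;       -- 6
```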

#### Performance Improvement

* Optimize aggregation performance with a single nullable key. [#45772](https://github.com/ClickHouse/ClickHouse/pull/45772) ([LiuNeng](https://github.com/liuneng1994)).
* Implemented lowercase `tokenbf_v1` index utilization for `hasTokenOrNull`, `hasTokenCaseInsensitive` and `hasTokenCaseInsensitiveOrNull`. [#46252](https://github.com/ClickHouse/ClickHouse/pull/46252) ([ltrk2](https://github.com/ltrk2)).
* Optimize the generic SIMD StringSearcher by searching for the first two characters. [#46289](https://github.com/ClickHouse/ClickHouse/pull/46289) ([Jiebin Sun](https://github.com/jiebinn)).
* `system.detached_parts` could be significantly large. Added several sources that respect the block size limitation; in each block, an IO thread pool is used to calculate part sizes, i.e. to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
* Increase the default value of `max_replicated_merges_in_queue` for ReplicatedMergeTree tables from 16 to 1000. It allows faster background merge operations on clusters with a very large number of replicas, such as clusters with shared storage in ClickHouse Cloud. [#47050](https://github.com/ClickHouse/ClickHouse/pull/47050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backups for large numbers of files were unbelievably slow in previous versions. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support filter push-down to the left table for JOIN with StorageJoin, StorageDictionary, StorageEmbeddedRocksDB. [#47280](https://github.com/ClickHouse/ClickHouse/pull/47280) ([Maksim Kita](https://github.com/kitaisreal)).
* Marks in memory are now compressed, using 3-6x less memory. [#47290](https://github.com/ClickHouse/ClickHouse/pull/47290) ([Michael Kolupaev](https://github.com/al13n321)).
* Updated copier to use `GROUP BY` instead of `DISTINCT` to get the list of partitions. For large tables this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
* Address https://github.com/clickhouse/clickhouse/issues/46453. A bisect marked https://github.com/clickhouse/clickhouse/pull/35525 as the bad change; this PR reverts the changes from that PR. [#47544](https://github.com/ClickHouse/ClickHouse/pull/47544) ([Ongkong](https://github.com/ongkong)).
* Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
* The setting `max_final_threads` is now set to the number of cores at server startup (by the same algorithm as used for `max_threads`). This improves the concurrency of `FINAL` execution on servers with a high number of CPUs (example after this list). [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
* Avoid breaking batches on read requests to improve performance. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).
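
A sketch of capping `FINAL` parallelism per query; the table `events` is hypothetical:

```sql
-- 'events' stands for some ReplacingMergeTree table.
-- FINAL merges row versions at read time; max_final_threads caps its parallelism.
SELECT count()
FROM events FINAL
SETTINGS max_final_threads = 8;
```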

#### Improvement

* Add the map-related function `mapFromArrays`, which creates a map from a pair of arrays (example after this list). [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
* Rewrite distributed sends to avoid using the filesystem as a queue; use an in-memory queue instead. [#45491](https://github.com/ClickHouse/ClickHouse/pull/45491) ([Azat Khuzhin](https://github.com/azat)).
* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add a new access type `NAMED_COLLECTION_CONTROL` which is not given to the default user unless explicitly added to the user config (it is required to be able to do `GRANT ALL`); also `show_named_collections` no longer has to be manually specified for the default user to have full access rights, as was the case in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Now `X-ClickHouse-Query-Id` and `X-ClickHouse-Timezone` headers are added to the response in all queries via the HTTP protocol. Previously it was done only for `SELECT` queries. [#46364](https://github.com/ClickHouse/ClickHouse/pull/46364) ([Anton Popov](https://github.com/CurtizJ)).
* Support for connection to a replica set via a URI with a host:port enum and support for the `readPreference` option in MongoDB dictionaries. Example URI: `mongodb://db0.example.com:27017,db1.example.com:27017,db2.example.com:27017/?replicaSet=myRepl&readPreference=primary`. [#46524](https://github.com/ClickHouse/ClickHouse/pull/46524) ([artem-yadr](https://github.com/artem-yadr)).
* Re-implement projection analysis on top of the query plan. Added setting `query_plan_optimize_projection=1` to switch between the old and new versions. Fixes [#44963](https://github.com/ClickHouse/ClickHouse/issues/44963). [#46537](https://github.com/ClickHouse/ClickHouse/pull/46537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use Parquet format v2 instead of v1 in the output format by default. Add setting `output_format_parquet_version` to control the Parquet version; possible values are `v1_0`, `v2_4`, `v2_6`, `v2_latest` (default). [#46617](https://github.com/ClickHouse/ClickHouse/pull/46617) ([Kruglov Pavel](https://github.com/Avogar)).
* Not for changelog - part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#46632](https://github.com/ClickHouse/ClickHouse/pull/46632) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Allow ignoring errors while pushing to a MATERIALIZED VIEW (add new setting `materialized_views_ignore_errors`, `false` by default, but set to `true` for flushing logs to `system.*_log` tables unconditionally). [#46658](https://github.com/ClickHouse/ClickHouse/pull/46658) ([Azat Khuzhin](https://github.com/azat)).
* Enable `input_format_json_ignore_unknown_keys_in_named_tuple` by default. [#46742](https://github.com/ClickHouse/ClickHouse/pull/46742) ([Kruglov Pavel](https://github.com/Avogar)).
* It is now possible, using the new configuration syntax, to configure Kafka topics with periods in their name. [#46752](https://github.com/ClickHouse/ClickHouse/pull/46752) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix heuristics that check hyperscan patterns for problematic repeats. [#46819](https://github.com/ClickHouse/ClickHouse/pull/46819) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't report "ZK node exists" to `system.errors` when a block was created concurrently by a different replica. [#46820](https://github.com/ClickHouse/ClickHouse/pull/46820) ([Raúl Marín](https://github.com/Algunenano)).
* Allow PREWHERE for Merge with different DEFAULT expressions for a column. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
* Increase the limit for opened files in `clickhouse-local`. It will be able to read from `web` tables on servers with a huge number of CPU cores. Do not back off reading from the URL table engine in case of too many opened files. This closes [#46852](https://github.com/ClickHouse/ClickHouse/issues/46852). [#46853](https://github.com/ClickHouse/ClickHouse/pull/46853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exceptions thrown when numbers cannot be parsed now have an easier-to-read exception message. [#46917](https://github.com/ClickHouse/ClickHouse/pull/46917) ([Robert Schulze](https://github.com/rschu1ze)).
* Added an update of `system.backups` after every processed task. [#46989](https://github.com/ClickHouse/ClickHouse/pull/46989) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Allow types conversion in the Native input format. Add setting `input_format_native_allow_types_conversion` that controls it (enabled by default). [#46990](https://github.com/ClickHouse/ClickHouse/pull/46990) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow IPv4 in the `range` function to generate IP ranges. [#46995](https://github.com/ClickHouse/ClickHouse/pull/46995) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* A role change was sometimes not applied before https://github.com/ClickHouse/ClickHouse/pull/46772. This PR just adds tests. [#47002](https://github.com/ClickHouse/ClickHouse/pull/47002) ([Ilya Golshtein](https://github.com/ilejn)).
* Improve the exception message when it's impossible to move a part from one volume/disk to another. [#47032](https://github.com/ClickHouse/ClickHouse/pull/47032) ([alesapin](https://github.com/alesapin)).
* Support the `Bool` type in the `JSONType` function. Previously the `Null` type was mistakenly returned for boolean values (example after this list). [#47046](https://github.com/ClickHouse/ClickHouse/pull/47046) ([Anton Popov](https://github.com/CurtizJ)).
* Use the `_request_body` parameter to configure predefined HTTP queries. [#47086](https://github.com/ClickHouse/ClickHouse/pull/47086) ([Constantine Peresypkin](https://github.com/pkit)).
* Remove logging of the custom disk structure. [#47103](https://github.com/ClickHouse/ClickHouse/pull/47103) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow nested custom disks. Previously custom disks supported only a flat disk structure. [#47106](https://github.com/ClickHouse/ClickHouse/pull/47106) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Automatic indentation in the built-in UI SQL editor when Enter is pressed. [#47113](https://github.com/ClickHouse/ClickHouse/pull/47113) ([Alexey Korepanov](https://github.com/alexkorep)).
* Allow controlling compression in Parquet/ORC/Arrow output formats, and support more compression types for input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
* Self-extraction with `sudo` will attempt to set the uid and gid of extracted files to the running user. [#47116](https://github.com/ClickHouse/ClickHouse/pull/47116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Previously, the second argument of the function `repeat` had to be an unsigned integer and could not accept a negative value like -1, which differs from the equivalent Spark function. Now it behaves the same as in Spark. [#47134](https://github.com/ClickHouse/ClickHouse/pull/47134) ([KevinyhZou](https://github.com/KevinyhZou)).
* Remove the `::__1` part from stack traces. Display `std::basic_string<char, ...` as `String` in stack traces. [#47171](https://github.com/ClickHouse/ClickHouse/pull/47171) ([Mike Kot](https://github.com/myrrc)).
* Introduced a separate thread pool for backup IO operations. This allows scaling it independently of the other pools and increases performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Reimplement interserver mode to avoid replay attacks (note that this change is backward compatible with older servers). [#47213](https://github.com/ClickHouse/ClickHouse/pull/47213) ([Azat Khuzhin](https://github.com/azat)).
* Make the function `OptimizeRegularExpression` recognize regexp groups and refine the regexp tree dictionary. [#47218](https://github.com/ClickHouse/ClickHouse/pull/47218) ([Han Fei](https://github.com/hanfei1991)).
* Use MultiRead requests and retries for collecting metadata at the final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Keeper improvement: add a new 4LW `clrs` to clean resources used by Keeper (e.g. release unused memory). [#47256](https://github.com/ClickHouse/ClickHouse/pull/47256) ([Antonio Andelic](https://github.com/antonio2368)).
* Add optional arguments to the codecs `DoubleDelta(bytes_size)`, `Gorilla(bytes_size)`, `FPC(level, float_size)`. This allows using these codecs without a column type in `clickhouse-compressor`. Fix possible aborts and arithmetic errors in `clickhouse-compressor` with these codecs. Fixes: https://github.com/ClickHouse/ClickHouse/discussions/47262. [#47271](https://github.com/ClickHouse/ClickHouse/pull/47271) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for big integer types to the `runningDifference()` function. Closes [#47194](https://github.com/ClickHouse/ClickHouse/issues/47194). [#47322](https://github.com/ClickHouse/ClickHouse/pull/47322) ([Nikolay Degterinsky](https://github.com/evillique)).
* PostgreSQL replication has been adjusted to use the "FROM ONLY" clause while performing the initial synchronization. This prevents double-fetching the same data in case the target PostgreSQL database uses table inheritance. [#47387](https://github.com/ClickHouse/ClickHouse/pull/47387) ([Maksym Sobolyev](https://github.com/sobomax)).
* Add an expiration window for S3 credentials that have an expiration time to avoid `ExpiredToken` errors in some edge cases. It can be controlled with the `expiration_window_seconds` config; the default is 120 seconds. [#47423](https://github.com/ClickHouse/ClickHouse/pull/47423) ([Antonio Andelic](https://github.com/antonio2368)).
* Support Decimals and Date32 in the Avro format. [#47434](https://github.com/ClickHouse/ClickHouse/pull/47434) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not start the server if an interrupted conversion from `Ordinary` to `Atomic` was detected; print a better error message with troubleshooting instructions. [#47487](https://github.com/ClickHouse/ClickHouse/pull/47487) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a new column `kind` to `system.opentelemetry_span_log`. This column holds the value of [SpanKind](https://opentelemetry.io/docs/reference/specification/trace/api/#spankind) defined in OpenTelemetry. [#47499](https://github.com/ClickHouse/ClickHouse/pull/47499) ([Frank Chen](https://github.com/FrankChen021)).
* If both the backup and the data being restored are in S3, server-side copy is used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add SSL User Certificate authentication to the native protocol. Closes [#47077](https://github.com/ClickHouse/ClickHouse/issues/47077). [#47596](https://github.com/ClickHouse/ClickHouse/pull/47596) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow reading/writing nested arrays in Protobuf with only the root field name as the column name. Previously the column name had to contain all nested field names (like `a.b.c Array(Array(Array(UInt32)))`); now you can use just `a Array(Array(Array(UInt32)))`. [#47650](https://github.com/ClickHouse/ClickHouse/pull/47650) ([Kruglov Pavel](https://github.com/Avogar)).
* Added an optional `STRICT` modifier for `SYSTEM SYNC REPLICA` which makes the query wait for the replication queue to become empty (just like it worked before https://github.com/ClickHouse/ClickHouse/pull/45648) (example after this list). [#47659](https://github.com/ClickHouse/ClickHouse/pull/47659) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve the names of some span logs. [#47667](https://github.com/ClickHouse/ClickHouse/pull/47667) ([Frank Chen](https://github.com/FrankChen021)).
* ReplicatedMergeTree with zero-copy replication now puts less load on ZooKeeper. [#47676](https://github.com/ClickHouse/ClickHouse/pull/47676) ([alesapin](https://github.com/alesapin)).
* Prevent using too-long chains of aggregate function combinators (they can lead to slow queries in the analysis stage). This closes [#47715](https://github.com/ClickHouse/ClickHouse/issues/47715). [#47716](https://github.com/ClickHouse/ClickHouse/pull/47716) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support subqueries in parameterized views. Resolves [#46741](https://github.com/ClickHouse/ClickHouse/issues/46741). Implementation: pass the parameter `is_create_parameterized_view` to subquery processing. Testing: added a test case with a subquery for a parameterized view. [#47725](https://github.com/ClickHouse/ClickHouse/pull/47725) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix memory leak in MySQL integration (reproduces with `connection_auto_close=1`). [#47732](https://github.com/ClickHouse/ClickHouse/pull/47732) ([Kseniia Sumarokova](https://github.com/kssenii)).
* The AST fuzzer now supports fuzzing `EXPLAIN` queries. [#47803](https://github.com/ClickHouse/ClickHouse/pull/47803) ([flynn](https://github.com/ucasfl)).
* Fixed the error message printed when Decimal parameters are incorrect. [#47812](https://github.com/ClickHouse/ClickHouse/pull/47812) ([Yu Feng](https://github.com/Vigor-jpg)).
* Add `X-ClickHouse-Query-Id` to the HTTP response when a query fails to execute. [#47813](https://github.com/ClickHouse/ClickHouse/pull/47813) ([Frank Chen](https://github.com/FrankChen021)).
* The AST fuzzer can now randomly turn fuzzed `SELECT` queries into `EXPLAIN` queries. [#47852](https://github.com/ClickHouse/ClickHouse/pull/47852) ([flynn](https://github.com/ucasfl)).
* Improved the overall performance by better utilizing the local replica, and forbid reading with parallel replicas from non-replicated MergeTree by default. [#47858](https://github.com/ClickHouse/ClickHouse/pull/47858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* More accurate CPU usage indication for the client: account for usage in some long-living server threads (Segmentator) and do regular CPU accounting for every thread. [#47870](https://github.com/ClickHouse/ClickHouse/pull/47870) ([Sergei Trifonov](https://github.com/serxa)).
* The setting `exact_rows_before_limit` makes `rows_before_limit_at_least` accurately reflect the number of rows read before the limit was reached. This pull request addresses issues encountered when the query involves distributed processing across multiple shards or sorting operations; prior to this update, these scenarios did not work as intended. [#47874](https://github.com/ClickHouse/ClickHouse/pull/47874) ([Amos Bird](https://github.com/amosbird)).
* ThreadPool metrics introspection. [#47880](https://github.com/ClickHouse/ClickHouse/pull/47880) ([Azat Khuzhin](https://github.com/azat)).
* Add `WriteBufferFromS3Microseconds` and `WriteBufferFromS3RequestsErrors` profile events. [#47885](https://github.com/ClickHouse/ClickHouse/pull/47885) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `--link` and `--noninteractive` (`-y`) options to `clickhouse install`. Closes [#47750](https://github.com/ClickHouse/ClickHouse/issues/47750). [#47887](https://github.com/ClickHouse/ClickHouse/pull/47887) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix a Decimal256 text output issue on s390x. [#47932](https://github.com/ClickHouse/ClickHouse/pull/47932) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Fixed an `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix a case when the (optional) path was not added to an encrypted disk configuration. [#47981](https://github.com/ClickHouse/ClickHouse/pull/47981) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `*OrNull()` and `*OrZero()` variants for `parseDateTime()`, and add the alias `str_to_date` for MySQL parity (example after this list). [#48000](https://github.com/ClickHouse/ClickHouse/pull/48000) ([Robert Schulze](https://github.com/rschu1ze)).
* Improve the code around `background_..._pool_size` settings reading. They should be configured via the main server configuration file. [#48055](https://github.com/ClickHouse/ClickHouse/pull/48055) ([filimonov](https://github.com/filimonov)).
* Support CTEs in parameterized views. Implementation: allow query parameters while evaluating scalar subqueries. Testing: added a test case with a CTE for a parameterized view. [#48065](https://github.com/ClickHouse/ClickHouse/pull/48065) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add a `NOSIGN` keyword for the S3 table function and storage engine to avoid signing requests with provided credentials. Add a `no_sign_request` config option for all functionality using S3 (example after this list). [#48092](https://github.com/ClickHouse/ClickHouse/pull/48092) ([Antonio Andelic](https://github.com/antonio2368)).
* Support big integers `(U)Int128`/`(U)Int256`, `Map` with any key type, and `DateTime64` with any precision (not only 3 and 6). [#48119](https://github.com/ClickHouse/ClickHouse/pull/48119) ([Kruglov Pavel](https://github.com/Avogar)).
* Support more ClickHouse types in the MsgPack format: (U)Int128/(U)Int256, Enum8(16), Date32, Decimal(32|64|128|256), Tuples. [#48124](https://github.com/ClickHouse/ClickHouse/pull/48124) ([Kruglov Pavel](https://github.com/Avogar)).
* The output of some SHOW ... statements is now sorted. [#48127](https://github.com/ClickHouse/ClickHouse/pull/48127) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow skipping errors related to unknown enum values in row input formats. [#48133](https://github.com/ClickHouse/ClickHouse/pull/48133) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add an `allow_distributed_ddl_queries` option to disallow distributed DDL queries for the cluster in the config. [#48171](https://github.com/ClickHouse/ClickHouse/pull/48171) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Determine the hosts' order in the `SHOW CLUSTER` query, a follow-up for [#48127](https://github.com/ClickHouse/ClickHouse/issues/48127) and [#46240](https://github.com/ClickHouse/ClickHouse/issues/46240). [#48235](https://github.com/ClickHouse/ClickHouse/pull/48235) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
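
A minimal sketch of `mapFromArrays`:

```sql
-- Build a Map from parallel key and value arrays.
SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]) AS m;  -- {'a':1,'b':2,'c':3}
```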
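
A minimal sketch of the `Bool` support in `JSONType`:

```sql
-- JSONType now reports Bool for JSON true/false instead of Null.
SELECT JSONType('{"flag": true}', 'flag') AS t;  -- 'Bool'
```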
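
A sketch of the `STRICT` modifier; `db.events` is a hypothetical replicated table:

```sql
-- Wait until the replication queue of this replica is completely empty.
SYSTEM SYNC REPLICA db.events STRICT;
```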
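
A sketch of the new parsing variants; it assumes missing time components default to zero:

```sql
-- str_to_date is an alias of parseDateTime; the *OrNull variant returns NULL
-- instead of throwing on malformed input.
SELECT str_to_date('2023-04-01', '%Y-%m-%d') AS d,
       parseDateTimeOrNull('not a date', '%Y-%m-%d') AS bad;  -- bad is NULL
```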
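
A sketch of anonymous S3 access with `NOSIGN`; the bucket URL is a placeholder and the argument order is an assumption (NOSIGN standing in for the credentials position):

```sql
-- Read a public bucket without signing the requests.
SELECT count()
FROM s3('https://example-bucket.s3.amazonaws.com/data/*.csv', NOSIGN, 'CSVWithNames');
```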

#### Build/Testing/Packaging Improvement

* Split the stress test and the backward compatibility check (now the Upgrade check). [#44879](https://github.com/ClickHouse/ClickHouse/pull/44879) ([Kruglov Pavel](https://github.com/Avogar)).
* Use sccache as a replacement for ccache, with S3 as the cache backend. [#46240](https://github.com/ClickHouse/ClickHouse/pull/46240) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Updated the Ubuntu image. [#46784](https://github.com/ClickHouse/ClickHouse/pull/46784) ([Julio Jimenez](https://github.com/juliojimenez)).
* Adds a prompt to allow the removal of an existing `clickhouse` download when using the "curl | sh" download of ClickHouse. The prompt is "ClickHouse binary clickhouse already exists. Overwrite? [y/N] ". [#46859](https://github.com/ClickHouse/ClickHouse/pull/46859) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Fix an error during server startup on old distros (e.g. Amazon Linux 2) and on ARM where glibc 2.28 symbols were not found. [#47008](https://github.com/ClickHouse/ClickHouse/pull/47008) ([Robert Schulze](https://github.com/rschu1ze)).
* Clang 16 is set to release in the next few days, making it an opportune time to update. [#47027](https://github.com/ClickHouse/ClickHouse/pull/47027) ([Amos Bird](https://github.com/amosbird)).
* Added a CI check which ensures ClickHouse can run with an old glibc on ARM. [#47063](https://github.com/ClickHouse/ClickHouse/pull/47063) ([Robert Schulze](https://github.com/rschu1ze)).
* ClickHouse now builds with C++23. [#47424](https://github.com/ClickHouse/ClickHouse/pull/47424) ([Robert Schulze](https://github.com/rschu1ze)).
* Fixed an issue with starting `clickhouse-test` against a custom clickhouse binary with `-b`. ... [#47578](https://github.com/ClickHouse/ClickHouse/pull/47578) ([Vasily Nemkov](https://github.com/Enmk)).
* Add a style check to prevent incorrect usage of the `NDEBUG` macro. [#47699](https://github.com/ClickHouse/ClickHouse/pull/47699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build a little. [#47714](https://github.com/ClickHouse/ClickHouse/pull/47714) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump vectorscan to 5.4.9. [#47955](https://github.com/ClickHouse/ClickHouse/pull/47955) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a unit test to assert Arrow fatal logging does not abort. It covers the changes in https://github.com/ClickHouse/arrow/pull/16. [#47958](https://github.com/ClickHouse/ClickHouse/pull/47958) ([Arthur Passos](https://github.com/arthurpassos)).
* Restore the ability of the native macOS debug server build to start (this time for real). [#48050](https://github.com/ClickHouse/ClickHouse/pull/48050) ([Robert Schulze](https://github.com/rschu1ze)).
* Functional tests will trigger JIT compilation more frequently, in a randomized fashion. See [#48120](https://github.com/ClickHouse/ClickHouse/issues/48120). [#48196](https://github.com/ClickHouse/ClickHouse/pull/48196) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `clickhouse/clickhouse-keeper` image used to be pushed only with the `-alpine` tags, e.g. `latest-alpine`. As suggested in https://github.com/ClickHouse/examples/pull/2, now it is pushed without the suffix as well. [#48236](https://github.com/ClickHouse/ClickHouse/pull/48236) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix creating a materialized view with MaterializedPostgreSQL [#40807](https://github.com/ClickHouse/ClickHouse/pull/40807) ([Maksim Buren](https://github.com/maks-buren630501)).
* Fix formats parser resetting, test processing bad messages in Kafka [#45693](https://github.com/ClickHouse/ClickHouse/pull/45693) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix several `RENAME COLUMN` bugs. [#45911](https://github.com/ClickHouse/ClickHouse/pull/45911) ([alesapin](https://github.com/alesapin)).
* Fix data size calculation in Keeper [#46086](https://github.com/ClickHouse/ClickHouse/pull/46086) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixes for 993 [#46384](https://github.com/ClickHouse/ClickHouse/pull/46384) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect alias recursion in QueryNormalizer [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Fix IPv4/IPv6 serialization/deserialization in binary formats [#46616](https://github.com/ClickHouse/ClickHouse/pull/46616) ([Kruglov Pavel](https://github.com/Avogar)).
* ActionsDAG: do not change the result of and() during optimization [#46653](https://github.com/ClickHouse/ClickHouse/pull/46653) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix query cancellation when a client dies [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible clickhouse-local abort on JSONEachRow schema inference [#46731](https://github.com/ClickHouse/ClickHouse/pull/46731) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix combined PREWHERE column accumulated from multiple steps [#46785](https://github.com/ClickHouse/ClickHouse/pull/46785) ([Alexander Gololobov](https://github.com/davenger)).
* Use the initial range for fetching the file size in the HTTP read buffer [#46824](https://github.com/ClickHouse/ClickHouse/pull/46824) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix progress bar with URL [#46830](https://github.com/ClickHouse/ClickHouse/pull/46830) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not allow const and non-deterministic secondary indexes [#46839](https://github.com/ClickHouse/ClickHouse/pull/46839) ([Anton Popov](https://github.com/CurtizJ)).
* Fix an MSan report in the `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a bug in the `Map` data type [#46856](https://github.com/ClickHouse/ClickHouse/pull/46856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong results of some LIKE searches when the LIKE pattern contains quoted non-quotable characters [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix - WITH FILL would produce an abort when FillingTransform processed an empty block [#46897](https://github.com/ClickHouse/ClickHouse/pull/46897) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix date and int inference from string in JSON [#46972](https://github.com/ClickHouse/ClickHouse/pull/46972) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix a typo in the systemd service definition [#47051](https://github.com/ClickHouse/ClickHouse/pull/47051) ([Palash Goel](https://github.com/palash-goel)).
* Fix the NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Fix the problem that a `ReplicatedMergeTree` table failed to insert two similar pieces of data when `part_type` was configured as `InMemory` [#47121](https://github.com/ClickHouse/ClickHouse/pull/47121) ([liding1992](https://github.com/liding1992)).
* External dictionaries / library-bridge: Fix the error "unknown library method 'extDict_libClone'" [#47136](https://github.com/ClickHouse/ClickHouse/pull/47136) ([alex filatov](https://github.com/phil-88)).
* Fix a race in grace hash join with limit [#47153](https://github.com/ClickHouse/ClickHouse/pull/47153) ([Vladimir C](https://github.com/vdimir)).
* Fix concrete columns PREWHERE support [#47154](https://github.com/ClickHouse/ClickHouse/pull/47154) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible deadlock in QueryStatus [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).
* Backup_Restore_concurrency_check_node [#47216](https://github.com/ClickHouse/ClickHouse/pull/47216) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Forbid insert select for the same StorageJoin [#47260](https://github.com/ClickHouse/ClickHouse/pull/47260) ([Vladimir C](https://github.com/vdimir)).
* Skip merged partitions for `min_age_to_force_merge_seconds` merges [#47303](https://github.com/ClickHouse/ClickHouse/pull/47303) ([Antonio Andelic](https://github.com/antonio2368)).
* Modify find_first_symbols so it works as expected for find_first_not_symbols [#47304](https://github.com/ClickHouse/ClickHouse/pull/47304) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix big numbers inference in CSV [#47410](https://github.com/ClickHouse/ClickHouse/pull/47410) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable the logical expression optimizer for expressions with aliases. [#47451](https://github.com/ClickHouse/ClickHouse/pull/47451) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove a feature [#47456](https://github.com/ClickHouse/ClickHouse/pull/47456) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix an error in `decodeURLComponent` [#47457](https://github.com/ClickHouse/ClickHouse/pull/47457) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parameterized view bug fix 47287 47247 [#47495](https://github.com/ClickHouse/ClickHouse/pull/47495) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fuzzer of data formats [#47519](https://github.com/ClickHouse/ClickHouse/pull/47519) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix the monotonicity check for DateTime64 [#47526](https://github.com/ClickHouse/ClickHouse/pull/47526) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix block structure mismatch for a nullable LowCardinality column [#47537](https://github.com/ClickHouse/ClickHouse/pull/47537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Proper fix for a bug in Parquet, revert reverted [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) [#47538](https://github.com/ClickHouse/ClickHouse/pull/47538) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix BSONEachRow parallel parsing when the document size is invalid [#47540](https://github.com/ClickHouse/ClickHouse/pull/47540) ([Kruglov Pavel](https://github.com/Avogar)).
* Preserve the error in system.distribution_queue on SYSTEM FLUSH DISTRIBUTED [#47541](https://github.com/ClickHouse/ClickHouse/pull/47541) ([Azat Khuzhin](https://github.com/azat)).
* Revert "Revert "Backup_Restore_concurrency_check_node"" [#47586](https://github.com/ClickHouse/ClickHouse/pull/47586) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Check for duplicate columns in the BSONEachRow format [#47609](https://github.com/ClickHouse/ClickHouse/pull/47609) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix waiting for the zero-copy lock during a move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Fix aggregation by partitions [#47634](https://github.com/ClickHouse/ClickHouse/pull/47634) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a bug in tuple-as-array serialization in the BSONEachRow format [#47690](https://github.com/ClickHouse/ClickHouse/pull/47690) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a crash in polygonsSymDifferenceCartesian [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Fix reading from storage `File` of compressed files with `zlib` and `gzip` compression [#47796](https://github.com/ClickHouse/ClickHouse/pull/47796) ([Anton Popov](https://github.com/CurtizJ)).
* Improve empty query detection for PostgreSQL (for the pgx golang driver) [#47854](https://github.com/ClickHouse/ClickHouse/pull/47854) ([Azat Khuzhin](https://github.com/azat)).
* Fix the DateTime monotonicity check for LowCardinality [#47860](https://github.com/ClickHouse/ClickHouse/pull/47860) ([Antonio Andelic](https://github.com/antonio2368)).
* Use restore_threads (not backup_threads) for RESTORE ASYNC [#47861](https://github.com/ClickHouse/ClickHouse/pull/47861) ([Azat Khuzhin](https://github.com/azat)).
* Fix DROP COLUMN with ReplicatedMergeTree containing projections [#47883](https://github.com/ClickHouse/ClickHouse/pull/47883) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix for Replicated database recovery [#47901](https://github.com/ClickHouse/ClickHouse/pull/47901) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix "Field value too long" in catboostEvaluate() [#47970](https://github.com/ClickHouse/ClickHouse/pull/47970) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix [#36971](https://github.com/ClickHouse/ClickHouse/issues/36971): Watchdog: exit with a non-zero code if the child process exits [#47973](https://github.com/ClickHouse/ClickHouse/pull/47973) ([Коренберг Марк](https://github.com/socketpair)).
* Fix for "index file `cidx` is unexpectedly long" [#48010](https://github.com/ClickHouse/ClickHouse/pull/48010) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix the MaterializedPostgreSQL query to get attributes (replica-identity) [#48015](https://github.com/ClickHouse/ClickHouse/pull/48015) ([Solomatov Sergei](https://github.com/solomatovs)).
* parseDateTime(): Fix UB (signed integer overflow) [#48019](https://github.com/ClickHouse/ClickHouse/pull/48019) ([Robert Schulze](https://github.com/rschu1ze)).
* Use unique names for Records in Avro to avoid reusing its schema [#48057](https://github.com/ClickHouse/ClickHouse/pull/48057) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a crash in explain graph with StorageMerge [#48102](https://github.com/ClickHouse/ClickHouse/pull/48102) ([Vladimir C](https://github.com/vdimir)).
* Correctly set TCP/HTTP socket timeouts in Keeper [#48108](https://github.com/ClickHouse/ClickHouse/pull/48108) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a possible member call on null pointer in the Avro format [#48184](https://github.com/ClickHouse/ClickHouse/pull/48184) ([Kruglov Pavel](https://github.com/Avogar)).

#### Build Improvement

* Update krb5 to 1.20.1-final to mitigate CVE-2022-42898. [#46485](https://github.com/ClickHouse/ClickHouse/pull/46485) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Fixed random crash issues caused by bad pointers in libunwind for s390x. [#46755](https://github.com/ClickHouse/ClickHouse/pull/46755) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed http xz compression issue for s390x. [#46832](https://github.com/ClickHouse/ClickHouse/pull/46832) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed murmurhash function for s390x. [#47036](https://github.com/ClickHouse/ClickHouse/pull/47036) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed halfMD5 and broken cityHash function for s390x. [#47115](https://github.com/ClickHouse/ClickHouse/pull/47115) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed farmhash functions for s390x. [#47223](https://github.com/ClickHouse/ClickHouse/pull/47223) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed endian issue in hashing tuples for s390x. [#47371](https://github.com/ClickHouse/ClickHouse/pull/47371) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed SipHash integer hashing issue and byte order issue in random integer data from GenerateRandom storage engine for s390x. [#47576](https://github.com/ClickHouse/ClickHouse/pull/47576) ([Harry Lee](https://github.com/HarryLeeIBM)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Fix several `RENAME COLUMN` bugs."'. [#46909](https://github.com/ClickHouse/ClickHouse/pull/46909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add join_algorithm='grace_hash' to stress tests"'. [#46988](https://github.com/ClickHouse/ClickHouse/pull/46988) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* NO CL ENTRY: 'Revert "Give users option of overwriting"'. [#47169](https://github.com/ClickHouse/ClickHouse/pull/47169) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "standardize admonitions"'. [#47413](https://github.com/ClickHouse/ClickHouse/pull/47413) ([Rich Raposa](https://github.com/rfraposa)).
* NO CL ENTRY: 'Revert "Backup_Restore_concurrency_check_node"'. [#47581](https://github.com/ClickHouse/ClickHouse/pull/47581) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Update storing-data.md'. [#47598](https://github.com/ClickHouse/ClickHouse/pull/47598) ([San](https://github.com/santrancisco)).
* NO CL ENTRY: 'Revert "Fix BSONEachRow parallel parsing when document size is invalid"'. [#47672](https://github.com/ClickHouse/ClickHouse/pull/47672) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "New navigation"'. [#47694](https://github.com/ClickHouse/ClickHouse/pull/47694) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Analyzer planner fixes before enable by default"'. [#47721](https://github.com/ClickHouse/ClickHouse/pull/47721) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Analyzer planner fixes before enable by default""'. [#47748](https://github.com/ClickHouse/ClickHouse/pull/47748) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Add sanity checks for writing number in variable length format"'. [#47850](https://github.com/ClickHouse/ClickHouse/pull/47850) ([Robert Schulze](https://github.com/rschu1ze)).
* NO CL ENTRY: 'Revert "Revert "Revert "Backup_Restore_concurrency_check_node"""'. [#47963](https://github.com/ClickHouse/ClickHouse/pull/47963) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Test differences between using materialize_ttl_recalculate_only=1/0 [#45304](https://github.com/ClickHouse/ClickHouse/pull/45304) ([Jordi Villar](https://github.com/jrdi)).
|
||||
* Fix query in stress script [#45480](https://github.com/ClickHouse/ClickHouse/pull/45480) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||
* Add join_algorithm='grace_hash' to stress tests [#45607](https://github.com/ClickHouse/ClickHouse/pull/45607) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||
* Support `group_by_use_nulls` setting in new analyzer [#45910](https://github.com/ClickHouse/ClickHouse/pull/45910) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Randomize setting `ratio_of_defaults_for_sparse_serialization` [#46118](https://github.com/ClickHouse/ClickHouse/pull/46118) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Add CrossToInnerJoinPass [#46408](https://github.com/ClickHouse/ClickHouse/pull/46408) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix flakiness of test_backup_restore_on_cluster/test_disallow_concurrency [#46517](https://github.com/ClickHouse/ClickHouse/pull/46517) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Map field to string fix [#46618](https://github.com/ClickHouse/ClickHouse/pull/46618) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Enable perf tests added in [#45364](https://github.com/ClickHouse/ClickHouse/issues/45364) [#46623](https://github.com/ClickHouse/ClickHouse/pull/46623) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Logical expression optimizer in new analyzer [#46644](https://github.com/ClickHouse/ClickHouse/pull/46644) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Named collections: finish replacing old code for storages [#46647](https://github.com/ClickHouse/ClickHouse/pull/46647) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Make tiny improvements [#46659](https://github.com/ClickHouse/ClickHouse/pull/46659) ([ltrk2](https://github.com/ltrk2)).
|
||||
* Fix openssl/s390x build (setenv + link order) [#46684](https://github.com/ClickHouse/ClickHouse/pull/46684) ([Boris Kuschel](https://github.com/bkuschel)).
|
||||
* Analyzer AutoFinalOnQueryPass fix [#46729](https://github.com/ClickHouse/ClickHouse/pull/46729) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Mark failed build reports as pending on reruns [#46736](https://github.com/ClickHouse/ClickHouse/pull/46736) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Do not reanalyze expressions from aggregation in projection [#46738](https://github.com/ClickHouse/ClickHouse/pull/46738) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Update CHANGELOG.md [#46766](https://github.com/ClickHouse/ClickHouse/pull/46766) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Poco: Remove some dead code [#46768](https://github.com/ClickHouse/ClickHouse/pull/46768) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* More concise logging at trace level for PREWHERE steps [#46771](https://github.com/ClickHouse/ClickHouse/pull/46771) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Follow-up to [#41534](https://github.com/ClickHouse/ClickHouse/issues/41534) [#46775](https://github.com/ClickHouse/ClickHouse/pull/46775) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix timeout for all expect tests (wrong usage of expect_after timeout) [#46779](https://github.com/ClickHouse/ClickHouse/pull/46779) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Reduce updates of Mergeable Check [#46781](https://github.com/ClickHouse/ClickHouse/pull/46781) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Updated Slack invite link [#46783](https://github.com/ClickHouse/ClickHouse/pull/46783) ([clickhouse-adrianfraguela](https://github.com/clickhouse-adrianfraguela)).
|
||||
* Print all stacktraces in hung check [#46787](https://github.com/ClickHouse/ClickHouse/pull/46787) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Quick temporary fix for stress tests [#46789](https://github.com/ClickHouse/ClickHouse/pull/46789) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Update version after release [#46792](https://github.com/ClickHouse/ClickHouse/pull/46792) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Update version_date.tsv and changelogs after v23.2.1.2537-stable [#46794](https://github.com/ClickHouse/ClickHouse/pull/46794) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Remove ZSTD version from CMake output [#46796](https://github.com/ClickHouse/ClickHouse/pull/46796) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Update version_date.tsv and changelogs after v22.11.6.44-stable [#46801](https://github.com/ClickHouse/ClickHouse/pull/46801) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* CMake: Add best effort checks that the build machine isn't too old [#46803](https://github.com/ClickHouse/ClickHouse/pull/46803) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix async reading pipeline when small limit is present [#46804](https://github.com/ClickHouse/ClickHouse/pull/46804) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Cleanup string search code [#46814](https://github.com/ClickHouse/ClickHouse/pull/46814) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Stateless cmake version [#46821](https://github.com/ClickHouse/ClickHouse/pull/46821) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* refine regexp tree dictionary [#46822](https://github.com/ClickHouse/ClickHouse/pull/46822) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Non-significant change [#46844](https://github.com/ClickHouse/ClickHouse/pull/46844) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add a trap [#46845](https://github.com/ClickHouse/ClickHouse/pull/46845) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Better handling of fatal errors [#46846](https://github.com/ClickHouse/ClickHouse/pull/46846) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add a test for [#43184](https://github.com/ClickHouse/ClickHouse/issues/43184) [#46848](https://github.com/ClickHouse/ClickHouse/pull/46848) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix wrong function name [#46849](https://github.com/ClickHouse/ClickHouse/pull/46849) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add a test for [#45214](https://github.com/ClickHouse/ClickHouse/issues/45214) [#46850](https://github.com/ClickHouse/ClickHouse/pull/46850) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Final fixes for expect tests [#46857](https://github.com/ClickHouse/ClickHouse/pull/46857) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Small optimization of LIKE patterns with > 1 trailing % [#46869](https://github.com/ClickHouse/ClickHouse/pull/46869) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Add new metrics to system.asynchronous_metrics [#46886](https://github.com/ClickHouse/ClickHouse/pull/46886) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix flaky `test_concurrent_queries_restriction_by_query_kind` [#46887](https://github.com/ClickHouse/ClickHouse/pull/46887) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test test_async_backups_to_same_destination. [#46888](https://github.com/ClickHouse/ClickHouse/pull/46888) ([Vitaly Baranov](https://github.com/vitlibar)).
* Make ASTSelectQuery::formatImpl() more robust [#46889](https://github.com/ClickHouse/ClickHouse/pull/46889) ([Robert Schulze](https://github.com/rschu1ze)).
* tests: fix 02116_interactive_hello for "official build" [#46911](https://github.com/ClickHouse/ClickHouse/pull/46911) ([Azat Khuzhin](https://github.com/azat)).
* Fix some expect tests leftovers and enable them in fasttest [#46915](https://github.com/ClickHouse/ClickHouse/pull/46915) ([Azat Khuzhin](https://github.com/azat)).
* Increase ddl timeout for DROP statement in backup restore tests [#46920](https://github.com/ClickHouse/ClickHouse/pull/46920) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* A better alternative to [#46344](https://github.com/ClickHouse/ClickHouse/issues/46344) [#46921](https://github.com/ClickHouse/ClickHouse/pull/46921) ([Robert Schulze](https://github.com/rschu1ze)).
* Code review from @tavplubix [#46922](https://github.com/ClickHouse/ClickHouse/pull/46922) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Planner: trivial count optimization [#46923](https://github.com/ClickHouse/ClickHouse/pull/46923) ([Igor Nikonov](https://github.com/devcrafter)).
* Typo: SIZES_OF_ARRAYS_DOESNT_MATCH --> SIZES_OF_ARRAYS_DONT_MATCH [#46940](https://github.com/ClickHouse/ClickHouse/pull/46940) ([Robert Schulze](https://github.com/rschu1ze)).
* Another fix for clone() for ASTColumnMatchers [#46947](https://github.com/ClickHouse/ClickHouse/pull/46947) ([Nikolay Degterinsky](https://github.com/evillique)).
* Un-inline likePatternToRegexp() [#46950](https://github.com/ClickHouse/ClickHouse/pull/46950) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix missing format_description [#46959](https://github.com/ClickHouse/ClickHouse/pull/46959) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* ARM: Activate LDAPR with -march flag instead via -XClang [#46960](https://github.com/ClickHouse/ClickHouse/pull/46960) ([Robert Schulze](https://github.com/rschu1ze)).
* Preset description on the tweak reset [#46963](https://github.com/ClickHouse/ClickHouse/pull/46963) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version_date.tsv and changelogs after v22.3.19.6-lts [#46964](https://github.com/ClickHouse/ClickHouse/pull/46964) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.8.14.53-lts [#46969](https://github.com/ClickHouse/ClickHouse/pull/46969) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Better exception messages when schema_inference_hints is ill-formatted [#46971](https://github.com/ClickHouse/ClickHouse/pull/46971) ([Kruglov Pavel](https://github.com/Avogar)).
* Decrease log level in "disks" [#46976](https://github.com/ClickHouse/ClickHouse/pull/46976) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Change the cherry-pick PR body [#46977](https://github.com/ClickHouse/ClickHouse/pull/46977) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Rename recent stateless tests to fix order [#46991](https://github.com/ClickHouse/ClickHouse/pull/46991) ([Kruglov Pavel](https://github.com/Avogar)).
* Pass headers from StorageURL to WriteBufferFromHTTP [#46996](https://github.com/ClickHouse/ClickHouse/pull/46996) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Change level log in executeQuery [#46997](https://github.com/ClickHouse/ClickHouse/pull/46997) ([Andrey Bystrov](https://github.com/AndyBys)).
* Add thevar1able to trusted contributors [#46998](https://github.com/ClickHouse/ClickHouse/pull/46998) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Use /etc/default/clickhouse in systemd too [#47003](https://github.com/ClickHouse/ClickHouse/pull/47003) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tmp_path_template in HTTPHandler::processQuery [#47007](https://github.com/ClickHouse/ClickHouse/pull/47007) ([Vladimir C](https://github.com/vdimir)).
* Fix flaky azure test [#47011](https://github.com/ClickHouse/ClickHouse/pull/47011) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Temporary enable force_sync for keeper in CI [#47024](https://github.com/ClickHouse/ClickHouse/pull/47024) ([alesapin](https://github.com/alesapin)).
* ActionsDAG: do not change result of and() during optimization - part 2 [#47028](https://github.com/ClickHouse/ClickHouse/pull/47028) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Add upgrade check to stateful dependent field [#47031](https://github.com/ClickHouse/ClickHouse/pull/47031) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable path check in SQLite storage for clickhouse-local [#47052](https://github.com/ClickHouse/ClickHouse/pull/47052) ([Nikolay Degterinsky](https://github.com/evillique)).
* Terminate long-running offline non-busy runners in EC2 [#47064](https://github.com/ClickHouse/ClickHouse/pull/47064) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix Keeper with `force_sync = false` [#47065](https://github.com/ClickHouse/ClickHouse/pull/47065) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v23.2.2.20-stable [#47069](https://github.com/ClickHouse/ClickHouse/pull/47069) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.1.4.58-stable [#47070](https://github.com/ClickHouse/ClickHouse/pull/47070) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.12.4.76-stable [#47074](https://github.com/ClickHouse/ClickHouse/pull/47074) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix empty result when selection from only one side of join in analyzer [#47093](https://github.com/ClickHouse/ClickHouse/pull/47093) ([Vladimir C](https://github.com/vdimir)).
* Suppress "Cannot flush" for Distributed tables in upgrade check [#47095](https://github.com/ClickHouse/ClickHouse/pull/47095) ([Azat Khuzhin](https://github.com/azat)).
* Make stacktraces in hung check more readable [#47096](https://github.com/ClickHouse/ClickHouse/pull/47096) ([Alexander Tokmakov](https://github.com/tavplubix)).
* release lambda resources before detaching thread group [#47098](https://github.com/ClickHouse/ClickHouse/pull/47098) ([Sema Checherinda](https://github.com/CheSema)).
* Analyzer Planner fixes before enable by default [#47101](https://github.com/ClickHouse/ClickHouse/pull/47101) ([Maksim Kita](https://github.com/kitaisreal)).
* do flushUntrackedMemory when context switches [#47102](https://github.com/ClickHouse/ClickHouse/pull/47102) ([Sema Checherinda](https://github.com/CheSema)).
* fix: keeper systemd service file include invalid inline comment [#47105](https://github.com/ClickHouse/ClickHouse/pull/47105) ([SuperDJY](https://github.com/cmsxbc)).
* Add code for autoscaling lambda [#47107](https://github.com/ClickHouse/ClickHouse/pull/47107) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Enable lightweight delete support by default [#47109](https://github.com/ClickHouse/ClickHouse/pull/47109) ([Alexander Gololobov](https://github.com/davenger)).
* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Shorten some code with CTAD [#47139](https://github.com/ClickHouse/ClickHouse/pull/47139) ([Robert Schulze](https://github.com/rschu1ze)).
* Make 01710_projections more stable. [#47145](https://github.com/ClickHouse/ClickHouse/pull/47145) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* fix_JSON_searchField [#47147](https://github.com/ClickHouse/ClickHouse/pull/47147) ([Aleksei Tikhomirov](https://github.com/aletik256)).
* Mark 01771_bloom_filter_not_has as no-parallel and long [#47148](https://github.com/ClickHouse/ClickHouse/pull/47148) ([Azat Khuzhin](https://github.com/azat)).
* Use unique names and paths in `test_replicated_database` [#47152](https://github.com/ClickHouse/ClickHouse/pull/47152) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add stupid retries in clickhouse-test health check. [#47158](https://github.com/ClickHouse/ClickHouse/pull/47158) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* 02346_full_text_search.sql: Add result separators to simplify analysis [#47166](https://github.com/ClickHouse/ClickHouse/pull/47166) ([Robert Schulze](https://github.com/rschu1ze)).
* More correct handling of fatal errors [#47175](https://github.com/ClickHouse/ClickHouse/pull/47175) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update read in StorageMemory [#47180](https://github.com/ClickHouse/ClickHouse/pull/47180) ([Konstantin Morozov](https://github.com/k-morozov)).
* Doc update for mapFromArrays() [#47183](https://github.com/ClickHouse/ClickHouse/pull/47183) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix failure context for Upgrade check [#47191](https://github.com/ClickHouse/ClickHouse/pull/47191) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add support for different expected errors [#47196](https://github.com/ClickHouse/ClickHouse/pull/47196) ([Raúl Marín](https://github.com/Algunenano)).
* Fix ip coding on s390x [#47208](https://github.com/ClickHouse/ClickHouse/pull/47208) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Add real client (initiator server) address into the logs for interserver mode [#47214](https://github.com/ClickHouse/ClickHouse/pull/47214) ([Azat Khuzhin](https://github.com/azat)).
* Fix 01019_alter_materialized_view_consistent [#47215](https://github.com/ClickHouse/ClickHouse/pull/47215) ([Vladimir C](https://github.com/vdimir)).
* Fix RewriteArrayExistsToHasPass [#47225](https://github.com/ClickHouse/ClickHouse/pull/47225) ([Maksim Kita](https://github.com/kitaisreal)).
* Release shared ptrs after finishing a transaction [#47245](https://github.com/ClickHouse/ClickHouse/pull/47245) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add default constructor for `MultiReadResponse` [#47254](https://github.com/ClickHouse/ClickHouse/pull/47254) ([Antonio Andelic](https://github.com/antonio2368)).
* Join threads if exception happened in `ZooKeeperImpl` constructor [#47261](https://github.com/ClickHouse/ClickHouse/pull/47261) ([Antonio Andelic](https://github.com/antonio2368)).
* use std::lerp, constexpr hex.h [#47268](https://github.com/ClickHouse/ClickHouse/pull/47268) ([Mike Kot](https://github.com/myrrc)).
* Update version_date.tsv and changelogs after v23.2.3.17-stable [#47269](https://github.com/ClickHouse/ClickHouse/pull/47269) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix bug in zero copy replica which can lead to dataloss [#47274](https://github.com/ClickHouse/ClickHouse/pull/47274) ([alesapin](https://github.com/alesapin)).
* Fix typo [#47282](https://github.com/ClickHouse/ClickHouse/pull/47282) ([Nikolay Degterinsky](https://github.com/evillique)).
* Follow-up to [#46681](https://github.com/ClickHouse/ClickHouse/issues/46681) [#47284](https://github.com/ClickHouse/ClickHouse/pull/47284) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test 02566_ipv4_ipv6_binary_formats [#47295](https://github.com/ClickHouse/ClickHouse/pull/47295) ([Kruglov Pavel](https://github.com/Avogar)).
* Set fixed index_granularity for test 00636 [#47298](https://github.com/ClickHouse/ClickHouse/pull/47298) ([Sema Checherinda](https://github.com/CheSema)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix 02570_fallback_from_async_insert [#47308](https://github.com/ClickHouse/ClickHouse/pull/47308) ([Vladimir C](https://github.com/vdimir)).
* Catch exceptions in LiveViewPeriodicRefreshTask [#47309](https://github.com/ClickHouse/ClickHouse/pull/47309) ([Vladimir C](https://github.com/vdimir)).
* Fix MergeTreeTransaction::isReadOnly [#47310](https://github.com/ClickHouse/ClickHouse/pull/47310) ([Vladimir C](https://github.com/vdimir)).
* Fix an assertion with implicit transactions in interserver mode [#47312](https://github.com/ClickHouse/ClickHouse/pull/47312) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `File exists` error in Upgrade check [#47314](https://github.com/ClickHouse/ClickHouse/pull/47314) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support transformQueryForExternalDatabase for analyzer [#47316](https://github.com/ClickHouse/ClickHouse/pull/47316) ([Vladimir C](https://github.com/vdimir)).
* Disable parallel format in health check [#47318](https://github.com/ClickHouse/ClickHouse/pull/47318) ([Ilya Yatsishin](https://github.com/qoega)).
* Analyzer - fix combine logic for limit expression and limit setting [#47324](https://github.com/ClickHouse/ClickHouse/pull/47324) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Suppress expected errors from test 01111 in Upgrade check [#47365](https://github.com/ClickHouse/ClickHouse/pull/47365) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix GROUPING function initialization for grouping sets [#47370](https://github.com/ClickHouse/ClickHouse/pull/47370) ([Dmitry Novik](https://github.com/novikd)).
* Add join_algorithm='grace_hash' to stress tests [#47372](https://github.com/ClickHouse/ClickHouse/pull/47372) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Fix 02343_group_by_use_nulls test in new analyzer [#47373](https://github.com/ClickHouse/ClickHouse/pull/47373) ([Dmitry Novik](https://github.com/novikd)).
* Disable 02368_cancel_write_into_hdfs in stress tests [#47382](https://github.com/ClickHouse/ClickHouse/pull/47382) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer planner fixes before enable by default [#47383](https://github.com/ClickHouse/ClickHouse/pull/47383) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix `ALTER CLEAR COLUMN` with sparse columns [#47384](https://github.com/ClickHouse/ClickHouse/pull/47384) ([Anton Popov](https://github.com/CurtizJ)).
* Fix: apply reading in order for distinct [#47385](https://github.com/ClickHouse/ClickHouse/pull/47385) ([Igor Nikonov](https://github.com/devcrafter)).
* add checks for ptr [#47398](https://github.com/ClickHouse/ClickHouse/pull/47398) ([Sema Checherinda](https://github.com/CheSema)).
* Remove distinct on top of MergingAggregatedStep [#47399](https://github.com/ClickHouse/ClickHouse/pull/47399) ([Igor Nikonov](https://github.com/devcrafter)).
* Update LRUFileCachePriority.cpp [#47411](https://github.com/ClickHouse/ClickHouse/pull/47411) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make test 02473_optimize_old_parts less flaky [#47416](https://github.com/ClickHouse/ClickHouse/pull/47416) ([Michael Kolupaev](https://github.com/al13n321)).
* Add test to prevent regressions when using bitmapHasAny [#47419](https://github.com/ClickHouse/ClickHouse/pull/47419) ([Jordi Villar](https://github.com/jrdi)).
* Update README.md [#47421](https://github.com/ClickHouse/ClickHouse/pull/47421) ([Tyler Hannan](https://github.com/tylerhannan)).
* Refactor query cache (make use of CacheBase) [#47428](https://github.com/ClickHouse/ClickHouse/pull/47428) ([Robert Schulze](https://github.com/rschu1ze)).
* Suppress Hung Check with UBsan [#47429](https://github.com/ClickHouse/ClickHouse/pull/47429) ([Alexander Tokmakov](https://github.com/tavplubix)).
* [docs] Document add async_insert_max_query_number [#47431](https://github.com/ClickHouse/ClickHouse/pull/47431) ([Antonio Bonuccelli](https://github.com/nellicus)).
* Apply settings for EXPLAIN earlier (in the same way we do for SELECT). [#47433](https://github.com/ClickHouse/ClickHouse/pull/47433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update version_date.tsv and changelogs after v23.2.4.12-stable [#47448](https://github.com/ClickHouse/ClickHouse/pull/47448) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix aggregation-in-order with aliases. [#47449](https://github.com/ClickHouse/ClickHouse/pull/47449) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix 01429_join_on_error_messages [#47450](https://github.com/ClickHouse/ClickHouse/pull/47450) ([Vladimir C](https://github.com/vdimir)).
* Update version_date.tsv and changelogs after v23.1.5.24-stable [#47452](https://github.com/ClickHouse/ClickHouse/pull/47452) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.12.5.34-stable [#47453](https://github.com/ClickHouse/ClickHouse/pull/47453) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update version_date.tsv and changelogs after v22.8.15.23-lts [#47455](https://github.com/ClickHouse/ClickHouse/pull/47455) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Disable grace hash join in upgrade check [#47474](https://github.com/ClickHouse/ClickHouse/pull/47474) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Revert [#46622](https://github.com/ClickHouse/ClickHouse/issues/46622) (test_async_insert_memory) [#47476](https://github.com/ClickHouse/ClickHouse/pull/47476) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `00933_test_fix_extra_seek_on_compressed_cache` in releases. [#47490](https://github.com/ClickHouse/ClickHouse/pull/47490) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix long test `02371_select_projection_normal_agg.sql` [#47491](https://github.com/ClickHouse/ClickHouse/pull/47491) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Revert [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) and add a test [#47492](https://github.com/ClickHouse/ClickHouse/pull/47492) ([Kruglov Pavel](https://github.com/Avogar)).
* Planner JOIN TREE build fix [#47498](https://github.com/ClickHouse/ClickHouse/pull/47498) ([Maksim Kita](https://github.com/kitaisreal)).
* Better support of identifiers from compound expressions in analyzer [#47506](https://github.com/ClickHouse/ClickHouse/pull/47506) ([Anton Popov](https://github.com/CurtizJ)).
* Adapt some tests to pass with and without the analyzer [#47525](https://github.com/ClickHouse/ClickHouse/pull/47525) ([Raúl Marín](https://github.com/Algunenano)).
* Small enhancements [#47534](https://github.com/ClickHouse/ClickHouse/pull/47534) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support constants in INTERPOLATE clause (new analyzer) [#47539](https://github.com/ClickHouse/ClickHouse/pull/47539) ([Dmitry Novik](https://github.com/novikd)).
* Remove TOTALS handling in FillingTransform [#47542](https://github.com/ClickHouse/ClickHouse/pull/47542) ([Igor Nikonov](https://github.com/devcrafter)).
* Hide too noisy log messages, fix some tests [#47547](https://github.com/ClickHouse/ClickHouse/pull/47547) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix some flaky tests [#47553](https://github.com/ClickHouse/ClickHouse/pull/47553) ([Azat Khuzhin](https://github.com/azat)).
* remove counters for threads, fix negative counters [#47564](https://github.com/ClickHouse/ClickHouse/pull/47564) ([Sema Checherinda](https://github.com/CheSema)).
* Fix typo [#47565](https://github.com/ClickHouse/ClickHouse/pull/47565) ([hq1](https://github.com/aerosol)).
* Fixes for upgrade check [#47570](https://github.com/ClickHouse/ClickHouse/pull/47570) ([Azat Khuzhin](https://github.com/azat)).
* Change error code in case of columns definitions was empty in ODBC [#47573](https://github.com/ClickHouse/ClickHouse/pull/47573) ([Azat Khuzhin](https://github.com/azat)).
* Add missing SYSTEM FLUSH LOGS for log messages statistics [#47575](https://github.com/ClickHouse/ClickHouse/pull/47575) ([Azat Khuzhin](https://github.com/azat)).
* Fix performance regression in aggregation [#47582](https://github.com/ClickHouse/ClickHouse/pull/47582) ([Anton Popov](https://github.com/CurtizJ)).
* ReadFromMergeTree explain prewhere and row policy actions [#47583](https://github.com/ClickHouse/ClickHouse/pull/47583) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix possible failures of 01300_client_save_history_when_terminated_long [#47606](https://github.com/ClickHouse/ClickHouse/pull/47606) ([Azat Khuzhin](https://github.com/azat)).
* checksum: do not check inverted index files [#47607](https://github.com/ClickHouse/ClickHouse/pull/47607) ([save-my-heart](https://github.com/save-my-heart)).
* Add sanity checks for writing number in variable length format [#47608](https://github.com/ClickHouse/ClickHouse/pull/47608) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer planner fixes before enable by default [#47622](https://github.com/ClickHouse/ClickHouse/pull/47622) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix exception message in clickhouse-test [#47625](https://github.com/ClickHouse/ClickHouse/pull/47625) ([Nikolay Degterinsky](https://github.com/evillique)).
* FillingTransform: remove unnecessary indirection when accessing columns [#47632](https://github.com/ClickHouse/ClickHouse/pull/47632) ([Igor Nikonov](https://github.com/devcrafter)).
* fix typo in HashJoin insertion that enables debug code in release build [#46726](https://github.com/ClickHouse/ClickHouse/issues/46726) [#47647](https://github.com/ClickHouse/ClickHouse/pull/47647) ([jorisgio](https://github.com/jorisgio)).
* clang-tidy >= 15: write CheckOptions in dictionary format [#47648](https://github.com/ClickHouse/ClickHouse/pull/47648) ([Robert Schulze](https://github.com/rschu1ze)).
* CMake: Build ClickHouse w/o GNU extensions [#47651](https://github.com/ClickHouse/ClickHouse/pull/47651) ([Robert Schulze](https://github.com/rschu1ze)).
* Faster fasttest [#47654](https://github.com/ClickHouse/ClickHouse/pull/47654) ([Robert Schulze](https://github.com/rschu1ze)).
* Add background pools size metrics [#47656](https://github.com/ClickHouse/ClickHouse/pull/47656) ([Sergei Trifonov](https://github.com/serxa)).
* Improve ThreadPool [#47657](https://github.com/ClickHouse/ClickHouse/pull/47657) ([Vitaly Baranov](https://github.com/vitlibar)).
* cmake: remove support for gold linker [#47660](https://github.com/ClickHouse/ClickHouse/pull/47660) ([Robert Schulze](https://github.com/rschu1ze)).
* Updated events and recordings [#47668](https://github.com/ClickHouse/ClickHouse/pull/47668) ([clickhouse-adrianfraguela](https://github.com/clickhouse-adrianfraguela)).
* Follow-up to [#47660](https://github.com/ClickHouse/ClickHouse/issues/47660): Further removal of gold linker support [#47669](https://github.com/ClickHouse/ClickHouse/pull/47669) ([Robert Schulze](https://github.com/rschu1ze)).
* Enable parallel execution for two tests [#47670](https://github.com/ClickHouse/ClickHouse/pull/47670) ([Robert Schulze](https://github.com/rschu1ze)).
* Restore native macos build [#47673](https://github.com/ClickHouse/ClickHouse/pull/47673) ([Robert Schulze](https://github.com/rschu1ze)).
* CMake: Remove further cruft from build [#47680](https://github.com/ClickHouse/ClickHouse/pull/47680) ([Robert Schulze](https://github.com/rschu1ze)).
* fix test / remove hardcoded database [#47682](https://github.com/ClickHouse/ClickHouse/pull/47682) ([Denny Crane](https://github.com/den-crane)).
* Apply log_queries_cut_to_length in MergeTreeWhereOptimizer [#47684](https://github.com/ClickHouse/ClickHouse/pull/47684) ([Vladimir C](https://github.com/vdimir)).
* Fix logical error in evaluate constant expression [#47685](https://github.com/ClickHouse/ClickHouse/pull/47685) ([Vladimir C](https://github.com/vdimir)).
* Try making `test_keeper_mntr_data_size` less flaky [#47687](https://github.com/ClickHouse/ClickHouse/pull/47687) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix limit offset [#47688](https://github.com/ClickHouse/ClickHouse/pull/47688) ([flynn](https://github.com/ucasfl)).
* Fix startup on older systemd versions [#47689](https://github.com/ClickHouse/ClickHouse/pull/47689) ([Thomas Casteleyn](https://github.com/Hipska)).
* More random query id in tests [#47700](https://github.com/ClickHouse/ClickHouse/pull/47700) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a style check for unsafe code [#47703](https://github.com/ClickHouse/ClickHouse/pull/47703) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make the code in Join less disgusting [#47712](https://github.com/ClickHouse/ClickHouse/pull/47712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixup git reference to LLVM [#47719](https://github.com/ClickHouse/ClickHouse/pull/47719) ([Robert Schulze](https://github.com/rschu1ze)).
* Preparation for libcxx(abi), llvm, clang-tidy 16 [#47722](https://github.com/ClickHouse/ClickHouse/pull/47722) ([Robert Schulze](https://github.com/rschu1ze)).
* Rename cfg parameter query_cache.size to query_cache.max_size [#47724](https://github.com/ClickHouse/ClickHouse/pull/47724) ([Robert Schulze](https://github.com/rschu1ze)).
* Add optimization for MemoryStorageStep [#47726](https://github.com/ClickHouse/ClickHouse/pull/47726) ([Konstantin Morozov](https://github.com/k-morozov)).
* Fix aggregation with constant key in planner [#47727](https://github.com/ClickHouse/ClickHouse/pull/47727) ([Dmitry Novik](https://github.com/novikd)).
* Disable setting in 02343_group_by_use_nulls_distributed (for new analyzer) [#47728](https://github.com/ClickHouse/ClickHouse/pull/47728) ([Dmitry Novik](https://github.com/novikd)).
* Add a test for [#21469](https://github.com/ClickHouse/ClickHouse/issues/21469) [#47736](https://github.com/ClickHouse/ClickHouse/pull/47736) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#23804](https://github.com/ClickHouse/ClickHouse/issues/23804) [#47737](https://github.com/ClickHouse/ClickHouse/pull/47737) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#18937](https://github.com/ClickHouse/ClickHouse/issues/18937) [#47738](https://github.com/ClickHouse/ClickHouse/pull/47738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#17756](https://github.com/ClickHouse/ClickHouse/issues/17756) [#47739](https://github.com/ClickHouse/ClickHouse/pull/47739) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#23162](https://github.com/ClickHouse/ClickHouse/issues/23162) [#47740](https://github.com/ClickHouse/ClickHouse/pull/47740) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* remove unused code [#47743](https://github.com/ClickHouse/ClickHouse/pull/47743) ([flynn](https://github.com/ucasfl)).
* Fix broken cross-compiled macos builds [#47744](https://github.com/ClickHouse/ClickHouse/pull/47744) ([Robert Schulze](https://github.com/rschu1ze)).
* Randomize query cache settings [#47749](https://github.com/ClickHouse/ClickHouse/pull/47749) ([Robert Schulze](https://github.com/rschu1ze)).
* Clarify steps for reopened cherry-pick PRs [#47755](https://github.com/ClickHouse/ClickHouse/pull/47755) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix ZK exception error message [#47757](https://github.com/ClickHouse/ClickHouse/pull/47757) ([Raúl Marín](https://github.com/Algunenano)).
* Add ComparisonTupleEliminationVisitor [#47758](https://github.com/ClickHouse/ClickHouse/pull/47758) ([Vladimir C](https://github.com/vdimir)).
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix partition ID byte order for s390x [#47769](https://github.com/ClickHouse/ClickHouse/pull/47769) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Stop `wait for quorum` retries on shutdown [#47770](https://github.com/ClickHouse/ClickHouse/pull/47770) ([Igor Nikonov](https://github.com/devcrafter)).
* More preparation for upgrade to libcxx(abi), llvm, clang-tidy 16 [#47771](https://github.com/ClickHouse/ClickHouse/pull/47771) ([Robert Schulze](https://github.com/rschu1ze)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Apply black formatter to all python scripts [#47790](https://github.com/ClickHouse/ClickHouse/pull/47790) ([Anton Popov](https://github.com/CurtizJ)).
* Try fix window view test [#47791](https://github.com/ClickHouse/ClickHouse/pull/47791) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update test for nested lambdas [#47795](https://github.com/ClickHouse/ClickHouse/pull/47795) ([Dmitry Novik](https://github.com/novikd)).
* Decrease scale_down ratio for faster deflation [#47798](https://github.com/ClickHouse/ClickHouse/pull/47798) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix 993 and two other tests [#47802](https://github.com/ClickHouse/ClickHouse/pull/47802) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test 02417_opentelemetry_insert_on_distributed_table [#47811](https://github.com/ClickHouse/ClickHouse/pull/47811) ([Azat Khuzhin](https://github.com/azat)).
* Make 01086_odbc_roundtrip less flaky [#47820](https://github.com/ClickHouse/ClickHouse/pull/47820) ([Antonio Andelic](https://github.com/antonio2368)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* [FixTests] Remove wrong chassert() in UserDefinedSQLObjectsLoaderFromZooKeeper.cpp [#47839](https://github.com/ClickHouse/ClickHouse/pull/47839) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix test test_replicated_merge_tree_encryption_codec [#47851](https://github.com/ClickHouse/ClickHouse/pull/47851) ([Vitaly Baranov](https://github.com/vitlibar)).
* Allow injecting timeout errors on Keeper [#47856](https://github.com/ClickHouse/ClickHouse/pull/47856) ([Raúl Marín](https://github.com/Algunenano)).
* Comment stale cherry-pick PRs once a day to remind for resolving conflicts [#47857](https://github.com/ClickHouse/ClickHouse/pull/47857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Followup to [#47802](https://github.com/ClickHouse/ClickHouse/issues/47802) [#47864](https://github.com/ClickHouse/ClickHouse/pull/47864) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Slightly better error message [#47868](https://github.com/ClickHouse/ClickHouse/pull/47868) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Make test_server_reload non-parallel [#47871](https://github.com/ClickHouse/ClickHouse/pull/47871) ([Alexander Tokmakov](https://github.com/tavplubix)).
* aspell-dict.txt: keep sorted things sorted [#47878](https://github.com/ClickHouse/ClickHouse/pull/47878) ([Robert Schulze](https://github.com/rschu1ze)).
* throw exception when all retries exhausted [#47902](https://github.com/ClickHouse/ClickHouse/pull/47902) ([Sema Checherinda](https://github.com/CheSema)).
* Fix GRANT query formatting [#47908](https://github.com/ClickHouse/ClickHouse/pull/47908) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix exception type in arrayElement function [#47909](https://github.com/ClickHouse/ClickHouse/pull/47909) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix logical error in DistributedSink [#47916](https://github.com/ClickHouse/ClickHouse/pull/47916) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix terminate in parts check thread [#47917](https://github.com/ClickHouse/ClickHouse/pull/47917) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Limit keeper request batching by size in bytes [#47918](https://github.com/ClickHouse/ClickHouse/pull/47918) ([Alexander Gololobov](https://github.com/davenger)).
* Improve replicated user defined functions [#47919](https://github.com/ClickHouse/ClickHouse/pull/47919) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update 01072_window_view_multiple_columns_groupby.sh [#47928](https://github.com/ClickHouse/ClickHouse/pull/47928) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Added test. Closes [#12264](https://github.com/ClickHouse/ClickHouse/issues/12264) [#47931](https://github.com/ClickHouse/ClickHouse/pull/47931) ([Ilya Yatsishin](https://github.com/qoega)).
* Disallow concurrent backup restore test - removed SYSTEM SYNC [#47944](https://github.com/ClickHouse/ClickHouse/pull/47944) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Set content-length for empty POST requests [#47950](https://github.com/ClickHouse/ClickHouse/pull/47950) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test `02050_client_profile_events` [#47951](https://github.com/ClickHouse/ClickHouse/pull/47951) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).
* Update docs for parseDateTime() (follow-up to [#46815](https://github.com/ClickHouse/ClickHouse/issues/46815)) [#47959](https://github.com/ClickHouse/ClickHouse/pull/47959) ([Robert Schulze](https://github.com/rschu1ze)).
* Docs: Update secondary index example [#47961](https://github.com/ClickHouse/ClickHouse/pull/47961) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix compilation on MacOS [#47967](https://github.com/ClickHouse/ClickHouse/pull/47967) ([Jordi Villar](https://github.com/jrdi)).
* [Refactoring] Move information about current hosts and list of all hosts to BackupCoordination [#47971](https://github.com/ClickHouse/ClickHouse/pull/47971) ([Vitaly Baranov](https://github.com/vitlibar)).
* Stabilize tests for new function parseDateTimeInJodaSyntax [#47974](https://github.com/ClickHouse/ClickHouse/pull/47974) ([Robert Schulze](https://github.com/rschu1ze)).
* Docs: Fix links [#47976](https://github.com/ClickHouse/ClickHouse/pull/47976) ([Robert Schulze](https://github.com/rschu1ze)).
* Try fix rabbitmq test [#47987](https://github.com/ClickHouse/ClickHouse/pull/47987) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better type check in arrayElement function [#47989](https://github.com/ClickHouse/ClickHouse/pull/47989) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect code indentation [#48011](https://github.com/ClickHouse/ClickHouse/pull/48011) ([exmy](https://github.com/exmy)).
* CMake: Remove configuration of CMAKE_SHARED_LINKER_FLAGS [#48018](https://github.com/ClickHouse/ClickHouse/pull/48018) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove the old changelog script [#48042](https://github.com/ClickHouse/ClickHouse/pull/48042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix automatic indentation in the built-in UI SQL editor [#48045](https://github.com/ClickHouse/ClickHouse/pull/48045) ([Nikolay Degterinsky](https://github.com/evillique)).
* Rename `system.marked_dropped_tables` to `dropped_tables` [#48048](https://github.com/ClickHouse/ClickHouse/pull/48048) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Automatically correct some mistakes in the changelog [#48052](https://github.com/ClickHouse/ClickHouse/pull/48052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Docs: Document [FULL] keyword in SHOW TABLES [#48061](https://github.com/ClickHouse/ClickHouse/pull/48061) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix stateless tests numbers [#48063](https://github.com/ClickHouse/ClickHouse/pull/48063) ([Raúl Marín](https://github.com/Algunenano)).
* Docs: Update syntax of some SHOW queries [#48064](https://github.com/ClickHouse/ClickHouse/pull/48064) ([Robert Schulze](https://github.com/rschu1ze)).
* Simplify backup coordination for file infos [#48095](https://github.com/ClickHouse/ClickHouse/pull/48095) ([Vitaly Baranov](https://github.com/vitlibar)).
* materialized pg small fix [#48098](https://github.com/ClickHouse/ClickHouse/pull/48098) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update SQLite to 3.41.2 [#48101](https://github.com/ClickHouse/ClickHouse/pull/48101) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix test numbers again and enforce it with style [#48106](https://github.com/ClickHouse/ClickHouse/pull/48106) ([Raúl Marín](https://github.com/Algunenano)).
* s390x reinterpret as float64 [#48112](https://github.com/ClickHouse/ClickHouse/pull/48112) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Remove slow outdated test [#48114](https://github.com/ClickHouse/ClickHouse/pull/48114) ([alesapin](https://github.com/alesapin)).
* Cosmetic follow-up to [#46252](https://github.com/ClickHouse/ClickHouse/issues/46252) [#48128](https://github.com/ClickHouse/ClickHouse/pull/48128) ([Robert Schulze](https://github.com/rschu1ze)).
* Merging "Support undrop table" [#48130](https://github.com/ClickHouse/ClickHouse/pull/48130) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix double whitespace in exception message [#48132](https://github.com/ClickHouse/ClickHouse/pull/48132) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve script for updating clickhouse-docs [#48135](https://github.com/ClickHouse/ClickHouse/pull/48135) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix stdlib compatibility issues [#48150](https://github.com/ClickHouse/ClickHouse/pull/48150) ([DimasKovas](https://github.com/DimasKovas)).
* Make test test_disallow_concurrency less flaky [#48152](https://github.com/ClickHouse/ClickHouse/pull/48152) ([Vitaly Baranov](https://github.com/vitlibar)).
* Remove unused mockSystemDatabase from gtest_transform_query_for_exter… [#48162](https://github.com/ClickHouse/ClickHouse/pull/48162) ([Vladimir C](https://github.com/vdimir)).
* Update environmental-sensors.md [#48166](https://github.com/ClickHouse/ClickHouse/pull/48166) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Correctly handle NULL constants in logical optimizer for new analyzer [#48168](https://github.com/ClickHouse/ClickHouse/pull/48168) ([Antonio Andelic](https://github.com/antonio2368)).
* Try making KeeperMap test more stable [#48170](https://github.com/ClickHouse/ClickHouse/pull/48170) ([Antonio Andelic](https://github.com/antonio2368)).
* Deprecate EXPLAIN QUERY TREE with disabled analyzer. [#48177](https://github.com/ClickHouse/ClickHouse/pull/48177) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use uniq file names in 02149_* tests to avoid SIGBUS in stress tests [#48187](https://github.com/ClickHouse/ClickHouse/pull/48187) ([Kruglov Pavel](https://github.com/Avogar)).
* Update style in ParserKQLSort.cpp [#48199](https://github.com/ClickHouse/ClickHouse/pull/48199) ([Ilya Yatsishin](https://github.com/qoega)).
* Remove support for std::unary/binary_function (removed in C++17) [#48204](https://github.com/ClickHouse/ClickHouse/pull/48204) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove unused setting [#48208](https://github.com/ClickHouse/ClickHouse/pull/48208) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove wrong assert from LogicalExpressionOptimizerPass [#48214](https://github.com/ClickHouse/ClickHouse/pull/48214) ([Antonio Andelic](https://github.com/antonio2368)).
* MySQL compatibility: Make str_to_date alias case-insensitive [#48220](https://github.com/ClickHouse/ClickHouse/pull/48220) ([Robert Schulze](https://github.com/rschu1ze)).
* Disable AST optimizations for projection analysis. [#48221](https://github.com/ClickHouse/ClickHouse/pull/48221) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix Too big of a difference between test numbers [#48224](https://github.com/ClickHouse/ClickHouse/pull/48224) ([Vladimir C](https://github.com/vdimir)).
* Stabilize 02477_age [#48225](https://github.com/ClickHouse/ClickHouse/pull/48225) ([Robert Schulze](https://github.com/rschu1ze)).
* Rename setting stop_reading_on_first_cancel [#48226](https://github.com/ClickHouse/ClickHouse/pull/48226) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Address flaky 02346_full_text_search [#48227](https://github.com/ClickHouse/ClickHouse/pull/48227) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix incorrect ThreadPool usage after ThreadPool introspection [#48244](https://github.com/ClickHouse/ClickHouse/pull/48244) ([Azat Khuzhin](https://github.com/azat)).
* fix test numbers again [#48264](https://github.com/ClickHouse/ClickHouse/pull/48264) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### Testing Improvement

* Fixed functional test 02534_keyed_siphash and 02552_siphash128_reference for s390x. [#47615](https://github.com/ClickHouse/ClickHouse/pull/47615) ([Harry Lee](https://github.com/HarryLeeIBM)).
@@ -11,14 +11,14 @@ This is intended for continuous integration checks that run on Linux servers. If

The cross-build for macOS is based on the [Build instructions](../development/build.md), follow them first.

## Install Clang-14
## Install Clang-15

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example, the commands for Bionic are:

``` bash
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-14 main" >> /etc/apt/sources.list
sudo apt-get install clang-14
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-15 main" >> /etc/apt/sources.list
sudo apt-get install clang-15
```

## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}

@@ -55,7 +55,7 @@ curl -L 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX1

cd ClickHouse
mkdir build-darwin
cd build-darwin
CC=clang-14 CXX=clang++-14 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
CC=clang-15 CXX=clang++-15 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
ninja
```

@@ -12,7 +12,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec

``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
ENGINE = S3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key,] format, [compression])
[PARTITION BY expr]
[SETTINGS ...]
```

@@ -20,6 +20,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)

**Engine parameters**

- `path` — Bucket URL with a path to the file. Supports the following wildcards in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, none of the requests will be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. The parameter is optional. If credentials are not specified, they are taken from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. The parameter is optional. By default, it will autodetect compression by the file extension.
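As a hedged illustration of the parameters above — a `path` using the `{N..M}` wildcard, a `format`, and a `compression` value taken straight from this list (the bucket name and file layout are hypothetical):

``` sql
-- Sketch: a read-only table over ten gzip-compressed TSV files;
-- the bucket URL and key prefix are made-up examples.
CREATE TABLE s3_sales (name String, value UInt32)
ENGINE = S3('https://my-bucket.s3.amazonaws.com/sales/part-{0..9}.tsv.gz', 'TSV', 'gzip');
```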
@@ -151,6 +152,7 @@ The following settings can be specified in configuration file for given endpoint

- `region` — Specifies S3 region name. Optional.
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`.
- `no_sign_request` - Ignore all the credentials so requests are not signed. Useful for accessing public buckets.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
- `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional.

@@ -168,6 +170,7 @@ The following settings can be specified in configuration file for given endpoint

<!-- <use_environment_credentials>false</use_environment_credentials> -->
<!-- <use_insecure_imds_request>false</use_insecure_imds_request> -->
<!-- <expiration_window_seconds>120</expiration_window_seconds> -->
<!-- <no_sign_request>false</no_sign_request> -->
<!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
<!-- <server_side_encryption_customer_key_base64>BASE64-ENCODED-KEY</server_side_encryption_customer_key_base64> -->
<!-- <max_single_read_retries>4</max_single_read_retries> -->

@@ -175,6 +178,17 @@ The following settings can be specified in configuration file for given endpoint

</s3>
```

## Accessing public buckets

ClickHouse tries to fetch credentials from many different types of sources.
Sometimes this causes problems when accessing buckets that are public, making the client return a `403` error code.
This issue can be avoided by using the `NOSIGN` keyword, which forces the client to ignore the credentials and not sign the requests.

``` sql
CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/aapl_stock.csv', NOSIGN, 'CSVWithNames');
```

## See also

- [s3 table function](../../../sql-reference/table-functions/s3.md)
@@ -78,7 +78,8 @@ Of course, it's possible to manually run `CREATE TABLE` with same path on nonrel

### Inserts

When new rows are inserted into `KeeperMap`, if the key already exists, the value will be updated, otherwise new key is created.
When new rows are inserted into `KeeperMap`, if the key does not exist, a new entry for the key is created.
If the key exists and the setting `keeper_map_strict_mode` is set to `true`, an exception is thrown; otherwise, the value for the key is overwritten (see the sketch after the example below).

Example:

@@ -89,6 +90,7 @@ INSERT INTO keeper_map_table VALUES ('some key', 1, 'value', 3.2);
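A minimal sketch of the strict-mode behavior described above. It assumes the `keeper_map_table` from the example, and that `keeper_map_strict_mode` can be supplied as a per-query setting — an assumption on my part, not something this page states:

```sql
-- Sketch: with strict mode enabled, re-inserting an existing key is expected
-- to throw instead of silently overwriting the stored value.
INSERT INTO keeper_map_table VALUES ('some key', 1, 'value', 3.2);
INSERT INTO keeper_map_table SETTINGS keeper_map_strict_mode = 1 VALUES ('some key', 2, 'value', 3.2); -- should fail: key exists
```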
### Deletes

Rows can be deleted using the `DELETE` query or `TRUNCATE`.
If the key exists and the setting `keeper_map_strict_mode` is set to `true`, fetching and deleting data will succeed only if it can be executed atomically.

```sql
DELETE FROM keeper_map_table WHERE key LIKE 'some%' AND v1 > 1;

@@ -105,6 +107,7 @@ TRUNCATE TABLE keeper_map_table;

### Updates

Values can be updated using the `ALTER TABLE` query. The primary key cannot be updated.
If the setting `keeper_map_strict_mode` is set to `true`, fetching and updating data will succeed only if it's executed atomically.

```sql
ALTER TABLE keeper_map_table UPDATE v1 = v1 * 10 + 2 WHERE key LIKE 'some%' AND v3 > 3.1;
@@ -67,7 +67,8 @@ CREATE TABLE youtube

(
    `id` String,
    `fetch_date` DateTime,
    `upload_date` String,
    `upload_date_str` String,
    `upload_date` Date,
    `title` String,
    `uploader_id` String,
    `uploader` String,

@@ -87,7 +88,7 @@ CREATE TABLE youtube

    `video_badges` String
)
ENGINE = MergeTree
ORDER BY (upload_date, uploader);
ORDER BY (uploader, upload_date);
```

3. The following command streams the records from the S3 files into the `youtube` table.

@@ -101,8 +102,9 @@ INSERT INTO youtube

SETTINGS input_format_null_as_default = 1
SELECT
    id,
    parseDateTimeBestEffortUS(toString(fetch_date)) AS fetch_date,
    upload_date,
    parseDateTimeBestEffortUSOrZero(toString(fetch_date)) AS fetch_date,
    upload_date AS upload_date_str,
    toDate(parseDateTimeBestEffortUSOrZero(upload_date::String)) AS upload_date,
    ifNull(title, '') AS title,
    uploader_id,
    ifNull(uploader, '') AS uploader,

@@ -121,13 +123,26 @@ SELECT

    ifNull(uploader_badges, '') AS uploader_badges,
    ifNull(video_badges, '') AS video_badges
FROM s3Cluster(
    'default',
    'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst',
    'JSONLines'
);
    'default',
    'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst',
    'JSONLines'
)
SETTINGS
    max_download_threads = 24,
    max_insert_threads = 64,
    max_insert_block_size = 100000000,
    min_insert_block_size_rows = 100000000,
    min_insert_block_size_bytes = 500000000;
```

4. Open a new tab in the SQL Console of ClickHouse Cloud (or a new `clickhouse-client` window) and watch the count increase. It will take a while to insert 4.56B rows, depending on your server resources. (Withtout any tweaking of settings, it takes about 4.5 hours.)
Some comments about our `INSERT` command:

- The `parseDateTimeBestEffortUSOrZero` function is handy when the incoming date fields may not be in the proper format. If `fetch_date` does not get parsed properly, it will be set to `0` (see the sketch after this list)
- The `upload_date` column contains valid dates, but it also contains strings like "4 hours ago" - which is certainly not a valid date. We decided to store the original value in `upload_date_str` and attempt to parse it with `toDate(parseDateTimeBestEffortUSOrZero(upload_date::String))`. If the parsing fails we just get `0`
- We used `ifNull` to avoid getting `NULL` values in our table. If an incoming value is `NULL`, the `ifNull` function sets the value to an empty string
- It takes a long time to download the data, so we added a `SETTINGS` clause to spread out the work over more threads while making sure the block sizes stayed fairly large
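To make the first bullet concrete, here is a small hedged sketch (the input strings are illustrative, not rows from the dataset):

```sql
-- Sketch: the "zero" fallback described above. '4 hours ago' cannot be
-- parsed, so the OrZero variant returns the zero DateTime instead of throwing.
SELECT
    parseDateTimeBestEffortUSOrZero('4 hours ago') AS unparsable,      -- 1970-01-01 00:00:00
    toDate(parseDateTimeBestEffortUSOrZero('08/22/2019')) AS parsable; -- 2019-08-22
```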
4. Open a new tab in the SQL Console of ClickHouse Cloud (or a new `clickhouse-client` window) and watch the count increase. It will take a while to insert 4.56B rows, depending on your server resources. (Without any tweaking of settings, it takes about 4.5 hours.)

```sql
SELECT formatReadableQuantity(count())

@@ -200,7 +215,7 @@ FROM youtube

WHERE (title ILIKE '%ClickHouse%') OR (description ILIKE '%ClickHouse%')
ORDER BY
    like_count DESC,
    view_count DESC
    view_count DESC;
```

This query has to process every row, and also parse through two columns of strings. Even then, we get decent performance at 4.15M rows/second:

@@ -224,7 +239,6 @@ The results look like:

When commenting is disabled, are people more likely to like or dislike to express their feelings about a video?

```sql
SELECT
    concat('< ', formatReadableQuantity(view_range)) AS views,

@@ -244,11 +258,10 @@ FROM

) WHERE view_range > 1
ORDER BY
    is_comments_enabled ASC,
    num_views ASC
    num_views ASC;
```

```response

┌─views─────────────┬─is_comments_enabled─┬────prob_like_dislike─┐
│ < 10.00           │ false               │  0.08224180712685371 │
│ < 100.00          │ false               │  0.06346337759167248 │

@@ -273,7 +286,6 @@ ORDER BY

└───────────────────┴─────────────────────┴──────────────────────┘

22 rows in set. Elapsed: 8.460 sec. Processed 4.56 billion rows, 77.48 GB (538.73 million rows/s., 9.16 GB/s.)

```

Enabling comments seems to be correlated with a higher rate of engagement.

@@ -284,13 +296,12 @@ Enabling comments seems to be correlated with a higher rate of engagement.

```sql
SELECT
    toStartOfMonth(toDateTime(upload_date)) AS month,
    uniq(uploader_id) AS uploaders,
    count() as num_videos,
    uniq(uploader_id) AS uploaders,
    count() as num_videos,
    sum(view_count) as view_count
FROM youtube
WHERE (month >= '2005-01-01') AND (month < '2021-12-01')
GROUP BY month
ORDER BY month ASC
ORDER BY month ASC;
```

```response

@@ -327,11 +338,12 @@ With advances in speech recognition, it’s easier than ever to create subtitles

SELECT
    toStartOfMonth(upload_date) AS month,
    countIf(has_subtitles) / count() AS percent_subtitles,
    percent_subtitles - any(percent_subtitles) OVER (ORDER BY month ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS previous
    percent_subtitles - any(percent_subtitles) OVER (
        ORDER BY month ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING
    ) AS previous
FROM youtube
WHERE (month >= '2015-01-01') AND (month < '2021-12-02')
GROUP BY month
ORDER BY month ASC
ORDER BY month ASC;
```

```response

@@ -352,7 +364,7 @@ ORDER BY month ASC

```

The data results show a spike in 2009. Apparently at that time, YouTube was removing their community captions feature, which allowed you to upload captions for other people's videos.
This prompted a very successful campaign to have creators add captions to their videos for hard of hearing and deaf viewers.

@@ -379,9 +391,7 @@ GROUP BY

    uploader
ORDER BY
    month ASC,
    total_views DESC

1001 rows in set. Elapsed: 34.917 sec. Processed 4.58 billion rows, 69.08 GB (131.15 million rows/s., 1.98 GB/s.)

    total_views DESC;
```

```response

@@ -421,13 +431,10 @@ GROUP BY

ORDER BY
    view_range ASC,
    is_comments_enabled ASC
)

20 rows in set. Elapsed: 9.043 sec. Processed 4.56 billion rows, 77.48 GB (503.99 million rows/s., 8.57 GB/s.)

);
```

```response

┌─view_range────────┬─is_comments_enabled─┬─like_ratio─┐
│ < 10.00           │ false               │       0.66 │
│ < 10.00           │ true                │       0.66 │

@@ -450,7 +457,6 @@ ORDER BY

│ < 10.00 billion   │ false               │       1.77 │
│ < 10.00 billion   │ true                │       19.5 │
└───────────────────┴─────────────────────┴────────────┘

```

### How are views distributed?

@@ -468,9 +474,7 @@ FROM

)
ARRAY JOIN
    quantiles,
    labels

12 rows in set. Elapsed: 1.864 sec. Processed 4.56 billion rows, 36.46 GB (2.45 billion rows/s., 19.56 GB/s.)

    labels;
```

```response
@@ -1235,8 +1235,8 @@ For output it uses the following correspondence between ClickHouse types and BSO

| ClickHouse type | BSON Type |
|-----------------------------------------------------------------------------------------------------------------|---------------------------------------|
| [Bool](/docs/en/sql-reference/data-types/boolean.md) | `\x08` boolean |
| [Int8/UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int16UInt16](/docs/en/sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int8/UInt8](/docs/en/sql-reference/data-types/int-uint.md)/[Enum8](/docs/en/sql-reference/data-types/enum.md) | `\x10` int32 |
| [Int16/UInt16](/docs/en/sql-reference/data-types/int-uint.md)/[Enum16](/docs/en/sql-reference/data-types/enum.md) | `\x10` int32 |
| [Int32](/docs/en/sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [UInt32](/docs/en/sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `\x12` int64 |

@@ -1255,30 +1255,30 @@ For output it uses the following correspondence between ClickHouse types and BSO

| [Array](/docs/en/sql-reference/data-types/array.md) | `\x04` array |
| [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x04` array |
| [Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x03` document |
| [Map](/docs/en/sql-reference/data-types/map.md) (with String keys) | `\x03` document |
| [Map](/docs/en/sql-reference/data-types/map.md) | `\x03` document |
| [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `\x10` int32 |
| [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `\x05` binary, `\x00` binary subtype |
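Before the input table, a hedged sketch of the output mapping in action. It assumes these hunks document the `BSONEachRow` format (the excerpt itself does not name it), and uses only types from the output table above:

```sql
-- Sketch: UInt64 maps to `\x12` int64 and String to `\x02` string per the
-- output table; FORMAT BSONEachRow emits one BSON document per row.
SELECT number AS id, concat('row-', toString(number)) AS name
FROM numbers(3)
FORMAT BSONEachRow;
```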
For input it uses the following correspondence between BSON types and ClickHouse types:

| BSON Type | ClickHouse Type |
|------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `\x01` double | [Float32/Float64](/docs/en/sql-reference/data-types/float.md) |
| `\x02` string | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x03` document | [Map](/docs/en/sql-reference/data-types/map.md)/[Named Tuple](/docs/en/sql-reference/data-types/tuple.md) |
| `\x04` array | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) |
| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) |
| `\x05` binary, `\x02` old binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x03` old uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
| `\x05` binary, `\x04` uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
| `\x07` ObjectId | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x08` boolean | [Bool](/docs/en/sql-reference/data-types/boolean.md) |
| `\x09` datetime | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
| `\x0A` null value | [NULL](/docs/en/sql-reference/data-types/nullable.md) |
| `\x0D` JavaScript code | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x0E` symbol | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) |
| `\x12` int64 | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal64](/docs/en/sql-reference/data-types/decimal.md)/[DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
| BSON Type | ClickHouse Type |
|------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `\x01` double | [Float32/Float64](/docs/en/sql-reference/data-types/float.md) |
| `\x02` string | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x03` document | [Map](/docs/en/sql-reference/data-types/map.md)/[Named Tuple](/docs/en/sql-reference/data-types/tuple.md) |
| `\x04` array | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) |
| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) |
| `\x05` binary, `\x02` old binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x03` old uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
| `\x05` binary, `\x04` uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
| `\x07` ObjectId | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x08` boolean | [Bool](/docs/en/sql-reference/data-types/boolean.md) |
|
||||
| `\x09` datetime | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
|
||||
| `\x0A` null value | [NULL](/docs/en/sql-reference/data-types/nullable.md) |
|
||||
| `\x0D` JavaScript code | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
|
||||
| `\x0E` symbol | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
|
||||
| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) |
|
||||
| `\x12` int64 | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal64](/docs/en/sql-reference/data-types/decimal.md)/[DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
|
||||
|
||||
Other BSON types are not supported. Also, it performs conversion between different integer types (for example, you can insert BSON int32 value into ClickHouse UInt8).
|
||||
Big integers and decimals (Int128/UInt128/Int256/UInt256/Decimal128/Decimal256) can be parsed from BSON Binary value with `\x00` binary subtype. In this case this format will validate that the size of binary data equals the size of expected value.
|
||||
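A minimal sketch of round-tripping data through this format, assuming the `BSONEachRow` format name from this section (the table and file names are illustrative):

```sql
-- Export a table to BSON, then load it back; types map according to the tables above
SELECT * FROM some_table INTO OUTFILE 'data.bson' FORMAT BSONEachRow;
INSERT INTO some_table FROM INFILE 'data.bson' FORMAT BSONEachRow;
```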
@ -1610,29 +1610,34 @@ See also [Format Schema](#formatschema).

The table below shows supported data types and how they match ClickHouse [data types](/docs/en/sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.

| CapnProto data type (`INSERT`) | ClickHouse data type | CapnProto data type (`SELECT`) |
|--------------------------------|----------------------|--------------------------------|
| `UINT8`, `BOOL` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](/docs/en/sql-reference/data-types/int-uint.md) | `INT8` |
| `UINT16` | [UInt16](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) | `UINT16` |
| `INT16` | [Int16](/docs/en/sql-reference/data-types/int-uint.md) | `INT16` |
| `UINT32` | [UInt32](/docs/en/sql-reference/data-types/int-uint.md), [DateTime](/docs/en/sql-reference/data-types/datetime.md) | `UINT32` |
| `INT32` | [Int32](/docs/en/sql-reference/data-types/int-uint.md) | `INT32` |
| `UINT64` | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](/docs/en/sql-reference/data-types/int-uint.md), [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `INT64` |
| `FLOAT32` | [Float32](/docs/en/sql-reference/data-types/float.md) | `FLOAT32` |
| `FLOAT64` | [Float64](/docs/en/sql-reference/data-types/float.md) | `FLOAT64` |
| `TEXT, DATA` | [String](/docs/en/sql-reference/data-types/string.md), [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `TEXT, DATA` |
| `union(T, Void), union(Void, T)` | [Nullable(T)](/docs/en/sql-reference/data-types/date.md) | `union(T, Void), union(Void, T)` |
| `ENUM` | [Enum(8\|16)](/docs/en/sql-reference/data-types/enum.md) | `ENUM` |
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `DATA` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `DATA` |
| CapnProto data type (`INSERT`) | ClickHouse data type | CapnProto data type (`SELECT`) |
|--------------------------------|----------------------|--------------------------------|
| `UINT8`, `BOOL` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](/docs/en/sql-reference/data-types/int-uint.md) | `INT8` |
| `UINT16` | [UInt16](/docs/en/sql-reference/data-types/int-uint.md), [Date](/docs/en/sql-reference/data-types/date.md) | `UINT16` |
| `INT16` | [Int16](/docs/en/sql-reference/data-types/int-uint.md) | `INT16` |
| `UINT32` | [UInt32](/docs/en/sql-reference/data-types/int-uint.md), [DateTime](/docs/en/sql-reference/data-types/datetime.md) | `UINT32` |
| `INT32` | [Int32](/docs/en/sql-reference/data-types/int-uint.md), [Decimal32](/docs/en/sql-reference/data-types/decimal.md) | `INT32` |
| `UINT64` | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](/docs/en/sql-reference/data-types/int-uint.md), [DateTime64](/docs/en/sql-reference/data-types/datetime.md), [Decimal64](/docs/en/sql-reference/data-types/decimal.md) | `INT64` |
| `FLOAT32` | [Float32](/docs/en/sql-reference/data-types/float.md) | `FLOAT32` |
| `FLOAT64` | [Float64](/docs/en/sql-reference/data-types/float.md) | `FLOAT64` |
| `TEXT, DATA` | [String](/docs/en/sql-reference/data-types/string.md), [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `TEXT, DATA` |
| `union(T, Void), union(Void, T)` | [Nullable(T)](/docs/en/sql-reference/data-types/date.md) | `union(T, Void), union(Void, T)` |
| `ENUM` | [Enum(8/16)](/docs/en/sql-reference/data-types/enum.md) | `ENUM` |
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `DATA` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `DATA` |
| `DATA` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `DATA` |
| `DATA` | [Decimal128/Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `DATA` |
| `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | [Map](/docs/en/sql-reference/data-types/map.md) | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` |

Integer types can be converted into each other during input/output.

For working with `Enum` in CapnProto format use the [format_capn_proto_enum_comparising_mode](/docs/en/operations/settings/settings-formats.md/#format_capn_proto_enum_comparising_mode) setting.
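A sketch of selecting data in this format with the enum mode set explicitly; the schema file, message name, table name, and the `'by_names'` mode value are assumptions for illustration, not taken from this page:

```sql
-- The schema setting points at a Cap'n Proto schema file and message name (illustrative)
SET format_schema = 'schema:Message';
SET format_capn_proto_enum_comparising_mode = 'by_names';

SELECT * FROM some_table FORMAT CapnProto;
```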
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` type also can be nested.
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.

### Inserting and Selecting Data {#inserting-and-selecting-data-capnproto}

@ -2285,22 +2290,28 @@ ClickHouse supports reading and writing [MessagePack](https://msgpack.org/) data

### Data Types Matching {#data-types-matching-msgpack}

| MessagePack data type (`INSERT`) | ClickHouse data type | MessagePack data type (`SELECT`) |
|----------------------------------|----------------------|----------------------------------|
| `uint N`, `positive fixint` | [UIntN](/docs/en/sql-reference/data-types/int-uint.md) | `uint N` |
| `int N`, `negative fixint` | [IntN](/docs/en/sql-reference/data-types/int-uint.md) | `int N` |
| `bool` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `uint 8` |
| `fixstr`, `str 8`, `str 16`, `str 32`, `bin 8`, `bin 16`, `bin 32` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8`, `bin 16`, `bin 32` |
| `fixstr`, `str 8`, `str 16`, `str 32`, `bin 8`, `bin 16`, `bin 32` | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `bin 8`, `bin 16`, `bin 32` |
| `float 32` | [Float32](/docs/en/sql-reference/data-types/float.md) | `float 32` |
| `float 64` | [Float64](/docs/en/sql-reference/data-types/float.md) | `float 64` |
| `uint 16` | [Date](/docs/en/sql-reference/data-types/date.md) | `uint 16` |
| `uint 32` | [DateTime](/docs/en/sql-reference/data-types/datetime.md) | `uint 32` |
| `uint 64` | [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `uint 64` |
| `fixarray`, `array 16`, `array 32` | [Array](/docs/en/sql-reference/data-types/array.md) | `fixarray`, `array 16`, `array 32` |
| `fixmap`, `map 16`, `map 32` | [Map](/docs/en/sql-reference/data-types/map.md) | `fixmap`, `map 16`, `map 32` |
| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `uint 32` |
| `bin 8` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8` |
| MessagePack data type (`INSERT`) | ClickHouse data type | MessagePack data type (`SELECT`) |
|----------------------------------|----------------------|----------------------------------|
| `uint N`, `positive fixint` | [UIntN](/docs/en/sql-reference/data-types/int-uint.md) | `uint N` |
| `int N`, `negative fixint` | [IntN](/docs/en/sql-reference/data-types/int-uint.md) | `int N` |
| `bool` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `uint 8` |
| `fixstr`, `str 8`, `str 16`, `str 32`, `bin 8`, `bin 16`, `bin 32` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8`, `bin 16`, `bin 32` |
| `fixstr`, `str 8`, `str 16`, `str 32`, `bin 8`, `bin 16`, `bin 32` | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `bin 8`, `bin 16`, `bin 32` |
| `float 32` | [Float32](/docs/en/sql-reference/data-types/float.md) | `float 32` |
| `float 64` | [Float64](/docs/en/sql-reference/data-types/float.md) | `float 64` |
| `uint 16` | [Date](/docs/en/sql-reference/data-types/date.md) | `uint 16` |
| `int 32` | [Date32](/docs/en/sql-reference/data-types/date32.md) | `int 32` |
| `uint 32` | [DateTime](/docs/en/sql-reference/data-types/datetime.md) | `uint 32` |
| `uint 64` | [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `uint 64` |
| `fixarray`, `array 16`, `array 32` | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | `fixarray`, `array 16`, `array 32` |
| `fixmap`, `map 16`, `map 32` | [Map](/docs/en/sql-reference/data-types/map.md) | `fixmap`, `map 16`, `map 32` |
| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `uint 32` |
| `bin 8` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8` |
| `int 8` | [Enum8](/docs/en/sql-reference/data-types/enum.md) | `int 8` |
| `bin 8` | [(U)Int128/(U)Int256](/docs/en/sql-reference/data-types/int-uint.md) | `bin 8` |
| `int 32` | [Decimal32](/docs/en/sql-reference/data-types/decimal.md) | `int 32` |
| `int 64` | [Decimal64](/docs/en/sql-reference/data-types/decimal.md) | `int 64` |
| `bin 8` | [Decimal128/Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `bin 8` |

Example:
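The example body itself is cut off at the hunk boundary; a minimal sketch of what usage of this format typically looks like (the table name and values are illustrative):

```sql
CREATE TABLE msgpack (array Array(UInt8)) ENGINE = Memory;

INSERT INTO msgpack VALUES ([0, 1, 2, 3, 42, 253, 254, 255]), ([255, 254, 253, 42, 3, 2, 1, 0]);

SELECT * FROM msgpack FORMAT MsgPack;
```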
@ -244,10 +244,12 @@ Example of configuration:
            <database>system</database>
            <user>foo</user>
            <password>secret</password>
            <secure>1</secure>
        </remote1>
    </named_collections>
</clickhouse>
```
`secure` is not needed for connection because of `remoteSecure`, but it can be used for dictionaries.

### Example of using named collections with the `remote`/`remoteSecure` functions

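The example under this heading falls outside the hunk; a minimal sketch using the `remote1` collection defined above (the table name is illustrative):

```sql
-- Host, port, user, password, and database come from the named collection
SELECT * FROM remoteSecure(remote1, table = one);
```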
@ -103,6 +103,20 @@ cached - for that use setting [query_cache_min_query_runs](settings/settings.md#

Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl).

Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).

ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#settings-max_block_size) rows. Due to filtering, aggregation,
etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting
[query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks
are squashed (if they are tiny) or split (if they are large) into blocks of 'max_block_size' size before insertion into the query result
cache. This reduces performance of writes into the query cache but improves compression rate of cache entries and provides more natural
block granularity when query results are later served from the query cache.

As a result, the query cache stores for each query multiple (partial)
result blocks. While this behavior is a good default, it can be suppressed using setting
[query_cache_squash_partial_query_results](settings/settings.md#query-cache-squash-partial-query-results).

Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. This can be overruled using
setting [query_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-cache-store-results-of-queries-with-nondeterministic-functions).

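A sketch combining the settings discussed above in a single query; `use_query_cache` is the umbrella setting of this page, and the table name and values are illustrative:

```sql
SELECT count()
FROM hits
SETTINGS use_query_cache = 1,                     -- cache this query's result
         query_cache_ttl = 300,                   -- keep it 5 minutes instead of the default 60 seconds
         query_cache_compress_entries = 0,        -- store the entry uncompressed
         query_cache_squash_partial_results = 0;  -- keep result blocks as produced
```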
@ -1047,7 +1047,7 @@ Default value: `0`.

Sets the number of threads performing background merges and mutations for tables with MergeTree engines. This setting can also be applied at server startup from the `default` profile configuration for backward compatibility. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.

Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`.
Before changing it, please also take a look at related MergeTree settings, such as [number_of_free_entries_in_pool_to_lower_max_size_of_merge](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-lower-max-size-of-merge) and [number_of_free_entries_in_pool_to_execute_mutation](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-execute-mutation).

Possible values:

@ -553,6 +553,32 @@ Default value: 8192

Merge reads rows from parts in blocks of `merge_max_block_size` rows, then merges and writes the result into a new part. The read block is placed in RAM, so `merge_max_block_size` affects the size of the RAM required for the merge. Thus, merges can consume a large amount of RAM for tables with very wide rows (if the average row size is 100 KB, then when merging 10 parts, 100 KB * 10 * 8192 rows ≈ 8 GB of RAM). By decreasing `merge_max_block_size`, you can reduce the amount of RAM required for a merge but slow down the merge.

## number_of_free_entries_in_pool_to_lower_max_size_of_merge {#number-of-free-entries-in-pool-to-lower-max-size-of-merge}

When there are fewer than the specified number of free entries in the pool (or replicated queue), start to lower the maximum size of merges to process (or to put in the queue).
This allows small merges to be processed instead of filling the pool with long-running merges.

Possible values:

- Any positive integer.

Default value: 8

## number_of_free_entries_in_pool_to_execute_mutation {#number-of-free-entries-in-pool-to-execute-mutation}

When there are fewer than the specified number of free entries in the pool, do not execute part mutations.
This leaves free threads for regular merges and helps avoid "Too many parts" errors.

Possible values:

- Any positive integer.

Default value: 20

**Usage**

The value of the `number_of_free_entries_in_pool_to_execute_mutation` setting should be less than the value of [background_pool_size](/docs/en/operations/server-configuration-parameters/settings#background_pool_size) * [background_merges_mutations_concurrency_ratio](/docs/en/operations/server-configuration-parameters/settings#background_merges_mutations_concurrency_ratio). Otherwise, ClickHouse throws an exception.

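MergeTree settings such as these two can be set per table; a minimal sketch (the table and column names are illustrative):

```sql
-- At table creation time
CREATE TABLE t (key UInt64) ENGINE = MergeTree ORDER BY key
SETTINGS number_of_free_entries_in_pool_to_lower_max_size_of_merge = 10;

-- Or changed later on an existing table
ALTER TABLE t MODIFY SETTING number_of_free_entries_in_pool_to_execute_mutation = 30;
```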
## max_part_loading_threads {#max-part-loading-threads}

The maximum number of threads that read parts when ClickHouse starts.

@ -1128,7 +1128,7 @@ Default value: `2.latest`.

Compression method used in output Parquet format. Supported codecs: `snappy`, `lz4`, `brotli`, `zstd`, `gzip`, `none` (uncompressed)

Default value: `snappy`.
Default value: `lz4`.
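A sketch of overriding the codec for a single export; the setting name `output_format_parquet_compression_method` is an assumption based on the naming convention of the surrounding format settings, and the table and file names are illustrative:

```sql
-- Assumed setting name; verify against your server version
SET output_format_parquet_compression_method = 'zstd';

SELECT * FROM some_table INTO OUTFILE 'data.parquet' FORMAT Parquet;
```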
## Hive format settings {#hive-format-settings}

@ -1435,6 +1435,28 @@ Possible values:

Default value: `0`

## query_cache_compress_entries {#query-cache-compress-entries}

Compress entries in the [query cache](../query-cache.md). Lessens the memory consumption of the query cache at the cost of slower inserts into / reads from it.

Possible values:

- 0 - Disabled
- 1 - Enabled

Default value: `1`

## query_cache_squash_partial_results {#query-cache-squash-partial-results}

Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressibility of cache entries (see [query_cache_compress_entries](#query-cache-compress-entries)).

Possible values:

- 0 - Disabled
- 1 - Enabled

Default value: `1`

## query_cache_ttl {#query-cache-ttl}

After this time in seconds entries in the [query cache](../query-cache.md) become stale.
@ -4021,8 +4043,8 @@ Possible values:

Default value: `0`.

## stop_reading_on_first_cancel {#stop_reading_on_first_cancel}
When set to `true` and the user wants to interrupt a query (for example using `Ctrl+C` on the client), then the query continues execution only on data that was already read from the table. Afterward, it will return a partial result of the query for the part of the table that was read. To fully stop the execution of a query without a partial result, the user should send 2 cancel requests.
## partial_result_on_first_cancel {#partial_result_on_first_cancel}
When set to `true` and the user wants to interrupt a query (for example using `Ctrl+C` on the client), then the query continues execution only on data that was already read from the table. Afterwards, it will return a partial result of the query for the part of the table that was read. To fully stop the execution of a query without a partial result, the user should send 2 cancel requests.

**Example without setting on Ctrl+C**
```sql
@ -4037,7 +4059,7 @@ Query was cancelled.

**Example with setting on Ctrl+C**
```sql
SELECT sum(number) FROM numbers(10000000000) SETTINGS stop_reading_on_first_cancel=true
SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true

┌──────sum(number)─┐
│ 1355411451286266 │
@ -4049,3 +4071,44 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS stop_reading_on_first_canc

Possible values: `true`, `false`

Default value: `false`
## function_json_value_return_type_allow_nullable

Control whether to allow returning `NULL` when the value does not exist for the `JSON_VALUE` function.

```sql
SELECT JSON_VALUE('{"hello":"world"}', '$.b') settings function_json_value_return_type_allow_nullable=true;

┌─JSON_VALUE('{"hello":"world"}', '$.b')─┐
│ ᴺᵁᴸᴸ                                   │
└────────────────────────────────────────┘

1 row in set. Elapsed: 0.001 sec.
```

Possible values:

- true — Allow.
- false — Disallow.

Default value: `false`.

## function_json_value_return_type_allow_complex

Control whether to allow returning a complex type (such as a struct, array, or map) for the `JSON_VALUE` function.

```sql
SELECT JSON_VALUE('{"hello":{"world":"!"}}', '$.hello') settings function_json_value_return_type_allow_complex=true

┌─JSON_VALUE('{"hello":{"world":"!"}}', '$.hello')─┐
│ {"world":"!"}                                    │
└──────────────────────────────────────────────────┘

1 row in set. Elapsed: 0.001 sec.
```

Possible values:

- true — Allow.
- false — Disallow.

Default value: `false`.

@ -51,10 +51,14 @@ But for storing archives with rare queries, shelves will work.

## RAID {#raid}

When using HDD, you can combine them in RAID-10, RAID-5, RAID-6 or RAID-50.
For Linux, software RAID is better (with `mdadm`). We do not recommend using LVM.
For Linux, software RAID is better (with `mdadm`).
When creating RAID-10, select the `far` layout.
If your budget allows, choose RAID-10.

LVM by itself (without RAID or `mdadm`) is ok, but making RAID with it or combining it with `mdadm` is a less explored option, and there will be more chances for mistakes
(selecting wrong chunk size; misalignment of chunks; choosing a wrong raid type; forgetting to cleanup disks). If you are confident
in using LVM, there is nothing against using it.

If you have more than 4 disks, use RAID-6 (preferred) or RAID-50, instead of RAID-5.
When using RAID-5, RAID-6 or RAID-50, always increase stripe_cache_size, since the default value is usually not the best choice.

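A sketch of the two tips above; the device names and the cache value are illustrative, so adjust them to your layout:

```bash
# Create a RAID-10 array with the 'far' layout (f2), as recommended above
mdadm --create /dev/md0 --level=10 --layout=f2 --raid-devices=4 /dev/sd[abcd]

# Increase stripe_cache_size for a RAID-5/6/50 array (value is in pages)
echo 4096 | sudo tee /sys/block/md0/md/stripe_cache_size
```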
@ -70,7 +74,7 @@ Never set the block size too small or too large.

You can use RAID-0 on SSD.
Regardless of RAID use, always use replication for data security.

Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting.
Enable NCQ with a long queue. For HDD, choose the mq-deadline or CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting.
For HDD, enable the write cache.

Make sure that [`fstrim`](https://en.wikipedia.org/wiki/Trim_(computing)) is enabled for NVME and SSD disks in your OS (usually it's implemented using a cronjob or systemd service).
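On systemd-based distributions this is usually the stock `fstrim.timer` unit; a minimal sketch:

```bash
# Enable periodic TRIM via the distribution's systemd timer
sudo systemctl enable --now fstrim.timer
```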
@ -8,10 +8,150 @@ sidebar_label: clickhouse-local

The `clickhouse-local` program enables you to perform fast processing on local files, without having to deploy and configure the ClickHouse server. It accepts data that represent tables and queries them using [ClickHouse SQL dialect](../../sql-reference/index.md). `clickhouse-local` uses the same core as ClickHouse server, so it supports most of the features and the same set of formats and table engines.

By default `clickhouse-local` has access to data on the same host, and it does not depend on the server's configuration. It also supports loading server configuration using `--config-file` argument. For temporary data, a unique temporary data directory is created by default.
## Download clickhouse-local

`clickhouse-local` is executed using the same `clickhouse` binary that runs the ClickHouse server and `clickhouse-client`. The easiest way to download the latest version is with the following command:

```bash
curl https://clickhouse.com/ | sh
```

:::note
The binary you just downloaded can run all sorts of ClickHouse tools and utilities. If you want to run ClickHouse as a database server, check out the [Quick Start](../../quick-start.mdx).
:::

## Query data in a CSV file using SQL

A common use of `clickhouse-local` is to run ad-hoc queries on files, where you don't have to insert the data into a table. `clickhouse-local` can stream the data from a file into a temporary table and execute your SQL.

If the file is sitting on the same machine as `clickhouse-local`, use the `file` table engine. The following `reviews.tsv` file contains a sampling of Amazon product reviews:

```bash
./clickhouse local -q "SELECT * FROM file('reviews.tsv')"
```

ClickHouse knows the file uses a tab-separated format from the filename extension. If you need to explicitly specify the format, simply add one of the [many ClickHouse input formats](../../interfaces/formats.md):
```bash
./clickhouse local -q "SELECT * FROM file('reviews.tsv', 'TabSeparated')"
```

The `file` table function creates a table, and you can use `DESCRIBE` to see the inferred schema:

```bash
./clickhouse local -q "DESCRIBE file('reviews.tsv')"
```

```response
marketplace       Nullable(String)
customer_id       Nullable(Int64)
review_id         Nullable(String)
product_id        Nullable(String)
product_parent    Nullable(Int64)
product_title     Nullable(String)
product_category  Nullable(String)
star_rating       Nullable(Int64)
helpful_votes     Nullable(Int64)
total_votes       Nullable(Int64)
vine              Nullable(String)
verified_purchase Nullable(String)
review_headline   Nullable(String)
review_body       Nullable(String)
review_date       Nullable(Date)
```

Let's find a product with the highest rating:

```bash
./clickhouse local -q "SELECT
    argMax(product_title,star_rating),
    max(star_rating)
FROM file('reviews.tsv')"
```

```response
Monopoly Junior Board Game   5
```

## Query data in a Parquet file in AWS S3

If you have a file in S3, use `clickhouse-local` and the `s3` table function to query the file in place (without inserting the data into a ClickHouse table). We have a file named `house_0.parquet` in a public bucket that contains home prices of property sold in the United Kingdom. Let's see how many rows it has:

```bash
./clickhouse local -q "
SELECT count()
FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/house_parquet/house_0.parquet')"
```

The file has 2.7M rows:

```response
2772030
```

It's always useful to see the schema that ClickHouse infers from the file:

```bash
./clickhouse local -q "DESCRIBE s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/house_parquet/house_0.parquet')"
```

```response
price      Nullable(Int64)
date       Nullable(UInt16)
postcode1  Nullable(String)
postcode2  Nullable(String)
type       Nullable(String)
is_new     Nullable(UInt8)
duration   Nullable(String)
addr1      Nullable(String)
addr2      Nullable(String)
street     Nullable(String)
locality   Nullable(String)
town       Nullable(String)
district   Nullable(String)
county     Nullable(String)
```

Let's see what the most expensive neighborhoods are:

```bash
./clickhouse local -q "
SELECT
    town,
    district,
    count() AS c,
    round(avg(price)) AS price,
    bar(price, 0, 5000000, 100)
FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/house_parquet/house_0.parquet')
GROUP BY
    town,
    district
HAVING c >= 100
ORDER BY price DESC
LIMIT 10"
```

```response
LONDON             CITY OF LONDON           886    2271305   █████████████████████████████████████████████▍
LEATHERHEAD        ELMBRIDGE                206    1176680   ███████████████████████▌
LONDON             CITY OF WESTMINSTER      12577  1108221   ██████████████████████▏
LONDON             KENSINGTON AND CHELSEA   8728   1094496   █████████████████████▉
HYTHE              FOLKESTONE AND HYTHE     130    1023980   ████████████████████▍
CHALFONT ST GILES  CHILTERN                 113    835754    ████████████████▋
AMERSHAM           BUCKINGHAMSHIRE          113    799596    ███████████████▉
VIRGINIA WATER     RUNNYMEDE                356    789301    ███████████████▊
BARNET             ENFIELD                  282    740514    ██████████████▊
NORTHWOOD          THREE RIVERS             184    731609    ██████████████▋
```

:::tip
When you are ready to insert your files into ClickHouse, start up a ClickHouse server and insert the results of your `file` and `s3` table functions into a `MergeTree` table. View the [Quick Start](../../quick-start.mdx) for more details.
:::


## Usage {#usage}

By default `clickhouse-local` has access to data of a ClickHouse server on the same host, and it does not depend on the server's configuration. It also supports loading server configuration using `--config-file` argument. For temporary data, a unique temporary data directory is created by default.

Basic usage (Linux):

``` bash
@ -24,7 +164,9 @@ Basic usage (Mac):
$ ./clickhouse local --structure "table_structure" --input-format "format_of_incoming_data" --query "query"
```

Also supported on Windows through WSL2.
:::note
`clickhouse-local` is also supported on Windows through WSL2.
:::

Arguments:

@ -0,0 +1,76 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantileApprox
sidebar_position: 204
---

# quantileApprox

Computes the [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [Greenwald-Khanna](http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf) algorithm. The Greenwald-Khanna algorithm is an algorithm used to compute quantiles on a stream of data in a highly efficient manner. It was introduced by Michael Greenwald and Sanjeev Khanna in 2001. It is widely used in databases and big data systems where computing accurate quantiles on a large stream of data in real-time is necessary. The algorithm is highly efficient, taking only O(log n) space and O(log log n) time per item (where n is the size of the input). It is also highly accurate, providing an approximate quantile value with high probability.

`quantileApprox` is different from other quantile functions in ClickHouse, because it enables the user to control the accuracy of the approximate quantile result.

**Syntax**

``` sql
quantileApprox(accuracy, level)(expr)
```

Alias: `medianApprox`.

**Arguments**

- `accuracy` — Accuracy of quantile. Constant positive integer. Larger accuracy value means less error. For example, if the accuracy argument is set to 100, the computed quantile will have an error no greater than 1% with high probability. There is a trade-off between the accuracy of the computed quantiles and the computational complexity of the algorithm. A larger accuracy requires more memory and computational resources to compute the quantile accurately, while a smaller accuracy argument allows for a faster and more memory-efficient computation but with a slightly lower accuracy.

- `level` — Level of quantile. Optional parameter. Constant floating-point number from 0 to 1. Default value: 0.5. At `level=0.5` the function calculates [median](https://en.wikipedia.org/wiki/Median).

- `expr` — Expression over the column values resulting in numeric [data types](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md).


**Returned value**

- Quantile of the specified level and accuracy.


Type:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

``` sql
SELECT quantileApprox(1, 0.25)(number + 1)
FROM numbers(1000)

┌─quantileApprox(1, 0.25)(plus(number, 1))─┐
│                                        1 │
└──────────────────────────────────────────┘

SELECT quantileApprox(10, 0.25)(number + 1)
FROM numbers(1000)

┌─quantileApprox(10, 0.25)(plus(number, 1))─┐
│                                       156 │
└───────────────────────────────────────────┘

SELECT quantileApprox(100, 0.25)(number + 1)
FROM numbers(1000)

┌─quantileApprox(100, 0.25)(plus(number, 1))─┐
│                                        251 │
└────────────────────────────────────────────┘

SELECT quantileApprox(1000, 0.25)(number + 1)
FROM numbers(1000)

┌─quantileApprox(1000, 0.25)(plus(number, 1))─┐
│                                         249 │
└─────────────────────────────────────────────┘
```


**See Also**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)
@ -114,3 +114,59 @@ Result:

│ [249.75,499.5,749.25,899.1,949.05,989.01,998.001] │
└─────────────────────────────────────────────────────────────────────┘
```

## quantilesApprox

`quantilesApprox` works similarly to `quantileApprox` but allows us to calculate quantiles at different levels simultaneously and returns an array.

**Syntax**

``` sql
quantilesApprox(accuracy, level1, level2, ...)(expr)
```

**Returned value**

- [Array](../../../sql-reference/data-types/array.md) of quantiles of the specified levels.

Type of array values:

- [Float64](../../../sql-reference/data-types/float.md) for numeric data type input.
- [Date](../../../sql-reference/data-types/date.md) if input values have the `Date` type.
- [DateTime](../../../sql-reference/data-types/datetime.md) if input values have the `DateTime` type.

**Example**

Query:


``` sql
SELECT quantilesApprox(1, 0.25, 0.5, 0.75)(number + 1)
FROM numbers(1000)

┌─quantilesApprox(1, 0.25, 0.5, 0.75)(plus(number, 1))─┐
│ [1,1,1]                                              │
└──────────────────────────────────────────────────────┘

SELECT quantilesApprox(10, 0.25, 0.5, 0.75)(number + 1)
FROM numbers(1000)

┌─quantilesApprox(10, 0.25, 0.5, 0.75)(plus(number, 1))─┐
│ [156,413,659]                                         │
└───────────────────────────────────────────────────────┘


SELECT quantilesApprox(100, 0.25, 0.5, 0.75)(number + 1)
FROM numbers(1000)

┌─quantilesApprox(100, 0.25, 0.5, 0.75)(plus(number, 1))─┐
│ [251,498,741]                                          │
└────────────────────────────────────────────────────────┘

SELECT quantilesApprox(1000, 0.25, 0.5, 0.75)(number + 1)
FROM numbers(1000)

┌─quantilesApprox(1000, 0.25, 0.5, 0.75)(plus(number, 1))─┐
│ [249,499,749]                                           │
└─────────────────────────────────────────────────────────┘
```
@ -1463,28 +1463,28 @@ Result:
└───────────────────────┘
```

## FROM\_UNIXTIME
## fromUnixTimestamp

Function converts Unix timestamp to a calendar date and a time of a day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and returns the [DateTime](../../sql-reference/data-types/datetime.md) type.

FROM_UNIXTIME uses MySQL datetime format style, refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format.
fromUnixTimestamp uses MySQL datetime format style, refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format.

Alias: `fromUnixTimestamp`.
Alias: `FROM_UNIXTIME`.

**Example:**

Query:

```sql
SELECT FROM_UNIXTIME(423543535);
SELECT fromUnixTimestamp(423543535);
```

Result:

```text
┌─FROM_UNIXTIME(423543535)─┐
│      1983-06-04 10:58:55 │
└──────────────────────────┘
┌─fromUnixTimestamp(423543535)─┐
│          1983-06-04 10:58:55 │
└──────────────────────────────┘
```

When there are two or three arguments, the first an [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string and the third an optional constant time zone string — it acts in the same way as [formatDateTime](#formatdatetime) and returns the [String](../../sql-reference/data-types/string.md#string) type.
@ -1492,7 +1492,7 @@ When there are two or three arguments, the first an [Integer](../../sql-referenc

For example:

```sql
SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime;
SELECT fromUnixTimestamp(1234334543, '%Y-%m-%d %R:%S') AS DateTime;
```

```text
@ -1505,11 +1505,12 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime;

- [fromUnixTimestampInJodaSyntax](#fromUnixTimestampInJodaSyntax)


## fromUnixTimestampInJodaSyntax
Similar to FROM_UNIXTIME, except that it formats time in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.

Similar to fromUnixTimestamp, except that it formats time in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.

**Example:**

Query:
``` sql
SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC');
@ -1517,12 +1518,11 @@ SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC');

Result:
```
┌─fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC')─┐
┌─fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC')────┐
│ 2022-11-30 10:41:12                                                        │
└────────────────────────────────────────────────────────────────────────────┘
```


## toModifiedJulianDay

Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports date from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or the date is invalid.

@ -91,7 +91,7 @@ The command must read arguments from `STDIN` and must output the result to `STDO
**Example**

Creating `test_function` using XML configuration.
File test_function.xml.
File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml
<functions>
    <function>
@ -108,7 +108,7 @@ File test_function.xml.
</functions>
```

Script file inside `user_scripts` folder `test_function.py`.
Script file inside `user_scripts` folder `test_function.py` (`/var/lib/clickhouse/user_scripts/test_function.py` with default path settings).

```python
#!/usr/bin/python3
@ -136,7 +136,7 @@ Result:
```

Creating `test_function_sum` manually specifying `execute_direct` to `0` using XML configuration.
File test_function.xml.
File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml
<functions>
    <function>
@ -173,7 +173,7 @@ Result:
```

Creating `test_function_sum_json` with named arguments and format [JSONEachRow](../../interfaces/formats.md#jsoneachrow) using XML configuration.
File test_function.xml.
File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml
<functions>
    <function>
@ -195,7 +195,7 @@ File test_function.xml.
</functions>
```

Script file inside `user_scripts` folder `test_function_sum_json.py`.
Script file inside `user_scripts` folder `test_function_sum_json.py` (`/var/lib/clickhouse/user_scripts/test_function_sum_json.py` with default path settings).

```python
#!/usr/bin/python3
@ -228,7 +228,7 @@ Result:
```

Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type).
File test_function_parameter_python.xml.
File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings).
```xml
<functions>
    <function>
@ -244,7 +244,7 @@ File test_function_parameter_python.xml.
</functions>
```

Script file inside `user_scripts` folder `test_function_parameter_python.py`.
Script file inside `user_scripts` folder `test_function_parameter_python.py` (`/var/lib/clickhouse/user_scripts/test_function_parameter_python.py` with default path settings).

```python
#!/usr/bin/python3
@ -401,7 +401,7 @@ Before version 21.11 the order of arguments was wrong, i.e. JSON_QUERY(path, jso

Parses a JSON and extracts a value as a JSON scalar.

If the value does not exist, an empty string will be returned.
If the value does not exist, an empty string will be returned by default. With `SET function_json_value_return_type_allow_nullable = true`, `NULL` will be returned instead. If the value is a complex type (such as a struct, array, or map), an empty string will be returned by default; with `SET function_json_value_return_type_allow_complex = true`, the complex value will be returned.

Example:

@ -410,6 +410,8 @@ SELECT JSON_VALUE('{"hello":"world"}', '$.hello');
SELECT JSON_VALUE('{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}', '$.array[*][0 to 2, 4]');
SELECT JSON_VALUE('{"hello":2}', '$.hello');
SELECT toTypeName(JSON_VALUE('{"hello":2}', '$.hello'));
select JSON_VALUE('{"hello":"world"}', '$.b') settings function_json_value_return_type_allow_nullable=true;
select JSON_VALUE('{"hello":{"world":"!"}}', '$.hello') settings function_json_value_return_type_allow_complex=true;
```

Result:

@ -208,7 +208,7 @@ Type: [Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-referen
Query:

``` sql
CREATE TABLE tupletest (`col` Tuple(user_ID UInt64, session_ID UInt64) ENGINE = Memory;
CREATE TABLE tupletest (col Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;

INSERT INTO tupletest VALUES (tuple( 100, 2502)), (tuple(1,100));

@ -227,11 +227,11 @@ Result:
It is possible to transform columns to rows using this function:

``` sql
CREATE TABLE tupletest (`col` Tuple(CPU Float64, Memory Float64, Disk Float64)) ENGINE = Memory;
CREATE TABLE tupletest (col Tuple(CPU Float64, Memory Float64, Disk Float64)) ENGINE = Memory;

INSERT INTO tupletest VALUES(tuple(3.3, 5.5, 6.6));

SELECT arrayJoin(tupleToNameValuePairs(col))FROM tupletest;
SELECT arrayJoin(tupleToNameValuePairs(col)) FROM tupletest;
```

Result:

@ -68,9 +68,9 @@ Result:

## mapFromArrays

Merges an [Array](../../sql-reference/data-types/array.md) of keys and an [Array](../../sql-reference/data-types/array.md) of values into a [Map(key, value)](../../sql-reference/data-types/map.md).
Merges an [Array](../../sql-reference/data-types/array.md) of keys and an [Array](../../sql-reference/data-types/array.md) of values into a [Map(key, value)](../../sql-reference/data-types/map.md). Note that the second argument can also be a [Map](../../sql-reference/data-types/map.md); in that case it is cast to an Array during execution.

The function is a more convenient alternative to `CAST((key_array, value_array), 'Map(key_type, value_type)')`. For example, instead of writing `CAST((['aa', 'bb'], [4, 5]), 'Map(String, UInt32)')`, you can write `mapFromArrays(['aa', 'bb'], [4, 5])`.
The function is a more convenient alternative to `CAST((key_array, value_array_or_map), 'Map(key_type, value_type)')`. For example, instead of writing `CAST((['aa', 'bb'], [4, 5]), 'Map(String, UInt32)')`, you can write `mapFromArrays(['aa', 'bb'], [4, 5])`.

**Syntax**

@ -82,11 +82,11 @@ Alias: `MAP_FROM_ARRAYS(keys, values)`

**Arguments**
- `keys` — Given key array to create a map from. The nested type of array must be: [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md)
- `values` - Given value array to create a map from.
- `values` - Given value array or map to create a map from.

**Returned value**

- A map whose keys and values are constructed from the key and value arrays
- A map whose keys and values are constructed from the key array and value array/map.

**Example**

@ -94,13 +94,17 @@ Query:

```sql
select mapFromArrays(['a', 'b', 'c'], [1, 2, 3])
```

```text

┌─mapFromArrays(['a', 'b', 'c'], [1, 2, 3])─┐
│ {'a':1,'b':2,'c':3}                       │
└───────────────────────────────────────────┘
```

SELECT mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3))

┌─mapFromArrays([1, 2, 3], map('a', 1, 'b', 2, 'c', 3))─┐
│ {1:('a',1),2:('b',2),3:('c',3)}                       │
└───────────────────────────────────────────────────────┘
```

## mapAdd

@ -21,15 +21,6 @@ DELETE FROM hits WHERE Title LIKE '%hello%';

Lightweight deletes are asynchronous by default. Set `mutations_sync` equal to 1 to wait for one replica to process the statement, and set `mutations_sync` to 2 to wait for all replicas.

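A minimal sketch of a synchronous lightweight delete using that setting:

```sql
SET mutations_sync = 2;  -- wait for all replicas to process the statement
DELETE FROM hits WHERE Title LIKE '%hello%';
```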
:::note
This feature is experimental and requires you to set `allow_experimental_lightweight_delete` to true:

```sql
SET allow_experimental_lightweight_delete = true;
```

:::

:::note
`DELETE FROM` requires the `ALTER DELETE` privilege:
```sql
@ -64,6 +55,3 @@ With the described implementation now we can see what can negatively affect 'DEL
- Table having a very large number of data parts
- Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file.

:::note
This implementation might change in the future.
:::

@ -30,7 +30,7 @@ This statement is identical to the query:
SELECT name FROM system.databases [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE filename] [FORMAT format]
```

### Examples
**Examples**

Getting database names, containing the symbols sequence 'de' in their names:

@ -92,7 +92,7 @@ Result:
└────────────────────────────────┘
```

### See Also
**See also**

- [CREATE DATABASE](https://clickhouse.com/docs/en/sql-reference/statements/create/database/#query-language-create-database)

@ -128,7 +128,7 @@ This statement is identical to the query:
SELECT name FROM system.tables [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
```

### Examples
**Examples**

Getting table names, containing the symbols sequence 'user' in their names:

@ -191,11 +191,59 @@ Result:
└────────────────────────────────┘
```

### See Also
**See also**

- [Create Tables](https://clickhouse.com/docs/en/getting-started/tutorial/#create-tables)
- [SHOW CREATE TABLE](https://clickhouse.com/docs/en/sql-reference/statements/show/#show-create-table)

## SHOW COLUMNS

Displays a list of columns.

```sql
SHOW [EXTENDED] [FULL] COLUMNS {FROM | IN} <table> [{FROM | IN} <db>] [{[NOT] {LIKE | ILIKE} '<pattern>' | WHERE <expr>}] [LIMIT <N>] [INTO
OUTFILE <filename>] [FORMAT <format>]
```

The database and table name can be specified in abbreviated form as `<db>.<table>`, i.e. `FROM tab FROM db` and `FROM db.tab` are
equivalent. If no database is specified, the query returns the list of columns from the current database.

The optional keyword `EXTENDED` currently has no effect, it only exists for MySQL compatibility.

The optional keyword `FULL` causes the output to include the collation, comment and privilege columns.

`SHOW COLUMNS` produces a result table with the following structure:
- field - The name of the column (String)
- type - The column data type (String)
- null - If the column data type is Nullable (UInt8)
- key - `PRI` if the column is part of the primary key, `SOR` if the column is part of the sorting key, empty otherwise (String)
- default - Default expression of the column if it is of type `ALIAS`, `DEFAULT`, or `MATERIALIZED`, otherwise `NULL`. (Nullable(String))
- extra - Additional information, currently unused (String)
- collation - (only if `FULL` keyword was specified) Collation of the column, always `NULL` because ClickHouse has no per-column collations (Nullable(String))
- comment - (only if `FULL` keyword was specified) Comment on the column (String)
- privilege - (only if `FULL` keyword was specified) The privilege you have on this column, currently not available (String)

**Examples**

Getting information about all columns in table 'orders' starting with 'delivery_':

```sql
SHOW COLUMNS FROM 'orders' LIKE 'delivery_%'
```

Result:

``` text
┌─field───────────┬─type─────┬─null─┬─key─────┬─default─┬─extra─┐
│ delivery_date   │ DateTime │    0 │ PRI SOR │ ᴺᵁᴸᴸ    │       │
│ delivery_status │ Bool     │    0 │         │ ᴺᵁᴸᴸ    │       │
└─────────────────┴──────────┴──────┴─────────┴─────────┴───────┘
```

**See also**

- [system.columns](https://clickhouse.com/docs/en/operations/system-tables/columns)

## SHOW DICTIONARIES
|
||||
|
||||
Displays a list of [Dictionaries](../../sql-reference/dictionaries/index.md).
|
||||
@ -212,7 +260,7 @@ You can get the same results as the `SHOW DICTIONARIES` query in the following w
|
||||
SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
|
||||
```
|
||||
|
||||
**Example**
|
||||
**Examples**
|
||||
|
||||
The following query selects the first two rows from the list of dictionaries in the `db` database whose names contain `reg`.
|
||||
|
||||
@ -231,7 +279,7 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
|
||||
|
||||
Shows privileges for a user.
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW GRANTS [FOR user1 [, user2 ...]]
|
||||
@ -245,7 +293,7 @@ Shows parameters that were used at a [user creation](../../sql-reference/stateme
|
||||
|
||||
`SHOW CREATE USER` does not output user passwords.
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
|
||||
@ -255,7 +303,7 @@ SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
|
||||
|
||||
Shows the parameters that were used at [role creation](../../sql-reference/statements/create/role.md).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW CREATE ROLE name1 [, name2 ...]
|
||||
@ -265,7 +313,7 @@ SHOW CREATE ROLE name1 [, name2 ...]
|
||||
|
||||
Shows the parameters that were used at [row policy creation](../../sql-reference/statements/create/row-policy.md).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
|
||||
@ -275,7 +323,7 @@ SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
|
||||
|
||||
Shows the parameters that were used at [quota creation](../../sql-reference/statements/create/quota.md).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
|
||||
@ -285,7 +333,7 @@ SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
|
||||
|
||||
Shows the parameters that were used at [settings profile creation](../../sql-reference/statements/create/settings-profile.md).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
|
||||
@ -295,7 +343,7 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
|
||||
|
||||
Returns a list of [user account](../../guides/sre/user-management/index.md#user-account-management) names. To view user account parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW USERS
|
||||
@ -305,7 +353,7 @@ SHOW USERS
|
||||
|
||||
Returns a list of [roles](../../guides/sre/user-management/index.md#role-management). To view other parameters, see the system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW [CURRENT|ENABLED] ROLES
|
||||
@ -314,7 +362,7 @@ SHOW [CURRENT|ENABLED] ROLES
|
||||
|
||||
Returns a list of [setting profiles](../../guides/sre/user-management/index.md#settings-profiles-management). To view settings profile parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW [SETTINGS] PROFILES
|
||||
@ -324,7 +372,7 @@ SHOW [SETTINGS] PROFILES
|
||||
|
||||
Returns a list of [row policies](../../guides/sre/user-management/index.md#row-policy-management) for the specified table. To view row policy parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW [ROW] POLICIES [ON [db.]table]
|
||||
@ -334,7 +382,7 @@ SHOW [ROW] POLICIES [ON [db.]table]
|
||||
|
||||
Returns a list of [quotas](../../guides/sre/user-management/index.md#quotas-management). To view quota parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW QUOTAS
|
||||
@ -344,7 +392,7 @@ SHOW QUOTAS
|
||||
|
||||
Returns [quota](../../operations/quotas.md) consumption for all users or for the current user. To view other parameters, see the system tables [system.quotas_usage](../../operations/system-tables/quotas_usage.md#system_tables-quotas_usage) and [system.quota_usage](../../operations/system-tables/quota_usage.md#system_tables-quota_usage).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW [CURRENT] QUOTA
|
||||
@ -353,7 +401,7 @@ SHOW [CURRENT] QUOTA
|
||||
|
||||
Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges).
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW ACCESS
|
||||
@ -366,13 +414,14 @@ Returns a list of clusters. All available clusters are listed in the [system.clu
|
||||
The `SHOW CLUSTER name` query displays the contents of the system.clusters table for this cluster.
|
||||
:::
|
||||
|
||||
### Syntax
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
SHOW CLUSTER '<name>'
|
||||
SHOW CLUSTERS [[NOT] LIKE|ILIKE '<pattern>'] [LIMIT <N>]
|
||||
```
|
||||
### Examples
|
||||
|
||||
**Examples**
|
||||
|
||||
Query:
|
||||
|
||||
|
@ -114,11 +114,11 @@ This will also create system tables even if message queue is empty.
|
||||
|
||||
## RELOAD CONFIG
|
||||
|
||||
Reloads ClickHouse configuration. Used when configuration is stored in ZooKeeper.
|
||||
Reloads ClickHouse configuration. Used when the configuration is stored in ZooKeeper. Note that `SYSTEM RELOAD CONFIG` does not reload `USER` configuration stored in ZooKeeper; it only reloads `USER` configuration that is stored in `users.xml`. To reload all `USER` configuration, use `SYSTEM RELOAD USERS`.
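For example, to pick up changed server configuration and then refresh all access storages (see `RELOAD USERS` below):

```sql
SYSTEM RELOAD CONFIG
SYSTEM RELOAD USERS
```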
|
||||
|
||||
## RELOAD USERS
|
||||
|
||||
Reloads all access storages, including: users.xml, local disk access storage, replicated (in ZooKeeper) access storage. Note that `SYSTEM RELOAD CONFIG` will only reload users.xml access storage.
|
||||
Reloads all access storages, including: users.xml, local disk access storage, replicated (in ZooKeeper) access storage.
|
||||
|
||||
## SHUTDOWN
|
||||
|
||||
@ -224,6 +224,14 @@ Clears freezed backup with the specified name from all the disks. See more about
|
||||
SYSTEM UNFREEZE WITH NAME <backup_name>
|
||||
```
|
||||
|
||||
### WAIT LOADING PARTS
|
||||
|
||||
Waits until all asynchronously loading data parts of a table (outdated data parts) become loaded.
|
||||
|
||||
``` sql
|
||||
SYSTEM WAIT LOADING PARTS [db.]merge_tree_family_table_name
|
||||
```
|
||||
|
||||
## Managing ReplicatedMergeTree Tables
|
||||
|
||||
ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) tables.
|
||||
@ -283,10 +291,14 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
|
||||
Waits until a `ReplicatedMergeTree` table is synced with the other replicas in the cluster, but for no more than `receive_timeout` seconds.
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT]
|
||||
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
|
||||
```
|
||||
|
||||
After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands. If a `STRICT` modifier was specified then the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue.
|
||||
After running this statement, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue, and then the query waits until the replica processes all of the fetched commands. The following modifiers are supported:
|
||||
|
||||
- If the `STRICT` modifier is specified, the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue.
|
||||
- If the `LIGHTWEIGHT` modifier is specified, the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed.
|
||||
- If the `PULL` modifier is specified, the query pulls new replication queue entries from ZooKeeper but does not wait for anything to be processed.
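For example, a minimal sketch (the table name is hypothetical) that waits only for data-related entries to be processed:

```sql
SYSTEM SYNC REPLICA db.replicated_table LIGHTWEIGHT
```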
|
||||
|
||||
### RESTART REPLICA
|
||||
|
||||
|
@ -12,7 +12,7 @@ Provides a table-like interface to select/insert files in [Amazon S3](https://aw
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
s3(path [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
|
||||
s3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
|
||||
```
|
||||
|
||||
:::tip GCS
|
||||
@ -33,6 +33,7 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_
|
||||
and not ~~https://storage.cloud.google.com~~.
|
||||
:::
|
||||
|
||||
- `NOSIGN` - If this keyword is provided in place of credentials, requests will not be signed.
|
||||
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
|
||||
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
|
||||
- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.
|
||||
@ -185,6 +186,21 @@ INSERT INTO TABLE FUNCTION
|
||||
```
|
||||
As a result, the data is written into three files in different buckets: `my_bucket_1/file.csv`, `my_bucket_10/file.csv`, and `my_bucket_20/file.csv`.
|
||||
|
||||
## Accessing public buckets
|
||||
|
||||
ClickHouse tries to fetch credentials from many different types of sources.
|
||||
Sometimes this can cause problems when accessing buckets that are public, causing the client to return a `403` error code.
|
||||
This issue can be avoided by using the `NOSIGN` keyword, which forces the client to ignore the credentials and not sign the requests.
|
||||
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM s3(
|
||||
'https://datasets-documentation.s3.eu-west-3.amazonaws.com/aapl_stock.csv',
|
||||
NOSIGN,
|
||||
'CSVWithNames'
|
||||
)
|
||||
LIMIT 5;
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
|
@ -4046,7 +4046,7 @@ ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1;
|
||||
|
||||
Default value: `''`.
|
||||
|
||||
## stop_reading_on_first_cancel {#stop_reading_on_first_cancel}
|
||||
## partial_result_on_first_cancel {#partial_result_on_first_cancel}
|
||||
If set to `true` and the user wants to interrupt a query (for example, using `Ctrl+C` on the client), the query continues execution only for data that has already been read from the table. Afterwards, it returns a partial result of the query for the part of the table that was read. To fully stop the execution of a query without a partial result, the user has to send 2 cancel requests.
|
||||
|
||||
**Example with the setting disabled when pressing Ctrl+C**
|
||||
@ -4062,7 +4062,7 @@ Query was cancelled.
|
||||
|
||||
**Example with the setting enabled when pressing Ctrl+C**
|
||||
```sql
|
||||
SELECT sum(number) FROM numbers(10000000000) SETTINGS stop_reading_on_first_cancel=true
|
||||
SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true
|
||||
|
||||
┌──────sum(number)─┐
|
||||
│ 1355411451286266 │
|
||||
|
@ -272,10 +272,14 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
|
||||
Waits until a `ReplicatedMergeTree` family table is synced with the other replicas in the cluster, but for no more than `receive_timeout` seconds:
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT]
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
|
||||
```
|
||||
|
||||
After running this statement, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue. Then the query waits until the replica processes all of the fetched commands. If the `STRICT` modifier is specified, the query waits for the replication queue to become empty. The strict version may never succeed if new entries constantly appear in the replication queue.
|
||||
After running this statement, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue. Then the query waits until the replica processes all of the fetched commands. The following modifiers are supported:
|
||||
|
||||
- If the `STRICT` modifier is specified, the query waits for the replication queue to become empty. The strict version may never succeed if new entries constantly appear in the replication queue.
|
||||
- If the `LIGHTWEIGHT` modifier is specified, the query waits only until the `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries are processed.
|
||||
- If the `PULL` modifier is specified, the query only pulls replication queue entries from ZooKeeper and does not wait for anything to be processed.
|
||||
|
||||
### RESTART REPLICA {#query_language-system-restart-replica}
|
||||
|
||||
|
@ -240,7 +240,7 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
|
||||
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
|
||||
```
|
||||
|
||||
### RESTART REPLICA {#query_language-system-restart-replica}
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include "ClusterCopierApp.h"
|
||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||
#include <Common/StatusFile.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
#include <IO/ConnectionTimeouts.h>
|
||||
@ -192,6 +193,8 @@ void ClusterCopierApp::mainImpl()
|
||||
if (!task_file.empty())
|
||||
copier->uploadTaskDescription(task_path, task_file, config().getBool("task-upload-force", false));
|
||||
|
||||
zkutil::validateZooKeeperConfig(config());
|
||||
|
||||
copier->init();
|
||||
copier->process(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(context->getSettingsRef()));
|
||||
|
||||
|
@ -89,8 +89,12 @@ static std::vector<std::string> extractFromConfig(
|
||||
if (has_zk_includes && process_zk_includes)
|
||||
{
|
||||
DB::ConfigurationPtr bootstrap_configuration(new Poco::Util::XMLConfiguration(config_xml));
|
||||
|
||||
zkutil::validateZooKeeperConfig(*bootstrap_configuration);
|
||||
|
||||
zkutil::ZooKeeperPtr zookeeper = std::make_shared<zkutil::ZooKeeper>(
|
||||
*bootstrap_configuration, "zookeeper", nullptr);
|
||||
*bootstrap_configuration, bootstrap_configuration->has("zookeeper") ? "zookeeper" : "keeper", nullptr);
|
||||
|
||||
zkutil::ZooKeeperNodeCache zk_node_cache([&] { return zookeeper; });
|
||||
config_xml = processor.processConfig(&has_zk_includes, &zk_node_cache);
|
||||
}
|
||||
|
@ -87,7 +87,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
|
||||
|
||||
if (send_events)
|
||||
{
|
||||
for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i)
|
||||
for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i)
|
||||
{
|
||||
const auto counter = ProfileEvents::global_counters[i].load(std::memory_order_relaxed);
|
||||
const auto counter_increment = counter - prev_counters[i];
|
||||
@ -100,7 +100,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
|
||||
|
||||
if (send_events_cumulative)
|
||||
{
|
||||
for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i)
|
||||
for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i)
|
||||
{
|
||||
const auto counter = ProfileEvents::global_counters[i].load(std::memory_order_relaxed);
|
||||
std::string key{ProfileEvents::getName(static_cast<ProfileEvents::Event>(i))};
|
||||
@ -110,7 +110,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
|
||||
|
||||
if (send_metrics)
|
||||
{
|
||||
for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i)
|
||||
for (CurrentMetrics::Metric i = CurrentMetrics::Metric(0), end = CurrentMetrics::end(); i < end; ++i)
|
||||
{
|
||||
const auto value = CurrentMetrics::values[i].load(std::memory_order_relaxed);
|
||||
|
||||
|
@ -815,7 +815,8 @@ try
|
||||
}
|
||||
);
|
||||
|
||||
bool has_zookeeper = config().has("zookeeper");
|
||||
zkutil::validateZooKeeperConfig(config());
|
||||
bool has_zookeeper = zkutil::hasZooKeeperConfig(config());
|
||||
|
||||
zkutil::ZooKeeperNodeCache main_config_zk_node_cache([&] { return global_context->getZooKeeper(); });
|
||||
zkutil::EventPtr main_config_zk_changed_event = std::make_shared<Poco::Event>();
|
||||
@ -1307,7 +1308,7 @@ try
|
||||
{
|
||||
/// We do not load ZooKeeper configuration on the first config loading
|
||||
/// because TestKeeper server is not started yet.
|
||||
if (config->has("zookeeper"))
|
||||
if (zkutil::hasZooKeeperConfig(*config))
|
||||
global_context->reloadZooKeeperIfChanged(config);
|
||||
|
||||
global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config);
|
||||
|
@ -288,7 +288,8 @@ public:
|
||||
readVarUInt(size, buf);
|
||||
|
||||
if (unlikely(size > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);
|
||||
|
||||
if (limit_num_elems && unlikely(size > max_elems))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elems);
|
||||
@ -367,7 +368,8 @@ struct GroupArrayNodeBase
|
||||
UInt64 size;
|
||||
readVarUInt(size, buf);
|
||||
if (unlikely(size > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);
|
||||
|
||||
Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
|
||||
node->size = size;
|
||||
@ -621,7 +623,8 @@ public:
|
||||
return;
|
||||
|
||||
if (unlikely(elems > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);
|
||||
|
||||
if (limit_num_elems && unlikely(elems > max_elems))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elems);
|
||||
|
@ -79,7 +79,8 @@ public:
|
||||
{
|
||||
length_to_resize = applyVisitor(FieldVisitorConvertToNumber<UInt64>(), params[1]);
|
||||
if (length_to_resize > AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -167,7 +168,8 @@ public:
|
||||
readVarUInt(size, buf);
|
||||
|
||||
if (size > AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE);
|
||||
|
||||
Array & arr = data(place).value;
|
||||
|
||||
|
@ -144,7 +144,8 @@ public:
|
||||
readVarUInt(size, buf);
|
||||
|
||||
if (unlikely(size > AGGREGATE_FUNCTION_MOVING_MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_MOVING_MAX_ARRAY_SIZE);
|
||||
|
||||
if (size > 0)
|
||||
{
|
||||
|
@ -127,7 +127,8 @@ public:
|
||||
if (size == 0)
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect size (0) in groupBitmap.");
|
||||
if (size > max_size)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size in groupBitmap.");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size in groupBitmap (maximum: {})", max_size);
|
||||
|
||||
/// TODO: this is unnecessary copying - it will be better to read and deserialize in one pass.
|
||||
std::unique_ptr<char[]> buf(new char[size]);
|
||||
|
@ -294,7 +294,8 @@ public:
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too many bins");
|
||||
static constexpr size_t max_size = 1_GiB;
|
||||
if (size > max_size)
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size in histogram.");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size in histogram (maximum: {})", max_size);
|
||||
|
||||
buf.readStrict(reinterpret_cast<char *>(points), size * sizeof(WeightedValue));
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ struct AggregateFunctionIntervalLengthSumData
|
||||
readBinary(size, buf);
|
||||
|
||||
if (unlikely(size > MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {})", MAX_ARRAY_SIZE);
|
||||
|
||||
segments.clear();
|
||||
segments.reserve(size);
|
||||
|
@ -0,0 +1,36 @@
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.h>
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
|
||||
namespace DB
{
struct Settings;

namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
AggregateFunctionPtr createAggregateFunctionKolmogorovSmirnovTest(
|
||||
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||
{
|
||||
assertBinary(name, argument_types);
|
||||
|
||||
if (!isNumber(argument_types[0]) || !isNumber(argument_types[1]))
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Aggregate function {} only supports numerical types", name);
|
||||
|
||||
return std::make_shared<AggregateFunctionKolmogorovSmirnov>(argument_types, parameters);
|
||||
}
|
||||
|
||||
|
||||
}
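/// Usage sketch (table and column names are hypothetical; the parameters and arguments
/// follow the checks above): SELECT kolmogorovSmirnovTest('two-sided', 'auto')(value, sample_index) FROM t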
|
||||
|
||||
void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory)
|
||||
{
|
||||
factory.registerFunction("kolmogorovSmirnovTest", createAggregateFunctionKolmogorovSmirnovTest, AggregateFunctionFactory::CaseInsensitive);
|
||||
}
|
||||
|
||||
}
|
323
src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.h
Normal file
@ -0,0 +1,323 @@
|
||||
#pragma once
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/StatCommon.h>
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <Columns/ColumnTuple.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/assert_cast.h>
|
||||
#include <Common/ArenaAllocator.h>
|
||||
#include <Common/PODArray_fwd.h>
|
||||
#include <base/types.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
#include <numeric>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
struct Settings;
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
struct KolmogorovSmirnov : public StatisticalSample<Float64, Float64>
|
||||
{
|
||||
enum class Alternative
|
||||
{
|
||||
TwoSided,
|
||||
Less,
|
||||
Greater
|
||||
};
|
||||
|
||||
std::pair<Float64, Float64> getResult(Alternative alternative, String method)
|
||||
{
|
||||
::sort(x.begin(), x.end());
|
||||
::sort(y.begin(), y.end());
|
||||
|
||||
Float64 max_s = std::numeric_limits<Float64>::lowest();
|
||||
Float64 min_s = std::numeric_limits<Float64>::max();
|
||||
Float64 now_s = 0;
|
||||
UInt64 pos_x = 0;
|
||||
UInt64 pos_y = 0;
|
||||
UInt64 n1 = x.size();
|
||||
UInt64 n2 = y.size();
|
||||
|
||||
const Float64 n1_d = 1. / n1;
|
||||
const Float64 n2_d = 1. / n2;
|
||||
const Float64 tol = 1e-7;
|
||||
|
||||
// reference: https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test
|
||||
while (pos_x < x.size() && pos_y < y.size())
|
||||
{
|
||||
if (likely(fabs(x[pos_x] - y[pos_y]) >= tol))
|
||||
{
|
||||
if (x[pos_x] < y[pos_y])
|
||||
{
|
||||
now_s += n1_d;
|
||||
++pos_x;
|
||||
}
|
||||
else
|
||||
{
|
||||
now_s -= n2_d;
|
||||
++pos_y;
|
||||
}
|
||||
max_s = std::max(max_s, now_s);
|
||||
min_s = std::min(min_s, now_s);
|
||||
}
|
||||
else
|
||||
{
|
||||
now_s += n1_d;
|
||||
++pos_x;
|
||||
}
|
||||
}
|
||||
now_s += n1_d * (x.size() - pos_x) - n2_d * (y.size() - pos_y);
|
||||
min_s = std::min(min_s, now_s);
|
||||
max_s = std::max(max_s, now_s);
|
||||
|
||||
Float64 d = 0;
|
||||
if (alternative == Alternative::TwoSided)
|
||||
d = std::max(std::abs(max_s), std::abs(min_s));
|
||||
else if (alternative == Alternative::Less)
|
||||
d = -min_s;
|
||||
else if (alternative == Alternative::Greater)
|
||||
d = max_s;
|
||||
|
||||
UInt64 g = std::gcd(n1, n2);
|
||||
UInt64 nx_g = n1 / g;
|
||||
UInt64 ny_g = n2 / g;
|
||||
|
||||
if (method == "auto")
|
||||
method = std::max(n1, n2) <= 10000 ? "exact" : "asymp";
|
||||
else if (method == "exact" && nx_g >= std::numeric_limits<Int32>::max() / ny_g)
|
||||
method = "asymp";
|
||||
|
||||
Float64 p_value = std::numeric_limits<Float64>::infinity();
|
||||
|
||||
if (method == "exact")
|
||||
{
|
||||
/* reference:
|
||||
* Gunar Schröer and Dietrich Trenkler
|
||||
* Exact and Randomization Distributions of Kolmogorov-Smirnov, Tests for Two or Three Samples
|
||||
*
|
||||
* and
|
||||
*
|
||||
* Thomas Viehmann
|
||||
* Numerically more stable computation of the p-values for the two-sample Kolmogorov-Smirnov test
|
||||
*/
|
||||
if (n2 > n1)
|
||||
std::swap(n1, n2);
|
||||
|
||||
const Float64 f_n1 = static_cast<Float64>(n1);
|
||||
const Float64 f_n2 = static_cast<Float64>(n2);
|
||||
const Float64 k_d = (0.5 + floor(d * f_n2 * f_n1 - tol)) / (f_n2 * f_n1);
|
||||
PaddedPODArray<Float64> c(n1 + 1);
|
||||
|
||||
auto check = alternative == Alternative::TwoSided ?
|
||||
[](const Float64 & q, const Float64 & r, const Float64 & s) { return fabs(r - s) >= q; }
|
||||
: [](const Float64 & q, const Float64 & r, const Float64 & s) { return r - s >= q; };
|
||||
|
||||
c[0] = 0;
|
||||
for (UInt64 j = 1; j <= n1; j++)
|
||||
if (check(k_d, 0., j / f_n1))
|
||||
c[j] = 1.;
|
||||
else
|
||||
c[j] = c[j - 1];
|
||||
|
||||
for (UInt64 i = 1; i <= n2; i++)
|
||||
{
|
||||
if (check(k_d, i / f_n2, 0.))
|
||||
c[0] = 1.;
|
||||
for (UInt64 j = 1; j <= n1; j++)
|
||||
if (check(k_d, i / f_n2, j / f_n1))
|
||||
c[j] = 1.;
|
||||
else
|
||||
{
|
||||
Float64 v = i / static_cast<Float64>(i + j);
|
||||
Float64 w = j / static_cast<Float64>(i + j);
|
||||
c[j] = v * c[j] + w * c[j - 1];
|
||||
}
|
||||
}
|
||||
p_value = c[n1];
|
||||
}
|
||||
else if (method == "asymp")
|
||||
{
|
||||
Float64 n = std::min(n1, n2);
|
||||
Float64 m = std::max(n1, n2);
|
||||
Float64 p = sqrt((n * m) / (n + m)) * d;
|
||||
|
||||
if (alternative == Alternative::TwoSided)
|
||||
{
|
||||
/* reference:
|
||||
* J.DURBIN
|
||||
* Distribution theory for tests based on the sample distribution function
|
||||
*/
|
||||
Float64 new_val, old_val, s, w, z;
|
||||
UInt64 k_max = static_cast<UInt64>(sqrt(2 - log(tol)));
|
||||
|
||||
if (p < 1)
|
||||
{
|
||||
z = - (M_PI_2 * M_PI_4) / (p * p); /// == -pi^2 / (8 * p^2)
|
||||
w = log(p);
|
||||
s = 0;
|
||||
for (UInt64 k = 1; k < k_max; k += 2)
|
||||
s += exp(k * k * z - w);
|
||||
p = s / 0.398942280401432677939946059934; /// 0.3989... == 1 / sqrt(2 * pi)
|
||||
}
|
||||
else
|
||||
{
|
||||
z = -2 * p * p;
|
||||
s = -1;
|
||||
UInt64 k = 1;
|
||||
old_val = 0;
|
||||
new_val = 1;
|
||||
while (fabs(old_val - new_val) > tol)
|
||||
{
|
||||
old_val = new_val;
|
||||
new_val += 2 * s * exp(z * k * k);
|
||||
s *= -1;
|
||||
k++;
|
||||
}
|
||||
p = new_val;
|
||||
}
|
||||
p_value = 1 - p;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* reference:
|
||||
* J. L. HODGES, Jr
|
||||
* The significance probability of the Smirnov two-sample test
|
||||
*/
|
||||
|
||||
// Use Hodges' suggested approximation Eqn 5.3
|
||||
// Requires m to be the larger of (n1, n2)
|
||||
Float64 expt = -2 * p * p - 2 * p * (m + 2 * n) / sqrt(m * n * (m + n)) / 3.0;
|
||||
p_value = exp(expt);
|
||||
}
|
||||
}
|
||||
return {d, p_value};
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
class AggregateFunctionKolmogorovSmirnov final:
|
||||
public IAggregateFunctionDataHelper<KolmogorovSmirnov, AggregateFunctionKolmogorovSmirnov>
|
||||
{
|
||||
private:
|
||||
using Alternative = typename KolmogorovSmirnov::Alternative;
|
||||
Alternative alternative = Alternative::TwoSided;
|
||||
String method = "auto";
|
||||
|
||||
public:
|
||||
explicit AggregateFunctionKolmogorovSmirnov(const DataTypes & arguments, const Array & params)
|
||||
: IAggregateFunctionDataHelper<KolmogorovSmirnov, AggregateFunctionKolmogorovSmirnov> ({arguments}, {}, createResultType())
|
||||
{
|
||||
if (params.size() > 2)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} require two parameter or less", getName());
|
||||
|
||||
if (params.empty())
|
||||
return;
|
||||
|
||||
if (params[0].getType() != Field::Types::String)
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a String", getName());
|
||||
|
||||
const auto & param = params[0].get<String>();
|
||||
if (param == "two-sided")
|
||||
alternative = Alternative::TwoSided;
|
||||
else if (param == "less")
|
||||
alternative = Alternative::Less;
|
||||
else if (param == "greater")
|
||||
alternative = Alternative::Greater;
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown parameter in aggregate function {}. "
|
||||
"It must be one of: 'two-sided', 'less', 'greater'", getName());
|
||||
|
||||
if (params.size() != 2)
|
||||
return;
|
||||
|
||||
if (params[1].getType() != Field::Types::String)
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require second parameter to be a String", getName());
|
||||
|
||||
method = params[1].get<String>();
|
||||
if (method != "auto" && method != "exact" && method != "asymp")
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown method in aggregate function {}. "
|
||||
"It must be one of: 'auto', 'exact', 'asymp'", getName());
|
||||
}
|
||||
|
||||
String getName() const override
|
||||
{
|
||||
return "kolmogorovSmirnovTest";
|
||||
}
|
||||
|
||||
bool allocatesMemoryInArena() const override { return true; }
|
||||
|
||||
static DataTypePtr createResultType()
|
||||
{
|
||||
DataTypes types
|
||||
{
|
||||
std::make_shared<DataTypeNumber<Float64>>(),
|
||||
std::make_shared<DataTypeNumber<Float64>>(),
|
||||
};
|
||||
|
||||
Strings names
|
||||
{
|
||||
"d_statistic",
|
||||
"p_value"
|
||||
};
|
||||
|
||||
return std::make_shared<DataTypeTuple>(
|
||||
std::move(types),
|
||||
std::move(names)
|
||||
);
|
||||
}
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
Float64 value = columns[0]->getFloat64(row_num);
|
||||
UInt8 is_second = columns[1]->getUInt(row_num);
|
||||
if (is_second)
|
||||
this->data(place).addY(value, arena);
|
||||
else
|
||||
this->data(place).addX(value, arena);
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
this->data(place).merge(this->data(rhs), arena);
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
this->data(place).write(buf);
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
|
||||
{
|
||||
this->data(place).read(buf, arena);
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
if (!this->data(place).size_x || !this->data(place).size_y)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName());
|
||||
|
||||
auto [d_statistic, p_value] = this->data(place).getResult(alternative, method);
|
||||
|
||||
/// Because p-value is a probability.
|
||||
p_value = std::min(1.0, std::max(0.0, p_value));
|
||||
|
||||
auto & column_tuple = assert_cast<ColumnTuple &>(to);
|
||||
auto & column_stat = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(0));
|
||||
auto & column_value = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(1));
|
||||
|
||||
column_stat.getData().push_back(d_statistic);
|
||||
column_value.getData().push_back(p_value);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
}
|
@ -140,7 +140,8 @@ public:
|
||||
readVarUInt(size, buf);
|
||||
|
||||
if (unlikely(size > AGGREGATE_FUNCTION_MAX_INTERSECTIONS_MAX_ARRAY_SIZE))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", AGGREGATE_FUNCTION_MAX_INTERSECTIONS_MAX_ARRAY_SIZE);
|
||||
|
||||
auto & value = this->data(place).value;
|
||||
|
||||
|
@ -26,9 +26,11 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
template <typename> class QuantileTiming;
|
||||
template <typename> class QuantileApprox;
|
||||
|
||||
|
||||
/** Generic aggregate function for calculation of quantiles.
|
||||
@ -60,6 +62,7 @@ private:
|
||||
using ColVecType = ColumnVectorOrDecimal<Value>;
|
||||
|
||||
static constexpr bool returns_float = !(std::is_same_v<FloatReturnType, void>);
|
||||
static constexpr bool is_quantile_approx = std::is_same_v<Data, QuantileApprox<Value>>;
|
||||
static_assert(!is_decimal<Value> || !returns_float);
|
||||
|
||||
QuantileLevels<Float64> levels;
|
||||
@ -67,22 +70,57 @@ private:
|
||||
/// Used when there are single level to get.
|
||||
Float64 level = 0.5;
|
||||
|
||||
/// Used for the approximate version of the algorithm (Greenwald-Khanna)
|
||||
ssize_t accuracy = 10000;
|
||||
|
||||
DataTypePtr & argument_type;
|
||||
|
||||
public:
|
||||
AggregateFunctionQuantile(const DataTypes & argument_types_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionQuantile<Value, Data, Name, has_second_arg, FloatReturnType, returns_many>>(
|
||||
argument_types_, params, createResultType(argument_types_))
|
||||
, levels(params, returns_many)
|
||||
, levels(is_quantile_approx && !params.empty() ? Array(params.begin() + 1, params.end()) : params, returns_many)
|
||||
, level(levels.levels[0])
|
||||
, argument_type(this->argument_types[0])
|
||||
{
|
||||
if (!returns_many && levels.size() > 1)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} require one parameter or less", getName());
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires one level parameter or less", getName());
|
||||
|
||||
if constexpr (is_quantile_approx)
|
||||
{
|
||||
if (params.empty())
|
||||
throw Exception(
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one param", getName());
|
||||
|
||||
const auto & accuracy_field = params[0];
|
||||
if (!isInt64OrUInt64FieldType(accuracy_field.getType()))
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} requires accuracy parameter with integer type", getName());
|
||||
|
||||
if (accuracy_field.getType() == Field::Types::Int64)
|
||||
accuracy = accuracy_field.get<Int64>();
|
||||
else
|
||||
accuracy = accuracy_field.get<UInt64>();
|
||||
|
||||
if (accuracy <= 0)
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"Aggregate function {} requires accuracy parameter with positive value but is {}",
|
||||
getName(),
|
||||
accuracy);
|
||||
}
|
||||
}
|
||||
|
||||
String getName() const override { return Name::name; }
|
||||
|
||||
void create(AggregateDataPtr __restrict place) const override /// NOLINT
|
||||
{
|
||||
if constexpr (is_quantile_approx)
|
||||
new (place) Data(accuracy);
|
||||
else
|
||||
new (place) Data;
|
||||
}
|
||||
|
||||
static DataTypePtr createResultType(const DataTypes & argument_types_)
|
||||
{
|
||||
DataTypePtr res;
|
||||
@ -257,4 +295,7 @@ struct NameQuantilesBFloat16 { static constexpr auto name = "quantilesBFloat16";
|
||||
struct NameQuantileBFloat16Weighted { static constexpr auto name = "quantileBFloat16Weighted"; };
|
||||
struct NameQuantilesBFloat16Weighted { static constexpr auto name = "quantilesBFloat16Weighted"; };
|
||||
|
||||
struct NameQuantileApprox { static constexpr auto name = "quantileApprox"; };
|
||||
struct NameQuantilesApprox { static constexpr auto name = "quantilesApprox"; };
|
||||
|
||||
}
|
||||
|
71
src/AggregateFunctions/AggregateFunctionQuantileApprox.cpp
Normal file
@ -0,0 +1,71 @@
|
||||
#include <AggregateFunctions/AggregateFunctionQuantile.h>
|
||||
#include <AggregateFunctions/QuantileApprox.h>
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/Helpers.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <Core/Field.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
template <typename Value, bool _> using FuncQuantileApprox = AggregateFunctionQuantile<Value, QuantileApprox<Value>, NameQuantileApprox, false, void, false>;
|
||||
template <typename Value, bool _> using FuncQuantilesApprox = AggregateFunctionQuantile<Value, QuantileApprox<Value>, NameQuantilesApprox, false, void, true>;
|
||||
|
||||
template <template <typename, bool> class Function>
|
||||
AggregateFunctionPtr createAggregateFunctionQuantile(
|
||||
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
|
||||
{
|
||||
/// Second argument type check doesn't depend on the type of the first one.
|
||||
Function<void, true>::assertSecondArg(argument_types);
|
||||
|
||||
const DataTypePtr & argument_type = argument_types[0];
|
||||
WhichDataType which(argument_type);
|
||||
|
||||
#define DISPATCH(TYPE) \
|
||||
if (which.idx == TypeIndex::TYPE) \
|
||||
return std::make_shared<Function<TYPE, true>>(argument_types, params);
|
||||
FOR_BASIC_NUMERIC_TYPES(DISPATCH)
|
||||
#undef DISPATCH
|
||||
|
||||
if (which.idx == TypeIndex::Date) return std::make_shared<Function<DataTypeDate::FieldType, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::DateTime) return std::make_shared<Function<DataTypeDateTime::FieldType, false>>(argument_types, params);
|
||||
|
||||
if (which.idx == TypeIndex::Decimal32) return std::make_shared<Function<Decimal32, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::Decimal64) return std::make_shared<Function<Decimal64, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::Decimal128) return std::make_shared<Function<Decimal128, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::Decimal256) return std::make_shared<Function<Decimal256, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::DateTime64) return std::make_shared<Function<DateTime64, false>>(argument_types, params);
|
||||
|
||||
if (which.idx == TypeIndex::Int128) return std::make_shared<Function<Int128, true>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<UInt128, true>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::Int256) return std::make_shared<Function<Int256, true>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::UInt256) return std::make_shared<Function<UInt256, true>>(argument_types, params);
|
||||
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",
|
||||
argument_type->getName(), name);
|
||||
}
|
||||
|
||||
}
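/// Usage sketch (names hypothetical; the first parameter is the accuracy, the remaining ones
/// are levels, as parsed in AggregateFunctionQuantile.h):
///     SELECT quantileApprox(10000, 0.5)(number) FROM numbers(1000)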
|
||||
|
||||
void registerAggregateFunctionsQuantileApprox(AggregateFunctionFactory & factory)
|
||||
{
|
||||
/// For aggregate functions returning array we cannot return NULL on empty set.
|
||||
AggregateFunctionProperties properties = { .returns_default_when_only_null = true };
|
||||
|
||||
factory.registerFunction(NameQuantileApprox::name, createAggregateFunctionQuantile<FuncQuantileApprox>);
|
||||
factory.registerFunction(NameQuantilesApprox::name, {createAggregateFunctionQuantile<FuncQuantilesApprox>, properties});
|
||||
|
||||
/// 'medianApprox' is an alias for 'quantileApprox'
|
||||
factory.registerAlias("medianApprox", NameQuantileApprox::name);
|
||||
}
|
||||
|
||||
}
|
@ -324,7 +324,8 @@ public:
|
||||
return;
|
||||
|
||||
if (unlikely(size > max_node_size_deserialize))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
|
||||
throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
|
||||
"Too large array size (maximum: {})", max_node_size_deserialize);
|
||||
|
||||
auto & value = data(place).value;
|
||||
|
||||
|
@ -1,5 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <base/arithmeticOverflow.h>
|
||||
|
||||
#include <array>
|
||||
#include <string_view>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
@ -43,7 +45,19 @@ struct AggregateFunctionSparkbarData
|
||||
|
||||
auto [it, inserted] = points.insert({x, y});
|
||||
if (!inserted)
|
||||
it->getMapped() += y;
|
||||
{
|
||||
if constexpr (std::is_floating_point_v<Y>)
|
||||
{
|
||||
it->getMapped() += y;
|
||||
return it->getMapped();
|
||||
}
|
||||
else
|
||||
{
|
||||
Y res;
|
||||
bool has_overflow = common::addOverflow(it->getMapped(), y, res);
|
||||
it->getMapped() = has_overflow ? std::numeric_limits<Y>::max() : res;
|
||||
}
|
||||
}
|
||||
return it->getMapped();
|
||||
}
|
||||
|
||||
@ -117,6 +131,7 @@ class AggregateFunctionSparkbar final
|
||||
{
|
||||
|
||||
private:
|
||||
static constexpr size_t BAR_LEVELS = 8;
|
||||
const size_t width = 0;
|
||||
|
||||
/// Range for x specified in parameters.
|
||||
@ -126,8 +141,8 @@ private:
|
||||
|
||||
size_t updateFrame(ColumnString::Chars & frame, Y value) const
|
||||
{
|
||||
static constexpr std::array<std::string_view, 9> bars{" ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
|
||||
const auto & bar = (isNaN(value) || value < 1 || 8 < value) ? bars[0] : bars[static_cast<UInt8>(value)];
|
||||
static constexpr std::array<std::string_view, BAR_LEVELS + 1> bars{" ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
|
||||
const auto & bar = (isNaN(value) || value < 1 || static_cast<Y>(BAR_LEVELS) < value) ? bars[0] : bars[static_cast<UInt8>(value)];
|
||||
frame.insert(bar.begin(), bar.end());
|
||||
return bar.size();
|
||||
}
|
||||
@ -161,7 +176,7 @@ private:
|
||||
}
|
||||
|
||||
PaddedPODArray<Y> histogram(width, 0);
|
||||
PaddedPODArray<UInt64> fhistogram(width, 0);
|
||||
PaddedPODArray<UInt64> count_histogram(width, 0); /// The number of points in each bucket
|
||||
|
||||
for (const auto & point : data.points)
|
||||
{
|
||||
@ -176,22 +191,30 @@ private:
|
||||
Float64 w = histogram.size();
|
||||
size_t index = std::min<size_t>(static_cast<size_t>(w / delta * value), histogram.size() - 1);
|
||||
|
||||
if (std::numeric_limits<Y>::max() - histogram[index] > point.getMapped())
|
||||
Y res;
|
||||
bool has_overflow = false;
|
||||
if constexpr (std::is_floating_point_v<Y>)
|
||||
res = histogram[index] + point.getMapped();
|
||||
else
|
||||
has_overflow = common::addOverflow(histogram[index], point.getMapped(), res);
|
||||
|
||||
if (unlikely(has_overflow))
|
||||
{
|
||||
histogram[index] += point.getMapped();
|
||||
fhistogram[index] += 1;
|
||||
/// In case of overflow, just saturate
|
||||
/// Do not count new values, because we do not know how many of them were added
|
||||
histogram[index] = std::numeric_limits<Y>::max();
|
||||
}
|
||||
else
|
||||
{
|
||||
/// In case of overflow, just saturate
|
||||
histogram[index] = std::numeric_limits<Y>::max();
|
||||
histogram[index] = res;
|
||||
count_histogram[index] += 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < histogram.size(); ++i)
|
||||
{
|
||||
if (fhistogram[i] > 0)
|
||||
histogram[i] /= fhistogram[i];
|
||||
if (count_histogram[i] > 0)
|
||||
histogram[i] /= count_histogram[i];
|
||||
}
|
||||
|
||||
Y y_max = 0;
|
||||
@ -209,12 +232,30 @@ private:
|
||||
return;
|
||||
}
|
||||
|
||||
/// Scale the histogram to the range [0, BAR_LEVELS]
|
||||
for (auto & y : histogram)
|
||||
{
|
||||
if (isNaN(y) || y <= 0)
|
||||
{
|
||||
y = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
constexpr auto levels_num = static_cast<Y>(BAR_LEVELS - 1);
|
||||
if constexpr (std::is_floating_point_v<Y>)
|
||||
{
|
||||
y = y / (y_max / levels_num) + 1;
|
||||
}
|
||||
else
|
||||
y = y * 7 / y_max + 1;
|
||||
{
|
||||
Y scaled;
|
||||
bool has_overflow = common::mulOverflow<Y>(y, levels_num, scaled);
|
||||
|
||||
if (has_overflow)
|
||||
y = y / (y_max / levels_num) + 1;
|
||||
else
|
||||
y = scaled / y_max + 1;
|
||||
}
|
||||
}
|
||||
|
||||
size_t sz = 0;
|
||||
|
477
src/AggregateFunctions/QuantileApprox.h
Normal file
@ -0,0 +1,477 @@
|
||||
#pragma once
|
||||
|
||||
#include <cmath>
|
||||
#include <base/sort.h>
|
||||
#include <Common/RadixSort.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
#include <IO/ReadBuffer.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
}
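/// Approximate quantile sampler in the spirit of the Greenwald-Khanna algorithm (see the note
/// in AggregateFunctionQuantile.h): it keeps a compressed summary of the inserted values that
/// can answer rank (quantile) queries within a configurable relative error.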
|
||||
|
||||
template <typename T>
|
||||
class ApproxSampler
|
||||
{
|
||||
public:
|
||||
struct Stats
|
||||
{
|
||||
T value; // the sampled value
|
||||
Int64 g; // the minimum rank jump from the previous value's minimum rank
|
||||
Int64 delta; // the maximum span of the rank
|
||||
|
||||
Stats() = default;
|
||||
Stats(T value_, Int64 g_, Int64 delta_) : value(value_), g(g_), delta(delta_) {}
|
||||
};
|
||||
|
||||
struct QueryResult
|
||||
{
|
||||
size_t index;
|
||||
Int64 rank;
|
||||
T value;
|
||||
|
||||
QueryResult(size_t index_, Int64 rank_, T value_) : index(index_), rank(rank_), value(value_) { }
|
||||
};
|
||||
|
||||
ApproxSampler() = default;
|
||||
|
||||
explicit ApproxSampler(
|
||||
double relative_error_,
|
||||
size_t compress_threshold_ = default_compress_threshold,
|
||||
size_t count_ = 0,
|
||||
bool compressed_ = false)
|
||||
: relative_error(relative_error_)
|
||||
, compress_threshold(compress_threshold_)
|
||||
, count(count_)
|
||||
, compressed(compressed_)
|
||||
{
|
||||
sampled.reserve(compress_threshold);
|
||||
backup_sampled.reserve(compress_threshold);
|
||||
|
||||
head_sampled.reserve(default_head_size);
|
||||
}
|
||||
|
||||
bool isCompressed() const { return compressed; }
|
||||
void setCompressed() { compressed = true; }
|
||||
|
||||
void insert(T x)
|
||||
{
|
||||
head_sampled.push_back(x);
|
||||
compressed = false;
|
||||
if (head_sampled.size() >= default_head_size)
|
||||
{
|
||||
withHeadBufferInserted();
|
||||
if (sampled.size() >= compress_threshold)
|
||||
compress();
|
||||
}
|
||||
}
|
||||
|
||||
void query(const Float64 * percentiles, const size_t * indices, size_t size, T * result) const
|
||||
{
|
||||
if (!head_sampled.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot operate on an uncompressed summary, call compress() first");
|
||||
|
||||
if (sampled.empty())
|
||||
{
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
result[i] = T();
|
||||
return;
|
||||
}
|
||||
|
||||
Int64 current_max = std::numeric_limits<Int64>::min();
|
||||
for (const auto & stats : sampled)
|
||||
current_max = std::max(stats.delta + stats.g, current_max);
|
||||
Int64 target_error = current_max / 2;
|
||||
|
||||
size_t index = 0;
|
||||
auto min_rank = sampled[0].g;
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
{
|
||||
double percentile = percentiles[indices[i]];
|
||||
if (percentile <= relative_error)
|
||||
{
|
||||
result[indices[i]] = sampled.front().value;
|
||||
}
|
||||
else if (percentile >= 1 - relative_error)
|
||||
{
|
||||
result[indices[i]] = sampled.back().value;
|
||||
}
|
||||
else
|
||||
{
|
||||
QueryResult res = findApproxQuantile(index, min_rank, target_error, percentile);
|
||||
index = res.index;
|
||||
min_rank = res.rank;
|
||||
result[indices[i]] = res.value;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void compress()
|
||||
{
|
||||
if (compressed)
|
||||
return;
|
||||
|
||||
withHeadBufferInserted();
|
||||
|
||||
doCompress(2 * relative_error * count);
|
||||
compressed = true;
|
||||
}
|
||||
|
||||
|
||||
void merge(const ApproxSampler & other)
|
||||
{
|
||||
if (other.count == 0)
|
||||
return;
|
||||
else if (count == 0)
|
||||
{
|
||||
compress_threshold = other.compress_threshold;
|
||||
relative_error = other.relative_error;
|
||||
count = other.count;
|
||||
compressed = other.compressed;
|
||||
|
||||
sampled.resize(other.sampled.size());
|
||||
memcpy(sampled.data(), other.sampled.data(), sizeof(Stats) * other.sampled.size());
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Merge the two buffers.
|
||||
// The GK algorithm is a bit unclear about it, but we need to adjust the statistics during the
|
||||
// merging. The main idea is that samples that come from one side will suffer from the lack of
|
||||
// precision of the other.
|
||||
// As a concrete example, take two QuantileSummaries whose samples (value, g, delta) are:
|
||||
// `a = [(0, 1, 0), (20, 99, 0)]` and `b = [(10, 1, 0), (30, 49, 0)]`
|
||||
// This means `a` has 100 values, whose minimum is 0 and maximum is 20,
|
||||
// while `b` has 50 values, between 10 and 30.
|
||||
// The resulting samples of the merge will be:
|
||||
// a+b = [(0, 1, 0), (10, 1, ??), (20, 99, ??), (30, 49, 0)]
|
||||
// The values of `g` do not change, as they represent the minimum number of values between two
|
||||
// consecutive samples. The values of `delta` should be adjusted, however.
|
||||
// Take the case of the sample `10` from `b`. In the original stream, it could have appeared
|
||||
// right after `0` (as expressed by `g=1`) or right before `20`, so `delta=99+0-1=98`.
|
||||
// In the GK algorithm's style of working in terms of maximum bounds, one can observe that the
|
||||
// maximum additional uncertainty over samples coming from `b` is `max(g_a + delta_a) =
|
||||
// floor(2 * eps_a * n_a)`. Likewise, additional uncertainty over samples from `a` is
|
||||
// `floor(2 * eps_b * n_b)`.
|
||||
// Only samples that interleave the other side are affected. That means that samples from
|
||||
// one side that are lesser (or greater) than all samples from the other side are just copied
|
||||
// unmodified.
|
||||
// If the merging instances have different `relativeError`, the resulting instance will carry
|
||||
// the largest one: `eps_ab = max(eps_a, eps_b)`.
|
||||
// The main invariant of the GK algorithm is kept:
|
||||
// `max(g_ab + delta_ab) <= floor(2 * eps_ab * (n_a + n_b))` since
|
||||
// `max(g_ab + delta_ab) <= floor(2 * eps_a * n_a) + floor(2 * eps_b * n_b)`
|
||||
// Finally, one can see how the `insert(x)` operation can be expressed as `merge([(x, 1, 0])`
|
||||
compress();
|
||||
|
||||
backup_sampled.clear();
|
||||
backup_sampled.reserve(sampled.size() + other.sampled.size());
|
||||
double merged_relative_error = std::max(relative_error, other.relative_error);
|
||||
size_t merged_count = count + other.count;
|
||||
Int64 additional_self_delta = static_cast<Int64>(std::floor(2 * other.relative_error * other.count));
|
||||
Int64 additional_other_delta = static_cast<Int64>(std::floor(2 * relative_error * count));
|
||||
|
||||
// Do a merge of two sorted lists until one of the lists is fully consumed
|
||||
size_t self_idx = 0;
|
||||
size_t other_idx = 0;
|
||||
while (self_idx < sampled.size() && other_idx < other.sampled.size())
|
||||
{
|
||||
const Stats & self_sample = sampled[self_idx];
|
||||
const Stats & other_sample = other.sampled[other_idx];
|
||||
|
||||
// Detect next sample
|
||||
Stats next_sample;
|
||||
Int64 additional_delta = 0;
|
||||
if (self_sample.value < other_sample.value)
|
||||
{
|
||||
++self_idx;
|
||||
next_sample = self_sample;
|
||||
additional_delta = other_idx > 0 ? additional_self_delta : 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
++other_idx;
|
||||
next_sample = other_sample;
|
||||
additional_delta = self_idx > 0 ? additional_other_delta : 0;
|
||||
}
|
||||
|
||||
// Insert it
|
||||
next_sample.delta += additional_delta;
|
||||
backup_sampled.emplace_back(std::move(next_sample));
|
||||
}
|
||||
|
||||
// Copy the remaining samples from the other list
|
||||
// (by construction, at most one `while` loop will run)
|
||||
while (self_idx < sampled.size())
|
||||
{
|
||||
backup_sampled.emplace_back(sampled[self_idx]);
|
||||
++self_idx;
|
||||
}
|
||||
while (other_idx < other.sampled.size())
|
||||
{
|
||||
backup_sampled.emplace_back(other.sampled[other_idx]);
|
||||
++other_idx;
|
||||
}
|
||||
|
||||
std::swap(sampled, backup_sampled);
|
||||
relative_error = merged_relative_error;
|
||||
count = merged_count;
|
||||
compress_threshold = other.compress_threshold;
|
||||
|
||||
doCompress(2 * merged_relative_error * merged_count);
|
||||
compressed = true;
|
||||
}
|
||||
}
|
||||
|
||||
void write(WriteBuffer & buf) const
|
||||
{
|
||||
writeIntBinary<size_t>(compress_threshold, buf);
|
||||
writeFloatBinary<double>(relative_error, buf);
|
||||
writeIntBinary<size_t>(count, buf);
|
||||
writeIntBinary<size_t>(sampled.size(), buf);
|
||||
|
||||
for (const auto & stats : sampled)
|
||||
{
|
||||
writeFloatBinary<T>(stats.value, buf);
|
||||
writeIntBinary<Int64>(stats.g, buf);
|
||||
writeIntBinary<Int64>(stats.delta, buf);
|
||||
}
|
||||
}
|
||||
|
||||
void read(ReadBuffer & buf)
|
||||
{
|
||||
readIntBinary<size_t>(compress_threshold, buf);
|
||||
readFloatBinary<double>(relative_error, buf);
|
||||
readIntBinary<size_t>(count, buf);
|
||||
|
||||
size_t sampled_len = 0;
|
||||
readIntBinary<size_t>(sampled_len, buf);
|
||||
sampled.resize(sampled_len);
|
||||
|
||||
for (size_t i = 0; i < sampled_len; ++i)
|
||||
{
|
||||
auto & stats = sampled[i];
|
||||
readFloatBinary<T>(stats.value, buf);
|
||||
readIntBinary<Int64>(stats.g, buf);
|
||||
readIntBinary<Int64>(stats.delta, buf);
|
||||
}
|
||||
}
|
||||
|
||||
private:
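/// Starting from `index`, whose minimum rank is `min_rank_at_index`, walk the summary until
/// a sample is found whose rank bounds cover the target rank within `target_error`.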
|
||||
QueryResult findApproxQuantile(size_t index, Int64 min_rank_at_index, double target_error, double percentile) const
|
||||
{
|
||||
Stats curr_sample = sampled[index];
|
||||
Int64 rank = static_cast<Int64>(std::ceil(percentile * count));
|
||||
size_t i = index;
|
||||
Int64 min_rank = min_rank_at_index;
|
||||
while (i < sampled.size() - 1)
|
||||
{
|
||||
Int64 max_rank = min_rank + curr_sample.delta;
|
||||
if (max_rank - target_error <= rank && rank <= min_rank + target_error)
|
||||
return {i, min_rank, curr_sample.value};
|
||||
else
|
||||
{
|
||||
++i;
|
||||
curr_sample = sampled[i];
|
||||
min_rank += curr_sample.g;
|
||||
}
|
||||
}
|
||||
return {sampled.size()-1, 0, sampled.back().value};
|
||||
}

    void withHeadBufferInserted()
    {
        if (head_sampled.empty())
            return;

        bool use_radix_sort = head_sampled.size() >= 256 && (is_arithmetic_v<T> && !is_big_int_v<T>);
        if (use_radix_sort)
            RadixSort<RadixSortNumTraits<T>>::executeLSD(head_sampled.data(), head_sampled.size());
        else
            ::sort(head_sampled.begin(), head_sampled.end());

        backup_sampled.clear();
        backup_sampled.reserve(sampled.size() + head_sampled.size());

        size_t sample_idx = 0;
        size_t ops_idx = 0;
        size_t current_count = count;
        for (; ops_idx < head_sampled.size(); ++ops_idx)
        {
            T current_sample = head_sampled[ops_idx];

            // Add all the samples before the next observation.
            while (sample_idx < sampled.size() && sampled[sample_idx].value <= current_sample)
            {
                backup_sampled.emplace_back(sampled[sample_idx]);
                ++sample_idx;
            }

            // If it is the first one to insert, or if it is the last one
            ++current_count;
            Int64 delta;
            if (backup_sampled.empty() || (sample_idx == sampled.size() && ops_idx == (head_sampled.size() - 1)))
                delta = 0;
            else
                delta = static_cast<Int64>(std::floor(2 * relative_error * current_count));

            backup_sampled.emplace_back(current_sample, 1, delta);
        }

        // Add all the remaining existing samples
        for (; sample_idx < sampled.size(); ++sample_idx)
            backup_sampled.emplace_back(sampled[sample_idx]);

        std::swap(sampled, backup_sampled);
        head_sampled.clear();
        count = current_count;
    }

    void doCompress(double merge_threshold)
    {
        if (sampled.empty())
            return;

        backup_sampled.clear();
        // Start from the last element, which is always part of the set.
        // The head contains the current new head, which may be merged with the current element.
        Stats head = sampled.back();
        ssize_t i = sampled.size() - 2;

        // Do not compress the last element
        while (i >= 1)
        {
            // The current sample:
            const auto & sample1 = sampled[i];
            // Do we need to compress?
            if (sample1.g + head.g + head.delta < merge_threshold)
            {
                // Do not insert yet, just merge the current element into the head.
                head.g += sample1.g;
            }
            else
            {
                // Prepend the current head, and keep the current sample as target for merging.
                backup_sampled.push_back(head);
                head = sample1;
            }
            --i;
        }

        backup_sampled.push_back(head);
        // If necessary, add the minimum element:
        auto curr_head = sampled.front();

        // Don't add the minimum element if `sampled` has only one element
        // (both `curr_head` and `head` point to the same element).
        if (curr_head.value <= head.value && sampled.size() > 1)
            backup_sampled.emplace_back(sampled.front());

        std::reverse(backup_sampled.begin(), backup_sampled.end());
        std::swap(sampled, backup_sampled);
    }

    double relative_error;
    size_t compress_threshold;
    size_t count = 0;
    bool compressed;

    PaddedPODArray<Stats> sampled;
    PaddedPODArray<Stats> backup_sampled;

    PaddedPODArray<T> head_sampled;

    static constexpr size_t default_compress_threshold = 10000;
    static constexpr size_t default_head_size = 50000;
};

template <typename Value>
class QuantileApprox
{
private:
    using Data = ApproxSampler<Value>;
    mutable Data data;

public:
    QuantileApprox() = default;

    explicit QuantileApprox(size_t accuracy) : data(1.0 / static_cast<double>(accuracy)) { }

    void add(const Value & x)
    {
        data.insert(x);
    }

    template <typename Weight>
    void add(const Value &, const Weight &)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method add with weight is not implemented for GKSampler");
    }

    void merge(const QuantileApprox & rhs)
    {
        if (!data.isCompressed())
            data.compress();

        data.merge(rhs.data);
    }

    void serialize(WriteBuffer & buf) const
    {
        /// Always compress before serialization
        if (!data.isCompressed())
            data.compress();

        data.write(buf);
    }

    void deserialize(ReadBuffer & buf)
    {
        data.read(buf);

        data.setCompressed();
    }

    /// Get the value of the `level` quantile. The level must be between 0 and 1.
    Value get(Float64 level)
    {
        if (!data.isCompressed())
            data.compress();

        Value res;
        size_t indice = 0;
        data.query(&level, &indice, 1, &res);
        return res;
    }

    /// Get the `size` values of `levels` quantiles. Write `size` results starting at the `result` address.
    /// indices - an array of index levels such that the corresponding elements will go in ascending order.
    void getMany(const Float64 * levels, const size_t * indices, size_t size, Value * result)
    {
        if (!data.isCompressed())
            data.compress();

        data.query(levels, indices, size, result);
    }

    Float64 getFloat64(Float64 /*level*/)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getFloat64 is not implemented for GKSampler");
    }

    void getManyFloat(const Float64 * /*levels*/, const size_t * /*indices*/, size_t /*size*/, Float64 * /*result*/)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getManyFloat is not implemented for GKSampler");
    }
};

}
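
The sampler above is a Greenwald–Khanna (GK) summary: each retained sample carries `g` (the rank gap to the previous retained sample) and `delta` (the uncertainty of its own rank), and a query walks the summary until the rank window brackets the requested rank. A minimal self-contained sketch of that query rule follows — illustrative only, with invented names, not part of this patch:

#include <cmath>
#include <cstdint>
#include <vector>

struct GKSample
{
    double value;
    int64_t g;     /// min_rank(i) - min_rank(i - 1)
    int64_t delta; /// max_rank(i) - min_rank(i)
};

/// Given a non-empty GK summary of n values sorted by value, return a value whose
/// rank differs from ceil(level * n) by at most floor(eps * n), mirroring the
/// acceptance test used in findApproxQuantile() above.
double quantileGK(const std::vector<GKSample> & summary, size_t n, double eps, double level)
{
    const auto target = static_cast<int64_t>(std::ceil(level * static_cast<double>(n)));
    const auto max_error = static_cast<int64_t>(std::floor(eps * static_cast<double>(n)));
    int64_t min_rank = 0;
    for (const auto & sample : summary)
    {
        min_rank += sample.g;
        const int64_t max_rank = min_rank + sample.delta;
        if (max_rank - max_error <= target && target <= min_rank + max_error)
            return sample.value;
    }
    return summary.back().value;
}
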
@ -58,7 +58,8 @@ struct QuantileExactBase
        size_t size = 0;
        readVarUInt(size, buf);
        if (unlikely(size > QUANTILE_EXACT_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                "Too large array size (maximum: {})", QUANTILE_EXACT_MAX_ARRAY_SIZE);
        array.resize(size);
        buf.readStrict(reinterpret_cast<char *>(array.data()), size * sizeof(array[0]));
    }

@ -213,7 +213,8 @@ public:
        size_t size = std::min(total_values, sample_count);
        static constexpr size_t MAX_RESERVOIR_SIZE = 1_GiB;
        if (unlikely(size > MAX_RESERVOIR_SIZE))
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                "Too large array size (maximum: {})", MAX_RESERVOIR_SIZE);

        samples.resize(size);

@ -166,7 +166,8 @@ public:

        static constexpr size_t MAX_RESERVOIR_SIZE = 1_GiB;
        if (unlikely(size > MAX_RESERVOIR_SIZE))
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size");
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                "Too large array size (maximum: {})", MAX_RESERVOIR_SIZE);

        samples.resize(size);
        for (size_t i = 0; i < size; ++i)

@ -32,6 +32,7 @@ void registerAggregateFunctionsQuantileTDigest(AggregateFunctionFactory &);
void registerAggregateFunctionsQuantileTDigestWeighted(AggregateFunctionFactory &);
void registerAggregateFunctionsQuantileBFloat16(AggregateFunctionFactory &);
void registerAggregateFunctionsQuantileBFloat16Weighted(AggregateFunctionFactory &);
void registerAggregateFunctionsQuantileApprox(AggregateFunctionFactory &);
void registerAggregateFunctionsSequenceMatch(AggregateFunctionFactory &);
void registerAggregateFunctionWindowFunnel(AggregateFunctionFactory &);
void registerAggregateFunctionRate(AggregateFunctionFactory &);
@ -79,6 +80,7 @@ void registerAggregateFunctionExponentialMovingAverage(AggregateFunctionFactory
void registerAggregateFunctionSparkbar(AggregateFunctionFactory &);
void registerAggregateFunctionIntervalLengthSum(AggregateFunctionFactory &);
void registerAggregateFunctionAnalysisOfVariance(AggregateFunctionFactory &);
void registerAggregateFunctionKolmogorovSmirnovTest(AggregateFunctionFactory & factory);

class AggregateFunctionCombinatorFactory;
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
@ -123,6 +125,7 @@ void registerAggregateFunctions()
    registerAggregateFunctionsQuantileTDigestWeighted(factory);
    registerAggregateFunctionsQuantileBFloat16(factory);
    registerAggregateFunctionsQuantileBFloat16Weighted(factory);
    registerAggregateFunctionsQuantileApprox(factory);
    registerAggregateFunctionsSequenceMatch(factory);
    registerAggregateFunctionWindowFunnel(factory);
    registerAggregateFunctionRate(factory);
@ -170,6 +173,7 @@ void registerAggregateFunctions()
    registerAggregateFunctionExponentialMovingAverage(factory);
    registerAggregateFunctionSparkbar(factory);
    registerAggregateFunctionAnalysisOfVariance(factory);
    registerAggregateFunctionKolmogorovSmirnovTest(factory);

    registerWindowFunctions(factory);
}

@ -86,7 +86,12 @@ public:

    DataTypePtr getResultType() const override
    {
        return getExpression()->getResultType();
        return result_type;
    }

    void resolve(DataTypePtr lambda_type)
    {
        result_type = std::move(lambda_type);
    }

    void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;
@ -102,6 +107,7 @@ protected:

private:
    Names argument_names;
    DataTypePtr result_type;

    static constexpr size_t arguments_child_index = 0;
    static constexpr size_t expression_child_index = 1;

599
src/Analyzer/Passes/CNF.cpp
Normal file
@ -0,0 +1,599 @@
#include <Analyzer/Passes/CNF.h>

#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/ConstantNode.h>

#include <Interpreters/TreeCNFConverter.h>

#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

#include <Functions/FunctionFactory.h>

#include <Common/checkStackSize.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int TOO_MANY_TEMPORARY_COLUMNS;
}

namespace Analyzer
{

namespace
{

bool isLogicalFunction(const FunctionNode & function_node)
{
    const std::string_view name = function_node.getFunctionName();
    return name == "and" || name == "or" || name == "not";
}

template <typename... Args>
QueryTreeNodePtr createFunctionNode(const FunctionOverloadResolverPtr & function_resolver, Args &&... args)
{
    auto function_node = std::make_shared<FunctionNode>(function_resolver->getName());
    auto & new_arguments = function_node->getArguments().getNodes();
    new_arguments.reserve(sizeof...(args));
    (new_arguments.push_back(std::forward<Args>(args)), ...);
    function_node->resolveAsFunction(function_resolver);
    return function_node;
}

size_t countAtoms(const QueryTreeNodePtr & node)
{
    checkStackSize();

    const auto * function_node = node->as<FunctionNode>();
    if (!function_node || !isLogicalFunction(*function_node))
        return 1;

    size_t atom_count = 0;
    const auto & arguments = function_node->getArguments().getNodes();
    for (const auto & argument : arguments)
        atom_count += countAtoms(argument);

    return atom_count;
}

class SplitMultiLogicVisitor
{
public:
    explicit SplitMultiLogicVisitor(ContextPtr context)
        : current_context(std::move(context))
    {}

    void visit(QueryTreeNodePtr & node)
    {
        checkStackSize();

        auto * function_node = node->as<FunctionNode>();
        if (!function_node || !isLogicalFunction(*function_node))
            return;

        const auto & name = function_node->getFunctionName();

        if (name == "and" || name == "or")
        {
            auto function_resolver = FunctionFactory::instance().get(name, current_context);

            const auto & arguments = function_node->getArguments().getNodes();
            if (arguments.size() > 2)
            {
                QueryTreeNodePtr current = arguments[0];
                for (size_t i = 1; i < arguments.size(); ++i)
                    current = createFunctionNode(function_resolver, std::move(current), arguments[i]);

                auto & new_function_node = current->as<FunctionNode &>();
                function_node->getArguments().getNodes() = std::move(new_function_node.getArguments().getNodes());
                function_node->resolveAsFunction(function_resolver);
            }
        }
        else
        {
            assert(name == "not");
        }

        auto & arguments = function_node->getArguments().getNodes();
        for (auto & argument : arguments)
            visit(argument);
    }

private:
    ContextPtr current_context;
};

class PushNotVisitor
{
public:
    explicit PushNotVisitor(const ContextPtr & context)
        : not_function_resolver(FunctionFactory::instance().get("not", context))
        , or_function_resolver(FunctionFactory::instance().get("or", context))
        , and_function_resolver(FunctionFactory::instance().get("and", context))
    {}

    void visit(QueryTreeNodePtr & node, bool add_negation)
    {
        checkStackSize();

        auto * function_node = node->as<FunctionNode>();

        if (!function_node || !isLogicalFunction(*function_node))
        {
            if (add_negation)
                node = createFunctionNode(not_function_resolver, std::move(node));
            return;
        }

        std::string_view function_name = function_node->getFunctionName();
        if (function_name == "and" || function_name == "or")
        {
            if (add_negation)
            {
                if (function_name == "and")
                    function_node->resolveAsFunction(or_function_resolver);
                else
                    function_node->resolveAsFunction(and_function_resolver);
            }

            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
                visit(argument, add_negation);
            return;
        }

        assert(function_name == "not");
        auto & arguments = function_node->getArguments().getNodes();
        assert(arguments.size() == 1);
        node = arguments[0];
        visit(node, !add_negation);
    }

private:
    const FunctionOverloadResolverPtr not_function_resolver;
    const FunctionOverloadResolverPtr or_function_resolver;
    const FunctionOverloadResolverPtr and_function_resolver;
};

class PushOrVisitor
{
public:
    PushOrVisitor(ContextPtr context, size_t max_atoms_, size_t num_atoms_)
        : max_atoms(max_atoms_)
        , num_atoms(num_atoms_)
        , and_resolver(FunctionFactory::instance().get("and", context))
        , or_resolver(FunctionFactory::instance().get("or", context))
    {}

    bool visit(QueryTreeNodePtr & node)
    {
        if (max_atoms && num_atoms > max_atoms)
            return false;

        checkStackSize();

        auto * function_node = node->as<FunctionNode>();

        if (!function_node)
            return true;

        std::string_view name = function_node->getFunctionName();

        if (name == "or" || name == "and")
        {
            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
                visit(argument);
        }

        if (name == "or")
        {
            auto & arguments = function_node->getArguments().getNodes();
            assert(arguments.size() == 2);

            size_t and_node_id = arguments.size();

            for (size_t i = 0; i < arguments.size(); ++i)
            {
                auto & argument = arguments[i];
                if (auto * argument_function_node = argument->as<FunctionNode>();
                    argument_function_node && argument_function_node->getFunctionName() == "and")
                    and_node_id = i;
            }

            if (and_node_id == arguments.size())
                return true;

            auto & other_node = arguments[1 - and_node_id];
            auto & and_function_arguments = arguments[and_node_id]->as<FunctionNode &>().getArguments().getNodes();

            auto lhs = createFunctionNode(or_resolver, other_node->clone(), std::move(and_function_arguments[0]));
            num_atoms += countAtoms(other_node);

            auto rhs = createFunctionNode(or_resolver, std::move(other_node), std::move(and_function_arguments[1]));
            node = createFunctionNode(and_resolver, std::move(lhs), std::move(rhs));

            visit(node);
        }

        return true;
    }

private:
    size_t max_atoms;
    size_t num_atoms;

    const FunctionOverloadResolverPtr and_resolver;
    const FunctionOverloadResolverPtr or_resolver;
};

class CollectGroupsVisitor
{
public:
    void visit(QueryTreeNodePtr & node)
    {
        CNF::OrGroup or_group;
        visitImpl(node, or_group);
        if (!or_group.empty())
            and_group.insert(std::move(or_group));
    }

    CNF::AndGroup and_group;

private:
    void visitImpl(QueryTreeNodePtr & node, CNF::OrGroup & or_group)
    {
        checkStackSize();

        auto * function_node = node->as<FunctionNode>();
        if (!function_node || !isLogicalFunction(*function_node))
        {
            or_group.insert(CNF::AtomicFormula{false, std::move(node)});
            return;
        }

        std::string_view name = function_node->getFunctionName();

        if (name == "and")
        {
            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
            {
                CNF::OrGroup argument_or_group;
                visitImpl(argument, argument_or_group);
                if (!argument_or_group.empty())
                    and_group.insert(std::move(argument_or_group));
            }
        }
        else if (name == "or")
        {
            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
                visitImpl(argument, or_group);
        }
        else
        {
            assert(name == "not");
            auto & arguments = function_node->getArguments().getNodes();
            or_group.insert(CNF::AtomicFormula{true, std::move(arguments[0])});
        }
    }
};

std::optional<CNF::AtomicFormula> tryInvertFunction(
    const CNF::AtomicFormula & atom, const ContextPtr & context, const std::unordered_map<std::string, std::string> & inverse_relations)
{
    auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
    if (!function_node)
        return std::nullopt;

    if (auto it = inverse_relations.find(function_node->getFunctionName()); it != inverse_relations.end())
    {
        auto inverse_function_resolver = FunctionFactory::instance().get(it->second, context);
        function_node->resolveAsFunction(inverse_function_resolver);
        return CNF::AtomicFormula{!atom.negative, atom.node_with_hash.node};
    }

    return std::nullopt;
}

}

bool CNF::AtomicFormula::operator==(const AtomicFormula & rhs) const
{
    return negative == rhs.negative && node_with_hash == rhs.node_with_hash;
}

bool CNF::AtomicFormula::operator<(const AtomicFormula & rhs) const
{
    if (node_with_hash.hash > rhs.node_with_hash.hash)
        return false;

    return node_with_hash.hash < rhs.node_with_hash.hash || negative < rhs.negative;
}

std::string CNF::dump() const
{
    WriteBufferFromOwnString res;
    bool first = true;
    for (const auto & group : statements)
    {
        if (!first)
            res << " AND ";
        first = false;
        res << "(";
        bool first_in_group = true;
        for (const auto & atom : group)
        {
            if (!first_in_group)
                res << " OR ";
            first_in_group = false;
            if (atom.negative)
                res << " NOT ";
            res << atom.node_with_hash.node->formatASTForErrorMessage();
        }
        res << ")";
    }

    return res.str();
}

CNF & CNF::transformGroups(std::function<OrGroup(const OrGroup &)> fn)
{
    AndGroup result;

    for (const auto & group : statements)
    {
        auto new_group = fn(group);
        if (!new_group.empty())
            result.insert(std::move(new_group));
    }

    statements = std::move(result);
    return *this;
}

CNF & CNF::transformAtoms(std::function<AtomicFormula(const AtomicFormula &)> fn)
{
    transformGroups([fn](const OrGroup & group)
    {
        OrGroup result;
        for (const auto & atom : group)
        {
            auto new_atom = fn(atom);
            if (new_atom.node_with_hash.node)
                result.insert(std::move(new_atom));
        }

        return result;
    });

    return *this;
}

CNF & CNF::pushNotIntoFunctions(const ContextPtr & context)
{
    transformAtoms([&](const AtomicFormula & atom)
    {
        return pushNotIntoFunction(atom, context);
    });

    return *this;
}

CNF::AtomicFormula CNF::pushNotIntoFunction(const AtomicFormula & atom, const ContextPtr & context)
{
    if (!atom.negative)
        return atom;

    static const std::unordered_map<std::string, std::string> inverse_relations = {
        {"equals", "notEquals"},
        {"less", "greaterOrEquals"},
        {"lessOrEquals", "greater"},
        {"in", "notIn"},
        {"like", "notLike"},
        {"empty", "notEmpty"},
        {"notEquals", "equals"},
        {"greaterOrEquals", "less"},
        {"greater", "lessOrEquals"},
        {"notIn", "in"},
        {"notLike", "like"},
        {"notEmpty", "empty"},
    };

    if (auto inverted_atom = tryInvertFunction(atom, context, inverse_relations);
        inverted_atom.has_value())
        return std::move(*inverted_atom);

    return atom;
}

CNF & CNF::pullNotOutFunctions(const ContextPtr & context)
{
    transformAtoms([&](const AtomicFormula & atom)
    {
        static const std::unordered_map<std::string, std::string> inverse_relations = {
            {"notEquals", "equals"},
            {"greaterOrEquals", "less"},
            {"greater", "lessOrEquals"},
            {"notIn", "in"},
            {"notLike", "like"},
            {"notEmpty", "empty"},
        };

        if (auto inverted_atom = tryInvertFunction(atom, context, inverse_relations);
            inverted_atom.has_value())
            return std::move(*inverted_atom);

        return atom;
    });

    return *this;
}

CNF & CNF::filterAlwaysTrueGroups(std::function<bool(const OrGroup &)> predicate)
{
    AndGroup filtered;
    for (const auto & or_group : statements)
    {
        if (predicate(or_group))
            filtered.insert(or_group);
    }

    statements = std::move(filtered);
    return *this;
}

CNF & CNF::filterAlwaysFalseAtoms(std::function<bool(const AtomicFormula &)> predicate)
{
    AndGroup filtered;
    for (const auto & or_group : statements)
    {
        OrGroup filtered_group;
        for (const auto & atom : or_group)
        {
            if (predicate(atom))
                filtered_group.insert(atom);
        }

        if (!filtered_group.empty())
            filtered.insert(std::move(filtered_group));
        else
        {
            filtered.clear();
            filtered_group.insert(AtomicFormula{false, QueryTreeNodePtrWithHash{std::make_shared<ConstantNode>(static_cast<UInt8>(0))}});
            filtered.insert(std::move(filtered_group));
            break;
        }
    }

    statements = std::move(filtered);
    return *this;
}

CNF & CNF::reduce()
{
    while (true)
    {
        AndGroup new_statements = reduceOnceCNFStatements(statements);
        if (statements == new_statements)
        {
            statements = filterCNFSubsets(statements);
            return *this;
        }
        else
            statements = new_statements;
    }
}

void CNF::appendGroup(const AndGroup & and_group)
{
    for (const auto & or_group : and_group)
        statements.emplace(or_group);
}

CNF::CNF(AndGroup statements_)
    : statements(std::move(statements_))
{}

std::optional<CNF> CNF::tryBuildCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier)
{
    auto node_cloned = node->clone();

    size_t atom_count = countAtoms(node_cloned);
    size_t max_atoms = max_growth_multiplier ? std::max(MAX_ATOMS_WITHOUT_CHECK, atom_count * max_growth_multiplier) : 0;

    {
        SplitMultiLogicVisitor visitor(context);
        visitor.visit(node_cloned);
    }

    {
        PushNotVisitor visitor(context);
        visitor.visit(node_cloned, false);
    }

    if (PushOrVisitor visitor(context, max_atoms, atom_count);
        !visitor.visit(node_cloned))
        return std::nullopt;

    CollectGroupsVisitor collect_visitor;
    collect_visitor.visit(node_cloned);

    if (collect_visitor.and_group.empty())
        return std::nullopt;

    return CNF{std::move(collect_visitor.and_group)};
}

CNF CNF::toCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier)
{
    auto cnf = tryBuildCNF(node, context, max_growth_multiplier);
    if (!cnf)
        throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS,
            "Cannot convert expression '{}' to CNF, because it produces too many clauses. "
            "The size of a boolean formula in CNF can be exponential in the size of the source formula.",
            node->formatASTForErrorMessage());

    return *cnf;
}

QueryTreeNodePtr CNF::toQueryTree(ContextPtr context) const
{
    if (statements.empty())
        return nullptr;

    QueryTreeNodes and_arguments;
    and_arguments.reserve(statements.size());

    auto not_resolver = FunctionFactory::instance().get("not", context);
    auto or_resolver = FunctionFactory::instance().get("or", context);
    auto and_resolver = FunctionFactory::instance().get("and", context);

    const auto function_node_from_atom = [&](const auto & atom) -> QueryTreeNodePtr
    {
        auto cloned_node = atom.node_with_hash.node->clone();
        if (atom.negative)
            return createFunctionNode(not_resolver, std::move(cloned_node));

        return std::move(cloned_node);
    };

    for (const auto & or_group : statements)
    {
        if (or_group.size() == 1)
        {
            const auto & atom = *or_group.begin();
            and_arguments.push_back(function_node_from_atom(atom));
        }
        else
        {
            QueryTreeNodes or_arguments;
            or_arguments.reserve(or_group.size());

            for (const auto & atom : or_group)
                or_arguments.push_back(function_node_from_atom(atom));

            auto or_function = std::make_shared<FunctionNode>("or");
            or_function->getArguments().getNodes() = std::move(or_arguments);
            or_function->resolveAsFunction(or_resolver);

            and_arguments.push_back(std::move(or_function));
        }
    }

    if (and_arguments.size() == 1)
        return std::move(and_arguments[0]);

    auto and_function = std::make_shared<FunctionNode>("and");
    and_function->getArguments().getNodes() = std::move(and_arguments);
    and_function->resolveAsFunction(and_resolver);

    return and_function;
}

}

}
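
The three visitors in this file implement the textbook CNF pipeline: binarize multi-argument `and`/`or`, push `not` inward with De Morgan's laws, then distribute `or` over `and` until only an AND of OR-groups is left. The distribution step is the one that can explode, which is why `PushOrVisitor` tracks `num_atoms` against `max_atoms`. A toy stand-alone illustration of the two combining steps, on an invented literal representation rather than the patch's query-tree types:

#include <vector>

/// A literal is an int (negative means negated), a clause is an OR of literals,
/// and a CNF is an AND of clauses.
using Clause = std::vector<int>;
using Cnf = std::vector<Clause>;

/// AND of two CNFs: just concatenate the clause lists.
Cnf andCnf(const Cnf & lhs, const Cnf & rhs)
{
    Cnf result = lhs;
    result.insert(result.end(), rhs.begin(), rhs.end());
    return result;
}

/// OR of two CNFs: distribute, producing the cross product of clauses.
/// The output has |lhs| * |rhs| clauses - the exponential growth that
/// tryBuildCNF() guards against with max_growth_multiplier.
Cnf orCnf(const Cnf & lhs, const Cnf & rhs)
{
    Cnf result;
    for (const auto & left_clause : lhs)
        for (const auto & right_clause : rhs)
        {
            Clause merged = left_clause;
            merged.insert(merged.end(), right_clause.begin(), right_clause.end());
            result.push_back(merged);
        }
    return result;
}
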
67
src/Analyzer/Passes/CNF.h
Normal file
@ -0,0 +1,67 @@
#pragma once

#include <Analyzer/HashUtils.h>
#include <Analyzer/IQueryTreeNode.h>

#include <Common/SipHash.h>

#include <Interpreters/Context_fwd.h>

#include <unordered_set>

namespace DB::Analyzer
{

class CNF
{
public:
    struct AtomicFormula
    {
        bool negative = false;
        QueryTreeNodePtrWithHash node_with_hash;

        bool operator==(const AtomicFormula & rhs) const;
        bool operator<(const AtomicFormula & rhs) const;
    };

    // A different hash is generated for a different order, so we use std::set
    using OrGroup = std::set<AtomicFormula>;
    using AndGroup = std::set<OrGroup>;

    std::string dump() const;

    static constexpr size_t DEFAULT_MAX_GROWTH_MULTIPLIER = 20;
    static constexpr size_t MAX_ATOMS_WITHOUT_CHECK = 200;

    CNF & transformAtoms(std::function<AtomicFormula(const AtomicFormula &)> fn);
    CNF & transformGroups(std::function<OrGroup(const OrGroup &)> fn);

    CNF & filterAlwaysTrueGroups(std::function<bool(const OrGroup &)> predicate);
    CNF & filterAlwaysFalseAtoms(std::function<bool(const AtomicFormula &)> predicate);

    CNF & reduce();

    void appendGroup(const AndGroup & and_group);

    /// Convert "NOT fn" to a single node representing the inverse of "fn"
    CNF & pushNotIntoFunctions(const ContextPtr & context);
    CNF & pullNotOutFunctions(const ContextPtr & context);

    static AtomicFormula pushNotIntoFunction(const AtomicFormula & atom, const ContextPtr & context);

    explicit CNF(AndGroup statements_);

    static std::optional<CNF> tryBuildCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier = DEFAULT_MAX_GROWTH_MULTIPLIER);
    static CNF toCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier = DEFAULT_MAX_GROWTH_MULTIPLIER);

    QueryTreeNodePtr toQueryTree(ContextPtr context) const;

    const auto & getStatements() const
    {
        return statements;
    }

private:
    AndGroup statements;
};

}
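
Judging by the methods declared above, the intended call pattern inside a pass looks roughly like this (a hedged sketch; error handling and the surrounding visitor machinery are omitted):

/// `filter_node` is a QueryTreeNodePtr, `context` a ContextPtr.
if (auto cnf = DB::Analyzer::CNF::tryBuildCNF(filter_node, context))
{
    cnf->pushNotIntoFunctions(context)  /// e.g. "NOT (a < b)" becomes "a >= b"
        .reduce();                      /// drop redundant OR-groups
    if (auto new_filter = cnf->toQueryTree(context))
        filter_node = new_filter;
}
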
733
src/Analyzer/Passes/ConvertQueryToCNFPass.cpp
Normal file
@ -0,0 +1,733 @@
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>

#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/TableNode.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/Passes/CNF.h>
#include <Analyzer/Utils.h>

#include <Storages/IStorage.h>

#include <Functions/FunctionFactory.h>
#include "Analyzer/HashUtils.h"
#include "Analyzer/IQueryTreeNode.h"
#include "Interpreters/ComparisonGraph.h"
#include "base/types.h"

namespace DB
{

namespace
{

std::optional<Analyzer::CNF> tryConvertQueryToCNF(const QueryTreeNodePtr & node, const ContextPtr & context)
{
    auto cnf_form = Analyzer::CNF::tryBuildCNF(node, context);
    if (!cnf_form)
        return std::nullopt;

    cnf_form->pushNotIntoFunctions(context);
    return cnf_form;
}

enum class MatchState : uint8_t
{
    FULL_MATCH, /// a = b
    PARTIAL_MATCH, /// a = not b
    NONE,
};

MatchState match(const Analyzer::CNF::AtomicFormula & a, const Analyzer::CNF::AtomicFormula & b)
{
    using enum MatchState;
    if (a.node_with_hash != b.node_with_hash)
        return NONE;

    return a.negative == b.negative ? FULL_MATCH : PARTIAL_MATCH;
}

bool checkIfGroupAlwaysTrueFullMatch(const Analyzer::CNF::OrGroup & group, const ConstraintsDescription::QueryTreeData & query_tree_constraints)
{
    /// We have constraints in CNF.
    /// The constraint CNF is always true => each of its OR groups is always true.
    /// So, we try to check whether at least one OR group from the constraint CNF is a subset of our group.
    /// If we've found one, then our group is always true too.

    const auto & constraints_data = query_tree_constraints.getConstraintData();
    std::vector<size_t> found(constraints_data.size());
    for (size_t i = 0; i < constraints_data.size(); ++i)
        found[i] = constraints_data[i].size();

    for (const auto & atom : group)
    {
        const auto constraint_atom_ids = query_tree_constraints.getAtomIds(atom.node_with_hash);
        if (constraint_atom_ids)
        {
            const auto constraint_atoms = query_tree_constraints.getAtomsById(*constraint_atom_ids);
            for (size_t i = 0; i < constraint_atoms.size(); ++i)
            {
                if (match(constraint_atoms[i], atom) == MatchState::FULL_MATCH)
                {
                    if ((--found[(*constraint_atom_ids)[i].group_id]) == 0)
                        return true;
                }
            }
        }
    }
    return false;
}

bool checkIfGroupAlwaysTrueGraph(const Analyzer::CNF::OrGroup & group, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    /// We try to find at least one atom that is always true by using the comparison graph.
    for (const auto & atom : group)
    {
        const auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
        if (function_node)
        {
            const auto & arguments = function_node->getArguments().getNodes();
            if (arguments.size() == 2)
            {
                const auto expected = ComparisonGraph<QueryTreeNodePtr>::atomToCompareResult(atom);
                if (graph.isAlwaysCompare(expected, arguments[0], arguments[1]))
                    return true;
            }
        }
    }

    return false;
}

bool checkIfAtomAlwaysFalseFullMatch(const Analyzer::CNF::AtomicFormula & atom, const ConstraintsDescription::QueryTreeData & query_tree_constraints)
{
    const auto constraint_atom_ids = query_tree_constraints.getAtomIds(atom.node_with_hash);
    if (constraint_atom_ids)
    {
        for (const auto & constraint_atom : query_tree_constraints.getAtomsById(*constraint_atom_ids))
        {
            const auto match_result = match(constraint_atom, atom);
            if (match_result == MatchState::PARTIAL_MATCH)
                return true;
        }
    }

    return false;
}

bool checkIfAtomAlwaysFalseGraph(const Analyzer::CNF::AtomicFormula & atom, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    const auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
    if (!function_node)
        return false;

    const auto & arguments = function_node->getArguments().getNodes();
    if (arguments.size() != 2)
        return false;

    /// TODO: special support for !=
    const auto expected = ComparisonGraph<QueryTreeNodePtr>::atomToCompareResult(atom);
    return !graph.isPossibleCompare(expected, arguments[0], arguments[1]);
}

void replaceToConstants(QueryTreeNodePtr & term, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    const auto equal_constant = graph.getEqualConst(term);
    if (equal_constant)
    {
        term = (*equal_constant)->clone();
        return;
    }

    for (auto & child : term->getChildren())
    {
        if (child)
            replaceToConstants(child, graph);
    }
}

Analyzer::CNF::AtomicFormula replaceTermsToConstants(const Analyzer::CNF::AtomicFormula & atom, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    auto node = atom.node_with_hash.node->clone();
    replaceToConstants(node, graph);
    return {atom.negative, std::move(node)};
}

StorageSnapshotPtr getStorageSnapshot(const QueryTreeNodePtr & node)
{
    if (auto * table_node = node->as<TableNode>())
        return table_node->getStorageSnapshot();
    else if (auto * table_function_node = node->as<TableFunctionNode>())
        return table_function_node->getStorageSnapshot();

    return nullptr;
}

bool onlyIndexColumns(const QueryTreeNodePtr & node, const std::unordered_set<std::string_view> & primary_key_set)
{
    const auto * column_node = node->as<ColumnNode>();
    /// TODO: verify that full name is correct here
    if (column_node && !primary_key_set.contains(column_node->getColumnName()))
        return false;

    for (const auto & child : node->getChildren())
    {
        if (child && !onlyIndexColumns(child, primary_key_set))
            return false;
    }

    return true;
}

bool onlyConstants(const QueryTreeNodePtr & node)
{
    /// if it's only constant it will be already calculated
    return node->as<ConstantNode>() != nullptr;
}

const std::unordered_map<std::string_view, ComparisonGraphCompareResult> & getRelationMap()
{
    using enum ComparisonGraphCompareResult;
    static const std::unordered_map<std::string_view, ComparisonGraphCompareResult> relations =
    {
        {"equals", EQUAL},
        {"less", LESS},
        {"lessOrEquals", LESS_OR_EQUAL},
        {"greaterOrEquals", GREATER_OR_EQUAL},
        {"greater", GREATER},
    };
    return relations;
}

const std::unordered_map<ComparisonGraphCompareResult, std::string> & getReverseRelationMap()
{
    using enum ComparisonGraphCompareResult;
    static const std::unordered_map<ComparisonGraphCompareResult, std::string> relations =
    {
        {EQUAL, "equals"},
        {LESS, "less"},
        {LESS_OR_EQUAL, "lessOrEquals"},
        {GREATER_OR_EQUAL, "greaterOrEquals"},
        {GREATER, "greater"},
    };
    return relations;
}

bool canBeSequence(const ComparisonGraphCompareResult left, const ComparisonGraphCompareResult right)
{
    using enum ComparisonGraphCompareResult;
    if (left == UNKNOWN || right == UNKNOWN || left == NOT_EQUAL || right == NOT_EQUAL)
        return false;
    if ((left == GREATER || left == GREATER_OR_EQUAL) && (right == LESS || right == LESS_OR_EQUAL))
        return false;
    if ((right == GREATER || right == GREATER_OR_EQUAL) && (left == LESS || left == LESS_OR_EQUAL))
        return false;
    return true;
}

ComparisonGraphCompareResult mostStrict(const ComparisonGraphCompareResult left, const ComparisonGraphCompareResult right)
{
    using enum ComparisonGraphCompareResult;
    if (left == LESS || left == GREATER)
        return left;
    if (right == LESS || right == GREATER)
        return right;
    if (left == LESS_OR_EQUAL || left == GREATER_OR_EQUAL)
        return left;
    if (right == LESS_OR_EQUAL || right == GREATER_OR_EQUAL)
        return right;
    if (left == EQUAL)
        return left;
    if (right == EQUAL)
        return right;
    return UNKNOWN;
}

/// Create an OR-group for 'indexHint'.
/// Consider we have an expression like A <op1> C, where C is a constant.
/// Consider we also have a constraint I <op2> A, where I depends only on columns from the primary key.
/// Then if op1 and op2 form a sequence of comparisons (e.g. A < C and I < A),
/// we can add the condition 'indexHint(I < A)' to the expression.
Analyzer::CNF::OrGroup createIndexHintGroup(
    const Analyzer::CNF::OrGroup & group,
    const ComparisonGraph<QueryTreeNodePtr> & graph,
    const QueryTreeNodes & primary_key_only_nodes,
    const ContextPtr & context)
{
    Analyzer::CNF::OrGroup result;
    for (const auto & atom : group)
    {
        const auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
        if (!function_node || !getRelationMap().contains(function_node->getFunctionName()))
            continue;

        const auto & arguments = function_node->getArguments().getNodes();
        if (arguments.size() != 2)
            continue;

        auto check_and_insert = [&](const size_t index, const ComparisonGraphCompareResult expected_result)
        {
            if (!onlyConstants(arguments[1 - index]))
                return false;

            for (const auto & primary_key_node : primary_key_only_nodes)
            {
                ComparisonGraphCompareResult actual_result;
                if (index == 0)
                    actual_result = graph.compare(primary_key_node, arguments[index]);
                else
                    actual_result = graph.compare(arguments[index], primary_key_node);

                if (canBeSequence(expected_result, actual_result))
                {
                    auto helper_node = function_node->clone();
                    auto & helper_function_node = helper_node->as<FunctionNode &>();
                    helper_function_node.getArguments().getNodes()[index] = primary_key_node->clone();
                    auto reverse_function_name = getReverseRelationMap().at(mostStrict(expected_result, actual_result));
                    helper_function_node.resolveAsFunction(FunctionFactory::instance().get(reverse_function_name, context));
                    result.insert(Analyzer::CNF::AtomicFormula{atom.negative, std::move(helper_node)});
                    return true;
                }
            }

            return false;
        };

        auto expected = getRelationMap().at(function_node->getFunctionName());
        if (!check_and_insert(0, expected) && !check_and_insert(1, expected))
            return {};
    }

    return result;
}

void addIndexConstraint(Analyzer::CNF & cnf, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    for (const auto & table_expression : table_expressions)
    {
        auto snapshot = getStorageSnapshot(table_expression);
        if (!snapshot || !snapshot->metadata)
            continue;

        const auto primary_key = snapshot->metadata->getColumnsRequiredForPrimaryKey();
        const std::unordered_set<std::string_view> primary_key_set(primary_key.begin(), primary_key.end());

        const auto & query_tree_constraint = snapshot->metadata->getConstraints().getQueryTreeData(context, table_expression);
        const auto & graph = query_tree_constraint.getGraph();

        QueryTreeNodes primary_key_only_nodes;
        for (const auto & vertex : graph.getVertices())
        {
            for (const auto & node : vertex)
            {
                if (onlyIndexColumns(node, primary_key_set))
                    primary_key_only_nodes.push_back(node);
            }
        }

        Analyzer::CNF::AndGroup and_group;
        const auto & statements = cnf.getStatements();
        for (const auto & group : statements)
        {
            auto new_group = createIndexHintGroup(group, graph, primary_key_only_nodes, context);
            if (!new_group.empty())
                and_group.emplace(std::move(new_group));
        }

        if (!and_group.empty())
        {
            Analyzer::CNF::OrGroup new_group;
            auto index_hint_node = std::make_shared<FunctionNode>("indexHint");
            index_hint_node->getArguments().getNodes().push_back(Analyzer::CNF{std::move(and_group)}.toQueryTree(context));
            index_hint_node->resolveAsFunction(FunctionFactory::instance().get("indexHint", context));
            new_group.insert({false, QueryTreeNodePtrWithHash{std::move(index_hint_node)}});

            cnf.appendGroup({new_group});
        }
    }
}

struct ColumnPrice
{
    Int64 compressed_size{0};
    Int64 uncompressed_size{0};

    ColumnPrice(const Int64 compressed_size_, const Int64 uncompressed_size_)
        : compressed_size(compressed_size_)
        , uncompressed_size(uncompressed_size_)
    {
    }

    bool operator<(const ColumnPrice & that) const
    {
        return std::tie(compressed_size, uncompressed_size) < std::tie(that.compressed_size, that.uncompressed_size);
    }

    ColumnPrice & operator+=(const ColumnPrice & that)
    {
        compressed_size += that.compressed_size;
        uncompressed_size += that.uncompressed_size;
        return *this;
    }

    ColumnPrice & operator-=(const ColumnPrice & that)
    {
        compressed_size -= that.compressed_size;
        uncompressed_size -= that.uncompressed_size;
        return *this;
    }
};

using ColumnPriceByName = std::unordered_map<String, ColumnPrice>;
using ColumnPriceByQueryNode = QueryTreeNodePtrWithHashMap<ColumnPrice>;

class ComponentCollectorVisitor : public ConstInDepthQueryTreeVisitor<ComponentCollectorVisitor>
{
public:
    ComponentCollectorVisitor(
        std::set<UInt64> & components_,
        QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component_,
        const ComparisonGraph<QueryTreeNodePtr> & graph_)
        : components(components_), query_node_to_component(query_node_to_component_), graph(graph_)
    {}

    void visitImpl(const QueryTreeNodePtr & node)
    {
        if (auto id = graph.getComponentId(node))
        {
            query_node_to_component.emplace(node, *id);
            components.insert(*id);
        }
    }

private:
    std::set<UInt64> & components;
    QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component;

    const ComparisonGraph<QueryTreeNodePtr> & graph;
};

class ColumnNameCollectorVisitor : public ConstInDepthQueryTreeVisitor<ColumnNameCollectorVisitor>
{
public:
    ColumnNameCollectorVisitor(
        std::unordered_set<std::string> & column_names_,
        const QueryTreeNodePtrWithHashMap<UInt64> * query_node_to_component_)
        : column_names(column_names_), query_node_to_component(query_node_to_component_)
    {}

    bool needChildVisit(const VisitQueryTreeNodeType & parent, const VisitQueryTreeNodeType &)
    {
        return !query_node_to_component || !query_node_to_component->contains(parent);
    }

    void visitImpl(const QueryTreeNodePtr & node)
    {
        if (query_node_to_component && query_node_to_component->contains(node))
            return;

        if (const auto * column_node = node->as<ColumnNode>())
            column_names.insert(column_node->getColumnName());
    }

private:
    std::unordered_set<std::string> & column_names;
    const QueryTreeNodePtrWithHashMap<UInt64> * query_node_to_component;
};

class SubstituteColumnVisitor : public InDepthQueryTreeVisitor<SubstituteColumnVisitor>
{
public:
    SubstituteColumnVisitor(
        const QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component_,
        const std::unordered_map<UInt64, QueryTreeNodePtr> & id_to_query_node_map_,
        ContextPtr context_)
        : query_node_to_component(query_node_to_component_), id_to_query_node_map(id_to_query_node_map_), context(std::move(context_))
    {}

    void visitImpl(QueryTreeNodePtr & node)
    {
        auto component_id_it = query_node_to_component.find(node);
        if (component_id_it == query_node_to_component.end())
            return;

        const auto component_id = component_id_it->second;
        auto new_node = id_to_query_node_map.at(component_id)->clone();

        if (!node->getResultType()->equals(*new_node->getResultType()))
        {
            node = buildCastFunction(new_node, node->getResultType(), context);
            return;
        }

        node = std::move(new_node);
    }

private:
    const QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component;
    const std::unordered_map<UInt64, QueryTreeNodePtr> & id_to_query_node_map;
    ContextPtr context;
};

ColumnPrice calculatePrice(
    const ColumnPriceByName & column_prices,
    const std::unordered_set<std::string> & column_names)
{
    ColumnPrice result(0, 0);

    for (const auto & column : column_names)
    {
        if (auto it = column_prices.find(column); it != column_prices.end())
            result += it->second;
    }

    return result;
}

void bruteForce(
    const ComparisonGraph<QueryTreeNodePtr> & graph,
    const std::vector<UInt64> & components,
    size_t current_component,
    const ColumnPriceByName & column_prices,
    ColumnPrice current_price,
    std::vector<QueryTreeNodePtr> & expressions_stack,
    ColumnPrice & min_price,
    std::vector<QueryTreeNodePtr> & min_expressions)
{
    if (current_component == components.size())
    {
        if (current_price < min_price)
        {
            min_price = current_price;
            min_expressions = expressions_stack;
        }
        return;
    }

    for (const auto & node : graph.getComponent(components[current_component]))
    {
        std::unordered_set<std::string> column_names;
        ColumnNameCollectorVisitor column_name_collector{column_names, nullptr};
        column_name_collector.visit(node);

        ColumnPrice expression_price = calculatePrice(column_prices, column_names);

        expressions_stack.push_back(node);
        current_price += expression_price;

        ColumnPriceByName new_prices(column_prices);
        for (const auto & column : column_names)
            new_prices.insert_or_assign(column, ColumnPrice(0, 0));

        bruteForce(graph,
            components,
            current_component + 1,
            new_prices,
            current_price,
            expressions_stack,
            min_price,
            min_expressions);

        current_price -= expression_price;
        expressions_stack.pop_back();
    }
}

void substituteColumns(QueryNode & query_node, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    static constexpr UInt64 COLUMN_PENALTY = 10 * 1024 * 1024;
    static constexpr Int64 INDEX_PRICE = -1'000'000'000'000'000'000;

    for (const auto & table_expression : table_expressions)
    {
        auto snapshot = getStorageSnapshot(table_expression);
        if (!snapshot || !snapshot->metadata)
            continue;

        const auto column_sizes = snapshot->storage.getColumnSizes();
        if (column_sizes.empty())
            return;

        auto query_tree_constraint = snapshot->metadata->getConstraints().getQueryTreeData(context, table_expression);
        const auto & graph = query_tree_constraint.getGraph();

        auto run_for_all = [&](const auto function)
        {
            function(query_node.getProjectionNode());

            if (query_node.hasWhere())
                function(query_node.getWhere());

            if (query_node.hasPrewhere())
                function(query_node.getPrewhere());

            if (query_node.hasHaving())
                function(query_node.getHaving());
        };

        std::set<UInt64> components;
        QueryTreeNodePtrWithHashMap<UInt64> query_node_to_component;
        std::unordered_set<std::string> column_names;

        run_for_all([&](QueryTreeNodePtr & node)
        {
            ComponentCollectorVisitor component_collector{components, query_node_to_component, graph};
            component_collector.visit(node);
            ColumnNameCollectorVisitor column_name_collector{column_names, &query_node_to_component};
            column_name_collector.visit(node);
        });

        ColumnPriceByName column_prices;
        const auto primary_key = snapshot->metadata->getColumnsRequiredForPrimaryKey();

        for (const auto & [column_name, column_size] : column_sizes)
            column_prices.insert_or_assign(column_name, ColumnPrice(column_size.data_compressed + COLUMN_PENALTY, column_size.data_uncompressed));

        for (const auto & column_name : primary_key)
            column_prices.insert_or_assign(column_name, ColumnPrice(INDEX_PRICE, INDEX_PRICE));

        for (const auto & column_name : column_names)
            column_prices.insert_or_assign(column_name, ColumnPrice(0, 0));

        std::unordered_map<UInt64, QueryTreeNodePtr> id_to_query_node_map;
        std::vector<UInt64> components_list;

        for (const auto component_id : components)
        {
            auto component = graph.getComponent(component_id);
            if (component.size() == 1)
                id_to_query_node_map[component_id] = component.front();
            else
                components_list.push_back(component_id);
        }

        std::vector<QueryTreeNodePtr> expressions_stack;
        ColumnPrice min_price(std::numeric_limits<Int64>::max(), std::numeric_limits<Int64>::max());
        std::vector<QueryTreeNodePtr> min_expressions;

        bruteForce(graph,
            components_list,
            0,
            column_prices,
            ColumnPrice(0, 0),
            expressions_stack,
            min_price,
            min_expressions);

        for (size_t i = 0; i < components_list.size(); ++i)
            id_to_query_node_map[components_list[i]] = min_expressions[i];

        SubstituteColumnVisitor substitute_column{query_node_to_component, id_to_query_node_map, context};

        run_for_all([&](QueryTreeNodePtr & node)
        {
            substitute_column.visit(node);
        });
    }
}

void optimizeWithConstraints(Analyzer::CNF & cnf, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    cnf.pullNotOutFunctions(context);

    for (const auto & table_expression : table_expressions)
    {
        auto snapshot = getStorageSnapshot(table_expression);
        if (!snapshot || !snapshot->metadata)
            continue;

        const auto & constraints = snapshot->metadata->getConstraints();
        const auto & query_tree_constraints = constraints.getQueryTreeData(context, table_expression);
        const auto & compare_graph = query_tree_constraints.getGraph();
        cnf.filterAlwaysTrueGroups([&](const auto & group)
        {
            /// remove always true groups from CNF
            return !checkIfGroupAlwaysTrueFullMatch(group, query_tree_constraints) && !checkIfGroupAlwaysTrueGraph(group, compare_graph);
        })
        .filterAlwaysFalseAtoms([&](const Analyzer::CNF::AtomicFormula & atom)
        {
            /// remove always false atoms from CNF
            return !checkIfAtomAlwaysFalseFullMatch(atom, query_tree_constraints) && !checkIfAtomAlwaysFalseGraph(atom, compare_graph);
        })
        .transformAtoms([&](const auto & atom)
        {
            return replaceTermsToConstants(atom, compare_graph);
        })
        .reduce();
    }

    cnf.pushNotIntoFunctions(context);

    const auto & settings = context->getSettingsRef();
    if (settings.optimize_append_index)
        addIndexConstraint(cnf, table_expressions, context);
}

void optimizeNode(QueryTreeNodePtr & node, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    const auto & settings = context->getSettingsRef();

    auto cnf = tryConvertQueryToCNF(node, context);
    if (!cnf)
        return;

    if (settings.optimize_using_constraints)
        optimizeWithConstraints(*cnf, table_expressions, context);

    auto new_node = cnf->toQueryTree(context);
    node = std::move(new_node);
}

class ConvertQueryToCNFVisitor : public InDepthQueryTreeVisitorWithContext<ConvertQueryToCNFVisitor>
|
||||
{
|
||||
public:
|
||||
using Base = InDepthQueryTreeVisitorWithContext<ConvertQueryToCNFVisitor>;
|
||||
using Base::Base;
|
||||
|
||||
void visitImpl(QueryTreeNodePtr & node)
|
||||
{
|
||||
auto * query_node = node->as<QueryNode>();
|
||||
if (!query_node)
|
||||
return;
|
||||
|
||||
auto table_expressions = extractTableExpressions(query_node->getJoinTree());
|
||||
|
||||
const auto & context = getContext();
|
||||
const auto & settings = context->getSettingsRef();
|
||||
|
||||
bool has_filter = false;
|
||||
const auto optimize_filter = [&](QueryTreeNodePtr & filter_node)
|
||||
{
|
||||
if (filter_node == nullptr)
|
||||
return;
|
||||
|
||||
optimizeNode(filter_node, table_expressions, context);
|
||||
has_filter = true;
|
||||
};
|
||||
|
||||
optimize_filter(query_node->getWhere());
|
||||
optimize_filter(query_node->getPrewhere());
|
||||
optimize_filter(query_node->getHaving());
|
||||
|
||||
if (has_filter && settings.optimize_substitute_columns)
|
||||
substituteColumns(*query_node, table_expressions, context);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
void ConvertLogicalExpressionToCNFPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
|
||||
{
|
||||
const auto & settings = context->getSettingsRef();
|
||||
if (!settings.convert_query_to_cnf)
|
||||
return;
|
||||
|
||||
ConvertQueryToCNFVisitor visitor(std::move(context));
|
||||
visitor.visit(query_tree_node);
|
||||
}
|
||||
|
||||
}
|
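The heart of optimizeWithConstraints() above is pruning a CNF against table constraints: whole OR-groups that the constraint graph proves always true are dropped, and atoms proved always false are removed, before the remainder is folded back into a query tree. A minimal standalone sketch of that pruning idea, using plain containers and hypothetical always-true/always-false oracles rather than the Analyzer::CNF API:

    #include <functional>
    #include <vector>

    /// A clause (OR-group) is a list of literal ids; a CNF is an AND of clauses.
    using Clause = std::vector<int>;
    using CNF = std::vector<Clause>;

    /// Drop every OR-group the oracle proves always true (it cannot constrain
    /// the filter), then drop every literal proved always false inside the
    /// remaining groups - loosely the same two steps as
    /// filterAlwaysTrueGroups()/filterAlwaysFalseAtoms() above.
    CNF simplify(CNF cnf,
                 const std::function<bool(const Clause &)> & group_always_true,
                 const std::function<bool(int)> & atom_always_false)
    {
        std::erase_if(cnf, group_always_true);
        for (Clause & clause : cnf)
            std::erase_if(clause, atom_always_false);
        return cnf;
    }

In the real pass the two oracles are the checkIfGroupAlwaysTrue*/checkIfAtomAlwaysFalse* checks against the constraint graph, and transformAtoms()/reduce() then canonicalize whatever survives.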
18
src/Analyzer/Passes/ConvertQueryToCNFPass.h
Normal file
@ -0,0 +1,18 @@
#pragma once

#include <Analyzer/IQueryTreePass.h>

namespace DB
{

class ConvertLogicalExpressionToCNFPass final : public IQueryTreePass
{
public:
String getName() override { return "ConvertLogicalExpressionToCNFPass"; }

String getDescription() override { return "Convert logical expression to CNF and apply optimizations using constraints"; }

void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;
};

}
@ -219,7 +219,6 @@ private:
/// we can replace OR with the operand
if (or_operands[0]->getResultType()->equals(*function_node.getResultType()))
{
assert(!function_node.getResultType()->isNullable());
node = std::move(or_operands[0]);
return;
}
@ -5085,8 +5085,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
arguments_projection_names[function_lambda_argument_index] = lambda_argument_projection_name_buffer.str();
}

argument_types[function_lambda_argument_index] = std::make_shared<DataTypeFunction>(function_data_type_argument_types, lambda_to_resolve->getResultType());
argument_columns[function_lambda_argument_index].type = argument_types[function_lambda_argument_index];
auto lambda_resolved_type = std::make_shared<DataTypeFunction>(function_data_type_argument_types, lambda_to_resolve_typed.getExpression()->getResultType());
lambda_to_resolve_typed.resolve(lambda_resolved_type);

argument_types[function_lambda_argument_index] = lambda_resolved_type;
argument_columns[function_lambda_argument_index].type = lambda_resolved_type;
function_arguments[function_lambda_argument_index] = std::move(lambda_to_resolve);
}

@ -41,7 +41,7 @@
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>

#include <Analyzer/Passes/ConvertQueryToCNFPass.h>

namespace DB
{
@ -115,13 +115,23 @@ private:

for (size_t i = 0; i < expected_argument_types_size; ++i)
{
// Skip lambdas
if (WhichDataType(expected_argument_types[i]).isFunction())
continue;

const auto & expected_argument_type = expected_argument_types[i];
const auto & actual_argument_type = actual_argument_columns[i].type;

if (!expected_argument_type)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Function {} expected argument {} type is not set after running {} pass",
function->toAST()->formatForErrorMessage(),
i + 1,
pass_name);

if (!actual_argument_type)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Function {} actual argument {} type is not set after running {} pass",
function->toAST()->formatForErrorMessage(),
i + 1,
pass_name);

if (!expected_argument_type->equals(*actual_argument_type))
{
/// Aggregate functions remove low cardinality for their argument types
@ -148,8 +158,6 @@ private:

/** ClickHouse query tree pass manager.
*
* TODO: Support setting convert_query_to_cnf.
* TODO: Support setting optimize_using_constraints.
* TODO: Support setting optimize_substitute_columns.
* TODO: Support GROUP BY injective function elimination.
* TODO: Support setting optimize_move_functions_out_of_any.
@ -235,6 +243,8 @@ void addQueryTreePasses(QueryTreePassManager & manager)
manager.addPass(std::make_unique<QueryAnalysisPass>());
manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());

manager.addPass(std::make_unique<ConvertLogicalExpressionToCNFPass>());

manager.addPass(std::make_unique<CountDistinctPass>());
manager.addPass(std::make_unique<RewriteAggregateFunctionWithIfPass>());
manager.addPass(std::make_unique<SumIfToCountIfPass>());
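addQueryTreePasses() is order-sensitive: ConvertLogicalExpressionToCNFPass is registered immediately after QueryAnalysisPass and FunctionToSubcolumnsPass so that every later pass already sees WHERE/PREWHERE/HAVING in their CNF-rewritten form. A minimal sketch of this register-then-run-in-order shape (generic interface, not the actual IQueryTreePass signature):

    #include <memory>
    #include <vector>

    struct Pass
    {
        virtual ~Pass() = default;
        /// The real interface takes a query tree node and a context.
        virtual void run() = 0;
    };

    class PassManager
    {
    public:
        void addPass(std::unique_ptr<Pass> pass) { passes.push_back(std::move(pass)); }

        /// Passes execute strictly in registration order, so a pass that
        /// rewrites the tree (like the CNF conversion) must be added before
        /// any pass that should consume its output.
        void run()
        {
            for (auto & pass : passes)
                pass->run();
        }

    private:
        std::vector<std::unique_ptr<Pass>> passes;
    };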
@ -8,7 +8,8 @@
namespace DB
{

BackupCoordinationLocal::BackupCoordinationLocal(bool plain_backup_) : file_infos(plain_backup_)
BackupCoordinationLocal::BackupCoordinationLocal(bool plain_backup_)
: log(&Poco::Logger::get("BackupCoordinationLocal")), file_infos(plain_backup_)
{
}

@ -35,7 +36,7 @@ Strings BackupCoordinationLocal::waitForStage(const String &, std::chrono::milli
void BackupCoordinationLocal::addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
std::lock_guard lock{replicated_tables_mutex};
replicated_tables.addPartNames(table_shared_id, table_name_for_logs, replica_name, part_names_and_checksums);
replicated_tables.addPartNames({table_shared_id, table_name_for_logs, replica_name, part_names_and_checksums});
}

Strings BackupCoordinationLocal::getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const
@ -48,7 +49,7 @@ Strings BackupCoordinationLocal::getReplicatedPartNames(const String & table_sha
void BackupCoordinationLocal::addReplicatedMutations(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name, const std::vector<MutationInfo> & mutations)
{
std::lock_guard lock{replicated_tables_mutex};
replicated_tables.addMutations(table_shared_id, table_name_for_logs, replica_name, mutations);
replicated_tables.addMutations({table_shared_id, table_name_for_logs, replica_name, mutations});
}

std::vector<IBackupCoordination::MutationInfo> BackupCoordinationLocal::getReplicatedMutations(const String & table_shared_id, const String & replica_name) const
@ -61,7 +62,7 @@ std::vector<IBackupCoordination::MutationInfo> BackupCoordinationLocal::getRepli
void BackupCoordinationLocal::addReplicatedDataPath(const String & table_shared_id, const String & data_path)
{
std::lock_guard lock{replicated_tables_mutex};
replicated_tables.addDataPath(table_shared_id, data_path);
replicated_tables.addDataPath({table_shared_id, data_path});
}

Strings BackupCoordinationLocal::getReplicatedDataPaths(const String & table_shared_id) const
@ -74,7 +75,7 @@ Strings BackupCoordinationLocal::getReplicatedDataPaths(const String & table_sha
void BackupCoordinationLocal::addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path)
{
std::lock_guard lock{replicated_access_mutex};
replicated_access.addFilePath(access_zk_path, access_entity_type, "", file_path);
replicated_access.addFilePath({access_zk_path, access_entity_type, "", file_path});
}

Strings BackupCoordinationLocal::getReplicatedAccessFilePaths(const String & access_zk_path, AccessEntityType access_entity_type) const
@ -87,7 +88,7 @@ Strings BackupCoordinationLocal::getReplicatedAccessFilePaths(const String & acc
void BackupCoordinationLocal::addReplicatedSQLObjectsDir(const String & loader_zk_path, UserDefinedSQLObjectType object_type, const String & dir_path)
{
std::lock_guard lock{replicated_sql_objects_mutex};
replicated_sql_objects.addDirectory(loader_zk_path, object_type, "", dir_path);
replicated_sql_objects.addDirectory({loader_zk_path, object_type, "", dir_path});
}

Strings BackupCoordinationLocal::getReplicatedSQLObjectsDirs(const String & loader_zk_path, UserDefinedSQLObjectType object_type) const
@ -125,7 +126,12 @@ bool BackupCoordinationLocal::startWritingFile(size_t data_file_index)

bool BackupCoordinationLocal::hasConcurrentBackups(const std::atomic<size_t> & num_active_backups) const
{
return (num_active_backups > 1);
if (num_active_backups > 1)
{
LOG_WARNING(log, "Found concurrent backups: num_active_backups={}", num_active_backups);
return true;
}
return false;
}

}
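The recurring change in this file, repeated below for the BackupCoordinationReplicated* classes, is a parameter-object refactor: four positional arguments collapse into one aggregate, so call sites become addPartNames({...}) and cannot silently transpose same-typed strings. A minimal sketch of the pattern with hypothetical, simplified types:

    #include <string>
    #include <utility>
    #include <vector>

    /// Groups what used to be four positional parameters; brace initialization
    /// at the call site keeps the fields readable.
    struct PartNamesForTableReplica
    {
        std::string table_shared_id;
        std::string table_name_for_logs;
        std::string replica_name;
        std::vector<std::string> part_names_and_checksums;
    };

    class ReplicatedTables
    {
    public:
        /// Taking the aggregate by rvalue reference lets the callee move from it.
        void addPartNames(PartNamesForTableReplica && part_names)
        {
            entries.push_back(std::move(part_names));
        }

    private:
        std::vector<PartNamesForTableReplica> entries;
    };

    int main()
    {
        ReplicatedTables tables;
        tables.addPartNames({"shared_id", "db.table (for logs)", "replica_1", {"all_0_0_0"}});
    }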
@ -52,6 +52,8 @@ public:
bool hasConcurrentBackups(const std::atomic<size_t> & num_active_backups) const override;

private:
Poco::Logger * const log;

BackupCoordinationReplicatedTables TSA_GUARDED_BY(replicated_tables_mutex) replicated_tables;
BackupCoordinationReplicatedAccess TSA_GUARDED_BY(replicated_access_mutex) replicated_access;
BackupCoordinationReplicatedSQLObjects TSA_GUARDED_BY(replicated_sql_objects_mutex) replicated_sql_objects;
@ -1,13 +1,18 @@
#include <Backups/BackupCoordinationRemote.h>

#include <base/hex.h>

#include <Access/Common/AccessEntityType.h>
#include <Backups/BackupCoordinationReplicatedAccess.h>
#include <Backups/BackupCoordinationStage.h>
#include <Common/escapeForFileName.h>
#include <Common/ZooKeeper/Common.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Functions/UserDefined/UserDefinedSQLObjectType.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/escapeForFileName.h>
#include <Backups/BackupCoordinationStage.h>


namespace DB
@ -154,8 +159,7 @@ BackupCoordinationRemote::BackupCoordinationRemote(
const String & current_host_,
bool plain_backup_,
bool is_internal_)
: get_zookeeper(get_zookeeper_)
, root_zookeeper_path(root_zookeeper_path_)
: root_zookeeper_path(root_zookeeper_path_)
, zookeeper_path(root_zookeeper_path_ + "/backup-" + backup_uuid_)
, keeper_settings(keeper_settings_)
, backup_uuid(backup_uuid_)
@ -164,17 +168,33 @@ BackupCoordinationRemote::BackupCoordinationRemote(
, current_host_index(findCurrentHostIndex(all_hosts, current_host))
, plain_backup(plain_backup_)
, is_internal(is_internal_)
{
zookeeper_retries_info = ZooKeeperRetriesInfo(
"BackupCoordinationRemote",
&Poco::Logger::get("BackupCoordinationRemote"),
keeper_settings.keeper_max_retries,
keeper_settings.keeper_retry_initial_backoff_ms,
keeper_settings.keeper_retry_max_backoff_ms);
, log(&Poco::Logger::get("BackupCoordinationRemote"))
, with_retries(
log,
get_zookeeper_,
keeper_settings,
[zookeeper_path = zookeeper_path, current_host = current_host, is_internal = is_internal]
(WithRetries::FaultyKeeper & zk)
{
/// Recreate this ephemeral node to signal that we are alive.
if (is_internal)
{
String alive_node_path = zookeeper_path + "/stage/alive|" + current_host;
auto code = zk->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);

if (code == Coordination::Error::ZNODEEXISTS)
zk->handleEphemeralNodeExistenceNoFailureInjection(alive_node_path, "");
else if (code != Coordination::Error::ZOK)
throw zkutil::KeeperException(code, alive_node_path);
}
})
{
createRootNodes();

stage_sync.emplace(
zookeeper_path + "/stage", [this] { return getZooKeeper(); }, &Poco::Logger::get("BackupCoordination"));
zookeeper_path,
with_retries,
log);
}

BackupCoordinationRemote::~BackupCoordinationRemote()
@ -190,44 +210,45 @@ BackupCoordinationRemote::~BackupCoordinationRemote()
}
}

zkutil::ZooKeeperPtr BackupCoordinationRemote::getZooKeeper() const
{
std::lock_guard lock{zookeeper_mutex};
if (!zookeeper || zookeeper->expired())
{
zookeeper = get_zookeeper();

/// It's possible that we connected to a different [Zoo]Keeper instance,
/// so we may read slightly stale state.
zookeeper->sync(zookeeper_path);
}
return zookeeper;
}

void BackupCoordinationRemote::createRootNodes()
{
auto zk = getZooKeeper();
zk->createAncestors(zookeeper_path);
zk->createIfNotExists(zookeeper_path, "");
zk->createIfNotExists(zookeeper_path + "/repl_part_names", "");
zk->createIfNotExists(zookeeper_path + "/repl_mutations", "");
zk->createIfNotExists(zookeeper_path + "/repl_data_paths", "");
zk->createIfNotExists(zookeeper_path + "/repl_access", "");
zk->createIfNotExists(zookeeper_path + "/repl_sql_objects", "");
zk->createIfNotExists(zookeeper_path + "/file_infos", "");
zk->createIfNotExists(zookeeper_path + "/writing_files", "");
auto holder = with_retries.createRetriesControlHolder("createRootNodes");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);

zk->createAncestors(zookeeper_path);

Coordination::Requests ops;
Coordination::Responses responses;
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path, "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_part_names", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_mutations", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_data_paths", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_access", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_sql_objects", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/file_infos", "", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/writing_files", "", zkutil::CreateMode::Persistent));
zk->tryMulti(ops, responses);
});
}

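Every Keeper access in this file now funnels through createRetriesControlHolder()/retryLoop(): the lambda renews the session via renewZooKeeper() and is re-executed with backoff on transient Keeper errors instead of failing the whole backup. A self-contained sketch of that control flow, with invented defaults standing in for the keeper_max_retries/keeper_retry_*_backoff_ms settings that used to feed ZooKeeperRetriesInfo:

    #include <algorithm>
    #include <chrono>
    #include <functional>
    #include <thread>

    /// Run `body`; on failure, sleep with exponential backoff and retry,
    /// rethrowing only once the attempt budget is exhausted.
    void retryLoop(const std::function<void()> & body,
                   size_t max_retries = 10,
                   std::chrono::milliseconds initial_backoff = std::chrono::milliseconds(100),
                   std::chrono::milliseconds max_backoff = std::chrono::milliseconds(5000))
    {
        auto backoff = initial_backoff;
        for (size_t attempt = 0;; ++attempt)
        {
            try
            {
                body();     /// e.g. renew the Keeper session, then do the requests
                return;
            }
            catch (...)
            {
                if (attempt + 1 >= max_retries)
                    throw;  /// out of attempts: surface the last error
                std::this_thread::sleep_for(backoff);
                backoff = std::min(backoff * 2, max_backoff);
            }
        }
    }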
void BackupCoordinationRemote::removeAllNodes()
{
/// Usually this function is called by the initiator when a backup is complete so we don't need the coordination anymore.
///
/// However there can be a rare situation when this function is called after an error occurs on the initiator of a query
/// while some hosts are still making the backup. Removing all the nodes will remove the parent node of the backup coordination
/// at `zookeeper_path` which might cause such hosts to stop with exception "ZNONODE". Or such hosts might still do some useless part
/// of their backup work before that. Anyway in this case backup won't be finalized (because only an initiator can do that).
auto zk = getZooKeeper();
zk->removeRecursive(zookeeper_path);
auto holder = with_retries.createRetriesControlHolder("removeAllNodes");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
/// Usually this function is called by the initiator when a backup is complete so we don't need the coordination anymore.
///
/// However there can be a rare situation when this function is called after an error occurs on the initiator of a query
/// while some hosts are still making the backup. Removing all the nodes will remove the parent node of the backup coordination
/// at `zookeeper_path` which might cause such hosts to stop with exception "ZNONODE". Or such hosts might still do some useless part
/// of their backup work before that. Anyway in this case backup won't be finalized (because only an initiator can do that).
with_retries.renewZooKeeper(zk);
zk->removeRecursive(zookeeper_path);
});
}

@ -255,10 +276,11 @@ Strings BackupCoordinationRemote::waitForStage(const String & stage_to_wait, std
void BackupCoordinationRemote::serializeToMultipleZooKeeperNodes(const String & path, const String & value, const String & logging_name)
{
{
ZooKeeperRetriesControl retries_ctl(logging_name + "::create", zookeeper_retries_info);
retries_ctl.retryLoop([&]
auto holder = with_retries.createRetriesControlHolder(logging_name + "::create");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
auto zk = getZooKeeper();
with_retries.renewZooKeeper(zk);
zk->createIfNotExists(path, "");
});
}
@ -279,10 +301,11 @@ void BackupCoordinationRemote::serializeToMultipleZooKeeperNodes(const String &
String part = value.substr(begin, end - begin);
String part_path = fmt::format("{}/{:06}", path, i);

ZooKeeperRetriesControl retries_ctl(logging_name + "::createPart", zookeeper_retries_info);
retries_ctl.retryLoop([&]
auto holder = with_retries.createRetriesControlHolder(logging_name + "::createPart");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
auto zk = getZooKeeper();
with_retries.renewZooKeeper(zk);
zk->createIfNotExists(part_path, part);
});
}
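serializeToMultipleZooKeeperNodes() above works around the znode value-size limit by writing the payload as children named with {:06}-formatted indices; deserializeFromMultipleZooKeeperNodes() below sorts the child names and concatenates. A sketch of just the splitting step (plain strings; max_part_size is a stand-in for the keeper_value_max_size setting):

    #include <cstddef>
    #include <iomanip>
    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    /// Split `value` into parts of at most `max_part_size` bytes, naming each
    /// part with a zero-padded index so lexicographic order equals numeric order.
    std::vector<std::pair<std::string, std::string>> splitIntoParts(const std::string & value, size_t max_part_size)
    {
        std::vector<std::pair<std::string, std::string>> parts;
        for (size_t begin = 0, i = 0; begin < value.size(); begin += max_part_size, ++i)
        {
            std::ostringstream name;
            name << std::setw(6) << std::setfill('0') << i;
            parts.emplace_back(name.str(), value.substr(begin, max_part_size));
        }
        return parts;
    }

The zero-padding is what lets the reader simply std::sort the child names before re-assembling the value.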
@ -293,9 +316,11 @@ String BackupCoordinationRemote::deserializeFromMultipleZooKeeperNodes(const Str
Strings part_names;

{
ZooKeeperRetriesControl retries_ctl(logging_name + "::getChildren", zookeeper_retries_info);
retries_ctl.retryLoop([&]{
auto zk = getZooKeeper();
auto holder = with_retries.createRetriesControlHolder(logging_name + "::getChildren");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
part_names = zk->getChildren(path);
std::sort(part_names.begin(), part_names.end());
});
@ -306,10 +331,11 @@ String BackupCoordinationRemote::deserializeFromMultipleZooKeeperNodes(const Str
{
String part;
String part_path = path + "/" + part_name;
ZooKeeperRetriesControl retries_ctl(logging_name + "::get", zookeeper_retries_info);
retries_ctl.retryLoop([&]
auto holder = with_retries.createRetriesControlHolder(logging_name + "::get");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
auto zk = getZooKeeper();
with_retries.renewZooKeeper(zk);
part = zk->get(part_path);
});
res += part;
@ -330,11 +356,16 @@ void BackupCoordinationRemote::addReplicatedPartNames(
throw Exception(ErrorCodes::LOGICAL_ERROR, "addReplicatedPartNames() must not be called after preparing");
}

auto zk = getZooKeeper();
String path = zookeeper_path + "/repl_part_names/" + escapeForFileName(table_shared_id);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(replica_name);
zk->create(path, ReplicatedPartNames::serialize(part_names_and_checksums, table_name_for_logs), zkutil::CreateMode::Persistent);
auto holder = with_retries.createRetriesControlHolder("addReplicatedPartNames");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_part_names/" + escapeForFileName(table_shared_id);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(replica_name);
zk->createIfNotExists(path, ReplicatedPartNames::serialize(part_names_and_checksums, table_name_for_logs));
});
}

Strings BackupCoordinationRemote::getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const
@ -356,11 +387,16 @@ void BackupCoordinationRemote::addReplicatedMutations(
throw Exception(ErrorCodes::LOGICAL_ERROR, "addReplicatedMutations() must not be called after preparing");
}

auto zk = getZooKeeper();
String path = zookeeper_path + "/repl_mutations/" + escapeForFileName(table_shared_id);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(replica_name);
zk->create(path, ReplicatedMutations::serialize(mutations, table_name_for_logs), zkutil::CreateMode::Persistent);
auto holder = with_retries.createRetriesControlHolder("addReplicatedMutations");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_mutations/" + escapeForFileName(table_shared_id);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(replica_name);
zk->createIfNotExists(path, ReplicatedMutations::serialize(mutations, table_name_for_logs));
});
}

std::vector<IBackupCoordination::MutationInfo> BackupCoordinationRemote::getReplicatedMutations(const String & table_shared_id, const String & replica_name) const
@ -380,11 +416,16 @@ void BackupCoordinationRemote::addReplicatedDataPath(
throw Exception(ErrorCodes::LOGICAL_ERROR, "addReplicatedDataPath() must not be called after preparing");
}

auto zk = getZooKeeper();
String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_shared_id);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(data_path);
zk->createIfNotExists(path, "");
auto holder = with_retries.createRetriesControlHolder("addReplicatedDataPath");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_shared_id);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(data_path);
zk->createIfNotExists(path, "");
});
}

Strings BackupCoordinationRemote::getReplicatedDataPaths(const String & table_shared_id) const
@ -400,55 +441,88 @@ void BackupCoordinationRemote::prepareReplicatedTables() const
if (replicated_tables)
return;

std::vector<BackupCoordinationReplicatedTables::PartNamesForTableReplica> part_names_for_replicated_tables;
{
auto holder = with_retries.createRetriesControlHolder("prepareReplicatedTables::repl_part_names");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
part_names_for_replicated_tables.clear();
with_retries.renewZooKeeper(zk);

String path = zookeeper_path + "/repl_part_names";
for (const String & escaped_table_shared_id : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
for (const String & escaped_replica_name : zk->getChildren(path2))
{
String replica_name = unescapeForFileName(escaped_replica_name);
auto part_names = ReplicatedPartNames::deserialize(zk->get(path2 + "/" + escaped_replica_name));
part_names_for_replicated_tables.push_back(
{table_shared_id, part_names.table_name_for_logs, replica_name, part_names.part_names_and_checksums});
}
}
});
}

std::vector<BackupCoordinationReplicatedTables::MutationsForTableReplica> mutations_for_replicated_tables;
{
auto holder = with_retries.createRetriesControlHolder("prepareReplicatedTables::repl_mutations");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
mutations_for_replicated_tables.clear();
with_retries.renewZooKeeper(zk);

String path = zookeeper_path + "/repl_mutations";
for (const String & escaped_table_shared_id : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
for (const String & escaped_replica_name : zk->getChildren(path2))
{
String replica_name = unescapeForFileName(escaped_replica_name);
auto mutations = ReplicatedMutations::deserialize(zk->get(path2 + "/" + escaped_replica_name));
mutations_for_replicated_tables.push_back(
{table_shared_id, mutations.table_name_for_logs, replica_name, mutations.mutations});
}
}
});
}

std::vector<BackupCoordinationReplicatedTables::DataPathForTableReplica> data_paths_for_replicated_tables;
{
auto holder = with_retries.createRetriesControlHolder("prepareReplicatedTables::repl_data_paths");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
data_paths_for_replicated_tables.clear();
with_retries.renewZooKeeper(zk);

String path = zookeeper_path + "/repl_data_paths";
for (const String & escaped_table_shared_id : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
for (const String & escaped_data_path : zk->getChildren(path2))
{
String data_path = unescapeForFileName(escaped_data_path);
data_paths_for_replicated_tables.push_back({table_shared_id, data_path});
}
}
});
}

replicated_tables.emplace();
auto zk = getZooKeeper();

{
String path = zookeeper_path + "/repl_part_names";
for (const String & escaped_table_shared_id : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
for (const String & escaped_replica_name : zk->getChildren(path2))
{
String replica_name = unescapeForFileName(escaped_replica_name);
auto part_names = ReplicatedPartNames::deserialize(zk->get(path2 + "/" + escaped_replica_name));
replicated_tables->addPartNames(table_shared_id, part_names.table_name_for_logs, replica_name, part_names.part_names_and_checksums);
}
}
}

{
String path = zookeeper_path + "/repl_mutations";
for (const String & escaped_table_shared_id : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
for (const String & escaped_replica_name : zk->getChildren(path2))
{
String replica_name = unescapeForFileName(escaped_replica_name);
auto mutations = ReplicatedMutations::deserialize(zk->get(path2 + "/" + escaped_replica_name));
replicated_tables->addMutations(table_shared_id, mutations.table_name_for_logs, replica_name, mutations.mutations);
}
}
}

{
String path = zookeeper_path + "/repl_data_paths";
for (const String & escaped_table_shared_id : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
for (const String & escaped_data_path : zk->getChildren(path2))
{
String data_path = unescapeForFileName(escaped_data_path);
replicated_tables->addDataPath(table_shared_id, data_path);
}
}
}
for (auto & part_names : part_names_for_replicated_tables)
replicated_tables->addPartNames(std::move(part_names));
for (auto & mutations : mutations_for_replicated_tables)
replicated_tables->addMutations(std::move(mutations));
for (auto & data_paths : data_paths_for_replicated_tables)
replicated_tables->addDataPath(std::move(data_paths));
}

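Note the shape of prepareReplicatedTables() above: each retryLoop() body first clears its local output vector, and the shared replicated_tables optional is populated only after every loop has succeeded. A retry body can run more than once, so partial results from a failed attempt must never leak into shared state. A minimal sketch of that collect-then-commit discipline (retry and fetch are hypothetical stand-ins):

    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    /// `retry` runs its argument one or more times until it succeeds (see the
    /// backoff sketch earlier); `fetch` stands in for the Keeper reads.
    std::vector<std::string> collectUnderRetries(
        const std::function<void(const std::function<void()> &)> & retry,
        const std::function<std::vector<std::string>()> & fetch)
    {
        std::vector<std::string> collected;
        retry([&]
        {
            collected.clear();      /// a failed earlier attempt may have half-filled it
            for (auto & item : fetch())
                collected.push_back(std::move(item));
        });
        return collected;           /// handed to shared state only after success
    }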
void BackupCoordinationRemote::addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path)
{
{
@ -457,13 +531,18 @@ void BackupCoordinationRemote::addReplicatedAccessFilePath(const String & access
throw Exception(ErrorCodes::LOGICAL_ERROR, "addReplicatedAccessFilePath() must not be called after preparing");
}

auto zk = getZooKeeper();
String path = zookeeper_path + "/repl_access/" + escapeForFileName(access_zk_path);
zk->createIfNotExists(path, "");
path += "/" + AccessEntityTypeInfo::get(access_entity_type).name;
zk->createIfNotExists(path, "");
path += "/" + current_host;
zk->createIfNotExists(path, file_path);
auto holder = with_retries.createRetriesControlHolder("addReplicatedAccessFilePath");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_access/" + escapeForFileName(access_zk_path);
zk->createIfNotExists(path, "");
path += "/" + AccessEntityTypeInfo::get(access_entity_type).name;
zk->createIfNotExists(path, "");
path += "/" + current_host;
zk->createIfNotExists(path, file_path);
});
}

Strings BackupCoordinationRemote::getReplicatedAccessFilePaths(const String & access_zk_path, AccessEntityType access_entity_type) const
@ -478,25 +557,35 @@ void BackupCoordinationRemote::prepareReplicatedAccess() const
if (replicated_access)
return;

replicated_access.emplace();
auto zk = getZooKeeper();

String path = zookeeper_path + "/repl_access";
for (const String & escaped_access_zk_path : zk->getChildren(path))
std::vector<BackupCoordinationReplicatedAccess::FilePathForAccessEntitry> file_path_for_access_entities;
auto holder = with_retries.createRetriesControlHolder("prepareReplicatedAccess");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
String access_zk_path = unescapeForFileName(escaped_access_zk_path);
String path2 = path + "/" + escaped_access_zk_path;
for (const String & type_str : zk->getChildren(path2))
file_path_for_access_entities.clear();
with_retries.renewZooKeeper(zk);

String path = zookeeper_path + "/repl_access";
for (const String & escaped_access_zk_path : zk->getChildren(path))
{
AccessEntityType type = AccessEntityTypeInfo::parseType(type_str);
String path3 = path2 + "/" + type_str;
for (const String & host_id : zk->getChildren(path3))
String access_zk_path = unescapeForFileName(escaped_access_zk_path);
String path2 = path + "/" + escaped_access_zk_path;
for (const String & type_str : zk->getChildren(path2))
{
String file_path = zk->get(path3 + "/" + host_id);
replicated_access->addFilePath(access_zk_path, type, host_id, file_path);
AccessEntityType type = AccessEntityTypeInfo::parseType(type_str);
String path3 = path2 + "/" + type_str;
for (const String & host_id : zk->getChildren(path3))
{
String file_path = zk->get(path3 + "/" + host_id);
file_path_for_access_entities.push_back({access_zk_path, type, host_id, file_path});
}
}
}
}
});

replicated_access.emplace();
for (auto & file_path : file_path_for_access_entities)
replicated_access->addFilePath(std::move(file_path));
}

void BackupCoordinationRemote::addReplicatedSQLObjectsDir(const String & loader_zk_path, UserDefinedSQLObjectType object_type, const String & dir_path)
@ -507,21 +596,26 @@ void BackupCoordinationRemote::addReplicatedSQLObjectsDir(const String & loader_
throw Exception(ErrorCodes::LOGICAL_ERROR, "addReplicatedSQLObjectsDir() must not be called after preparing");
}

auto zk = getZooKeeper();
String path = zookeeper_path + "/repl_sql_objects/" + escapeForFileName(loader_zk_path);
zk->createIfNotExists(path, "");

path += "/";
switch (object_type)
auto holder = with_retries.createRetriesControlHolder("addReplicatedSQLObjectsDir");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
case UserDefinedSQLObjectType::Function:
path += "functions";
break;
}
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_sql_objects/" + escapeForFileName(loader_zk_path);
zk->createIfNotExists(path, "");

zk->createIfNotExists(path, "");
path += "/" + current_host;
zk->createIfNotExists(path, dir_path);
path += "/";
switch (object_type)
{
case UserDefinedSQLObjectType::Function:
path += "functions";
break;
}

zk->createIfNotExists(path, "");
path += "/" + current_host;
zk->createIfNotExists(path, dir_path);
});
}

Strings BackupCoordinationRemote::getReplicatedSQLObjectsDirs(const String & loader_zk_path, UserDefinedSQLObjectType object_type) const
@ -536,27 +630,36 @@ void BackupCoordinationRemote::prepareReplicatedSQLObjects() const
if (replicated_sql_objects)
return;

replicated_sql_objects.emplace();
auto zk = getZooKeeper();

String path = zookeeper_path + "/repl_sql_objects";
for (const String & escaped_loader_zk_path : zk->getChildren(path))
std::vector<BackupCoordinationReplicatedSQLObjects::DirectoryPathForSQLObject> directories_for_sql_objects;
auto holder = with_retries.createRetriesControlHolder("prepareReplicatedSQLObjects");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
String loader_zk_path = unescapeForFileName(escaped_loader_zk_path);
String objects_path = path + "/" + escaped_loader_zk_path;
directories_for_sql_objects.clear();
with_retries.renewZooKeeper(zk);

if (String functions_path = objects_path + "/functions"; zk->exists(functions_path))
String path = zookeeper_path + "/repl_sql_objects";
for (const String & escaped_loader_zk_path : zk->getChildren(path))
{
UserDefinedSQLObjectType object_type = UserDefinedSQLObjectType::Function;
for (const String & host_id : zk->getChildren(functions_path))
String loader_zk_path = unescapeForFileName(escaped_loader_zk_path);
String objects_path = path + "/" + escaped_loader_zk_path;

if (String functions_path = objects_path + "/functions"; zk->exists(functions_path))
{
String dir = zk->get(functions_path + "/" + host_id);
replicated_sql_objects->addDirectory(loader_zk_path, object_type, host_id, dir);
UserDefinedSQLObjectType object_type = UserDefinedSQLObjectType::Function;
for (const String & host_id : zk->getChildren(functions_path))
{
String dir = zk->get(functions_path + "/" + host_id);
directories_for_sql_objects.push_back({loader_zk_path, object_type, host_id, dir});
}
}
}
}
}
});

replicated_sql_objects.emplace();
for (auto & directory : directories_for_sql_objects)
replicated_sql_objects->addDirectory(std::move(directory));
}

void BackupCoordinationRemote::addFileInfos(BackupFileInfos && file_infos_)
{
@ -594,9 +697,11 @@ void BackupCoordinationRemote::prepareFileInfos() const

Strings hosts_with_file_infos;
{
ZooKeeperRetriesControl retries_ctl("prepareFileInfos::get_hosts", zookeeper_retries_info);
retries_ctl.retryLoop([&]{
auto zk = getZooKeeper();
auto holder = with_retries.createRetriesControlHolder("prepareFileInfos::get_hosts");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
hosts_with_file_infos = zk->getChildren(zookeeper_path + "/file_infos");
});
}
@ -615,10 +720,11 @@ bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
String full_path = zookeeper_path + "/writing_files/" + std::to_string(data_file_index);
String host_index_str = std::to_string(current_host_index);

ZooKeeperRetriesControl retries_ctl("startWritingFile", zookeeper_retries_info);
retries_ctl.retryLoop([&]
auto holder = with_retries.createRetriesControlHolder("startWritingFile");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
auto zk = getZooKeeper();
with_retries.renewZooKeeper(zk);
auto code = zk->tryCreate(full_path, host_index_str, zkutil::CreateMode::Persistent);

if (code == Coordination::Error::ZOK)
@ -632,51 +738,63 @@ bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
return acquired_writing;
}


bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic<size_t> &) const
{
/// If it's an internal backup, concurrency will be checked for the base backup
if (is_internal)
return false;

auto zk = getZooKeeper();
std::string backup_stage_path = zookeeper_path + "/stage";

if (!zk->exists(root_zookeeper_path))
zk->createAncestors(root_zookeeper_path);
bool result = false;

for (size_t attempt = 0; attempt < MAX_ZOOKEEPER_ATTEMPTS; ++attempt)
auto holder = with_retries.createRetriesControlHolder("getAllArchiveSuffixes");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()
{
Coordination::Stat stat;
zk->get(root_zookeeper_path, &stat);
Strings existing_backup_paths = zk->getChildren(root_zookeeper_path);
with_retries.renewZooKeeper(zk);

for (const auto & existing_backup_path : existing_backup_paths)
if (!zk->exists(root_zookeeper_path))
zk->createAncestors(root_zookeeper_path);

for (size_t attempt = 0; attempt < MAX_ZOOKEEPER_ATTEMPTS; ++attempt)
{
if (startsWith(existing_backup_path, "restore-"))
continue;
Coordination::Stat stat;
zk->get(root_zookeeper_path, &stat);
Strings existing_backup_paths = zk->getChildren(root_zookeeper_path);

String existing_backup_uuid = existing_backup_path;
existing_backup_uuid.erase(0, String("backup-").size());
for (const auto & existing_backup_path : existing_backup_paths)
{
if (startsWith(existing_backup_path, "restore-"))
continue;

if (existing_backup_uuid == toString(backup_uuid))
continue;
String existing_backup_uuid = existing_backup_path;
existing_backup_uuid.erase(0, String("backup-").size());

const auto status = zk->get(root_zookeeper_path + "/" + existing_backup_path + "/stage");
if (status != Stage::COMPLETED)
return true;

if (existing_backup_uuid == toString(backup_uuid))
continue;

const auto status = zk->get(root_zookeeper_path + "/" + existing_backup_path + "/stage");
if (status != Stage::COMPLETED)
{
LOG_WARNING(log, "Found a concurrent backup: {}, current backup: {}", existing_backup_uuid, toString(backup_uuid));
result = true;
return;
}
}

zk->createIfNotExists(backup_stage_path, "");
auto code = zk->trySet(backup_stage_path, Stage::SCHEDULED_TO_START, stat.version);
if (code == Coordination::Error::ZOK)
break;
bool is_last_attempt = (attempt == MAX_ZOOKEEPER_ATTEMPTS - 1);
if ((code != Coordination::Error::ZBADVERSION) || is_last_attempt)
throw zkutil::KeeperException(code, backup_stage_path);
}
});

zk->createIfNotExists(backup_stage_path, "");
auto code = zk->trySet(backup_stage_path, Stage::SCHEDULED_TO_START, stat.version);
if (code == Coordination::Error::ZOK)
break;
bool is_last_attempt = (attempt == MAX_ZOOKEEPER_ATTEMPTS - 1);
if ((code != Coordination::Error::ZBADVERSION) || is_last_attempt)
throw zkutil::KeeperException(code, backup_stage_path);
}

return false;
return result;
}

}
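hasConcurrentBackups() above couples the scan for other in-flight backups with a versioned write: read the stage node's version, inspect the children, then trySet() the stage only if the version is unchanged, looping on ZBADVERSION. The same optimistic-concurrency idea reduced to one in-process atomic, purely as an analogy (std::atomic plays the role of the znode version; real Keeper semantics are richer):

    #include <atomic>

    /// Publish a new stage only if nobody bumped `version` since we read it -
    /// the in-memory analogue of zk->trySet(path, value, stat.version).
    bool publishStage(std::atomic<int> & version, int & stage, int new_stage)
    {
        constexpr int max_attempts = 10;
        for (int attempt = 0; attempt < max_attempts; ++attempt)
        {
            int observed = version.load();      /// like zk->get(..., &stat)
            /// ... the real code scans existing backups here and may bail out ...
            if (version.compare_exchange_strong(observed, observed + 1))
            {
                stage = new_stage;              /// like trySet(...) returning ZOK
                return true;
            }
            /// CAS failed: someone raced us (the ZBADVERSION case), so re-read and retry.
        }
        return false;                           /// attempts exhausted (the real code throws)
    }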
@ -6,7 +6,7 @@
#include <Backups/BackupCoordinationReplicatedSQLObjects.h>
#include <Backups/BackupCoordinationReplicatedTables.h>
#include <Backups/BackupCoordinationStageSync.h>
#include <Storages/MergeTree/ZooKeeperRetries.h>
#include <Backups/WithRetries.h>


namespace DB
@ -19,13 +19,7 @@ constexpr size_t MAX_ZOOKEEPER_ATTEMPTS = 10;
class BackupCoordinationRemote : public IBackupCoordination
{
public:
struct BackupKeeperSettings
{
UInt64 keeper_max_retries;
UInt64 keeper_retry_initial_backoff_ms;
UInt64 keeper_retry_max_backoff_ms;
UInt64 keeper_value_max_size;
};
using BackupKeeperSettings = WithRetries::KeeperSettings;

BackupCoordinationRemote(
zkutil::GetZooKeeper get_zookeeper_,
@ -79,7 +73,6 @@ public:
static size_t findCurrentHostIndex(const Strings & all_hosts, const String & current_host);

private:
zkutil::ZooKeeperPtr getZooKeeper() const;
void createRootNodes();
void removeAllNodes();

@ -94,7 +87,6 @@ private:
void prepareReplicatedSQLObjects() const TSA_REQUIRES(replicated_sql_objects_mutex);
void prepareFileInfos() const TSA_REQUIRES(file_infos_mutex);

const zkutil::GetZooKeeper get_zookeeper;
const String root_zookeeper_path;
const String zookeeper_path;
const BackupKeeperSettings keeper_settings;
@ -104,11 +96,12 @@ private:
const size_t current_host_index;
const bool plain_backup;
const bool is_internal;
Poco::Logger * const log;

mutable ZooKeeperRetriesInfo zookeeper_retries_info;
/// The order of these two fields matters, because stage_sync holds a reference to the with_retries object
mutable WithRetries with_retries;
std::optional<BackupCoordinationStageSync> stage_sync;

mutable zkutil::ZooKeeperPtr TSA_GUARDED_BY(zookeeper_mutex) zookeeper;
mutable std::optional<BackupCoordinationReplicatedTables> TSA_GUARDED_BY(replicated_tables_mutex) replicated_tables;
mutable std::optional<BackupCoordinationReplicatedAccess> TSA_GUARDED_BY(replicated_access_mutex) replicated_access;
mutable std::optional<BackupCoordinationReplicatedSQLObjects> TSA_GUARDED_BY(replicated_sql_objects_mutex) replicated_sql_objects;
@ -7,8 +7,13 @@ namespace DB
BackupCoordinationReplicatedAccess::BackupCoordinationReplicatedAccess() = default;
BackupCoordinationReplicatedAccess::~BackupCoordinationReplicatedAccess() = default;

void BackupCoordinationReplicatedAccess::addFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & host_id, const String & file_path)
void BackupCoordinationReplicatedAccess::addFilePath(FilePathForAccessEntitry && file_path_for_access_entity)
{
const auto & access_zk_path = file_path_for_access_entity.access_zk_path;
const auto & access_entity_type = file_path_for_access_entity.access_entity_type;
const auto & host_id = file_path_for_access_entity.host_id;
const auto & file_path = file_path_for_access_entity.file_path;

auto & ref = file_paths_by_zk_path[std::make_pair(access_zk_path, access_entity_type)];
ref.file_paths.emplace(file_path);

@ -28,8 +28,16 @@ public:
BackupCoordinationReplicatedAccess();
~BackupCoordinationReplicatedAccess();

struct FilePathForAccessEntitry
{
String access_zk_path;
AccessEntityType access_entity_type;
String host_id;
String file_path;
};

/// Adds a path to an access*.txt file keeping access entities of a ReplicatedAccessStorage.
void addFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & host_id, const String & file_path);
void addFilePath(FilePathForAccessEntitry && file_path_for_access_entity);

/// Returns all paths added by addFilePath() if `host_id` is a host chosen to store access.
Strings getFilePaths(const String & access_zk_path, AccessEntityType access_entity_type, const String & host_id) const;
@ -7,8 +7,13 @@ namespace DB
BackupCoordinationReplicatedSQLObjects::BackupCoordinationReplicatedSQLObjects() = default;
BackupCoordinationReplicatedSQLObjects::~BackupCoordinationReplicatedSQLObjects() = default;

void BackupCoordinationReplicatedSQLObjects::addDirectory(const String & loader_zk_path, UserDefinedSQLObjectType object_type, const String & host_id, const String & dir_path)
void BackupCoordinationReplicatedSQLObjects::addDirectory(DirectoryPathForSQLObject && directory_path_for_sql_object)
{
const auto & loader_zk_path = directory_path_for_sql_object.loader_zk_path;
const auto & object_type = directory_path_for_sql_object.object_type;
const auto & host_id = directory_path_for_sql_object.host_id;
const auto & dir_path = directory_path_for_sql_object.dir_path;

auto & ref = dir_paths_by_zk_path[std::make_pair(loader_zk_path, object_type)];
ref.dir_paths.emplace(dir_path);


@ -28,8 +28,16 @@ public:
BackupCoordinationReplicatedSQLObjects();
~BackupCoordinationReplicatedSQLObjects();

struct DirectoryPathForSQLObject
{
String loader_zk_path;
UserDefinedSQLObjectType object_type;
String host_id;
String dir_path;
};

/// Adds a path to a directory keeping user-defined SQL objects.
void addDirectory(const String & loader_zk_path, UserDefinedSQLObjectType object_type, const String & host_id, const String & dir_path);
void addDirectory(DirectoryPathForSQLObject && directory_path_for_sql_object);

/// Returns all added paths to directories if `host_id` is a host chosen to store user-defined SQL objects.
Strings getDirectories(const String & loader_zk_path, UserDefinedSQLObjectType object_type, const String & host_id) const;
@ -149,12 +149,13 @@ private:
BackupCoordinationReplicatedTables::BackupCoordinationReplicatedTables() = default;
BackupCoordinationReplicatedTables::~BackupCoordinationReplicatedTables() = default;

void BackupCoordinationReplicatedTables::addPartNames(
const String & table_shared_id,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica && part_names)
{
const auto & table_shared_id = part_names.table_shared_id;
const auto & table_name_for_logs = part_names.table_name_for_logs;
const auto & replica_name = part_names.replica_name;
const auto & part_names_and_checksums = part_names.part_names_and_checksums;

if (prepared)
throw Exception(ErrorCodes::LOGICAL_ERROR, "addPartNames() must not be called after preparing");

@ -216,12 +217,13 @@ Strings BackupCoordinationReplicatedTables::getPartNames(const String & table_sh
return it2->second;
}

void BackupCoordinationReplicatedTables::addMutations(
const String & table_shared_id,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<MutationInfo> & mutations)
void BackupCoordinationReplicatedTables::addMutations(MutationsForTableReplica && mutations_for_table_replica)
{
const auto & table_shared_id = mutations_for_table_replica.table_shared_id;
const auto & table_name_for_logs = mutations_for_table_replica.table_name_for_logs;
const auto & replica_name = mutations_for_table_replica.replica_name;
const auto & mutations = mutations_for_table_replica.mutations;

if (prepared)
throw Exception(ErrorCodes::LOGICAL_ERROR, "addMutations() must not be called after preparing");

@ -254,8 +256,11 @@ BackupCoordinationReplicatedTables::getMutations(const String & table_shared_id,
return res;
}

void BackupCoordinationReplicatedTables::addDataPath(const String & table_shared_id, const String & data_path)
void BackupCoordinationReplicatedTables::addDataPath(DataPathForTableReplica && data_path_for_table_replica)
{
const auto & table_shared_id = data_path_for_table_replica.table_shared_id;
const auto & data_path = data_path_for_table_replica.data_path;

auto & table_info = table_infos[table_shared_id];
table_info.data_paths.emplace(data_path);
}

@ -38,15 +38,19 @@ public:

using PartNameAndChecksum = IBackupCoordination::PartNameAndChecksum;

struct PartNamesForTableReplica
{
String table_shared_id;
String table_name_for_logs;
String replica_name;
std::vector<PartNameAndChecksum> part_names_and_checksums;
};

/// Adds part names which a specified replica of a replicated table is going to put to the backup.
/// Multiple replicas of the replicated table call this function, and then the added part names can be returned by calling
/// getPartNames().
/// Checksums are used only to control that parts under the same names on different replicas are the same.
void addPartNames(
const String & table_shared_id,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums);
void addPartNames(PartNamesForTableReplica && part_names);

/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as was added by addPartNames(), but without duplications and without
@ -55,20 +59,30 @@ public:

using MutationInfo = IBackupCoordination::MutationInfo;

struct MutationsForTableReplica
{
String table_shared_id;
String table_name_for_logs;
String replica_name;
std::vector<MutationInfo> mutations;
};

/// Adds information about mutations of a replicated table.
void addMutations(
const String & table_shared_id,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<MutationInfo> & mutations);
void addMutations(MutationsForTableReplica && mutations_for_table_replica);

/// Returns all mutations of a replicated table which are not finished for some data parts added by addReplicatedPartNames().
std::vector<MutationInfo> getMutations(const String & table_shared_id, const String & replica_name) const;

struct DataPathForTableReplica
{
String table_shared_id;
String data_path;
};

/// Adds a data path in backup for a replicated table.
/// Multiple replicas of the replicated table call this function, and then all the added paths can be returned by calling
/// getDataPaths().
void addDataPath(const String & table_shared_id, const String & data_path);
void addDataPath(DataPathForTableReplica && data_path_for_table_replica);

/// Returns all the data paths in backup added for a replicated table (see also addReplicatedDataPath()).
Strings getDataPaths(const String & table_shared_id) const;
@ -1,11 +1,13 @@
|
||||
#include <Backups/BackupCoordinationStageSync.h>
|
||||
|
||||
#include <base/chrono_io.h>
|
||||
#include <Common/ZooKeeper/Common.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/ZooKeeper/KeeperException.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <base/chrono_io.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -17,9 +19,12 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
|
||||
BackupCoordinationStageSync::BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_)
|
||||
: zookeeper_path(zookeeper_path_)
|
||||
, get_zookeeper(get_zookeeper_)
|
||||
BackupCoordinationStageSync::BackupCoordinationStageSync(
|
||||
const String & root_zookeeper_path_,
|
||||
WithRetries & with_retries_,
|
||||
Poco::Logger * log_)
|
||||
: zookeeper_path(root_zookeeper_path_ + "/stage")
|
||||
, with_retries(with_retries_)
|
||||
, log(log_)
|
||||
{
|
||||
createRootNodes();
|
||||
@ -27,32 +32,48 @@ BackupCoordinationStageSync::BackupCoordinationStageSync(const String & zookeepe
|
||||
|
||||
void BackupCoordinationStageSync::createRootNodes()
|
||||
{
|
||||
auto zookeeper = get_zookeeper();
|
||||
zookeeper->createAncestors(zookeeper_path);
|
||||
zookeeper->createIfNotExists(zookeeper_path, "");
|
||||
auto holder = with_retries.createRetriesControlHolder("createRootNodes");
|
||||
holder.retries_ctl.retryLoop(
|
||||
[&, &zookeeper = holder.faulty_zookeeper]()
|
||||
{
|
||||
with_retries.renewZooKeeper(zookeeper);
|
||||
zookeeper->createAncestors(zookeeper_path);
|
||||
zookeeper->createIfNotExists(zookeeper_path, "");
|
||||
});
|
||||
}
|
||||
|
||||
void BackupCoordinationStageSync::set(const String & current_host, const String & new_stage, const String & message)
{
    auto zookeeper = get_zookeeper();
    auto holder = with_retries.createRetriesControlHolder("set");
    holder.retries_ctl.retryLoop(
        [&, &zookeeper = holder.faulty_zookeeper]()
        {
            with_retries.renewZooKeeper(zookeeper);

    /// Make an ephemeral node so the initiator can track if the current host is still working.
    String alive_node_path = zookeeper_path + "/alive|" + current_host;
    auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
    if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
        throw zkutil::KeeperException(code, alive_node_path);
            /// Make an ephemeral node so the initiator can track if the current host is still working.
            String alive_node_path = zookeeper_path + "/alive|" + current_host;
            auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
            if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
                throw zkutil::KeeperException(code, alive_node_path);

    zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, "");
    zookeeper->create(zookeeper_path + "/current|" + current_host + "|" + new_stage, message, zkutil::CreateMode::Persistent);
            zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, "");
            zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message);
        });
}

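For a host "host_1" entering stage "gathering metadata", set() above leaves the following znodes under the coordination root (paths reconstructed from the concatenations in the code; host and stage names are illustrative):

    <root>/stage/alive|host_1                         -- ephemeral: vanishes if the host's session dies
    <root>/stage/started|host_1                       -- persistent marker that the host started
    <root>/stage/current|host_1|gathering metadata    -- persistent, value = message

Note that the unconditional create(..., CreateMode::Persistent) became createIfNotExists(): under retries the lambda can run more than once, and a repeated write must not fail with ZNODEEXISTS.
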
void BackupCoordinationStageSync::setError(const String & current_host, const Exception & exception)
{
    auto zookeeper = get_zookeeper();
    WriteBufferFromOwnString buf;
    writeStringBinary(current_host, buf);
    writeException(exception, buf, true);
    zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());
    auto holder = with_retries.createRetriesControlHolder("setError");
    holder.retries_ctl.retryLoop(
        [&, &zookeeper = holder.faulty_zookeeper]()
        {
            with_retries.renewZooKeeper(zookeeper);

            WriteBufferFromOwnString buf;
            writeStringBinary(current_host, buf);
            writeException(exception, buf, true);
            zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());
        });
}

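setError() packs the failing host and the exception into the single "error" node; readCurrentState() below unpacks them with the matching read helpers. A minimal sketch of that round trip, using only helpers already included in this file:

    WriteBufferFromOwnString out;
    writeStringBinary(current_host, out);
    writeException(exception, out, true);    /// third argument: include the stack trace
    /// out.str() is what gets stored in <zookeeper_path>/error.

    ReadBufferFromOwnString in{out.str()};
    String host;
    readStringBinary(host, in);
    auto error = readException(in, fmt::format("Got error from {}", host));
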
Strings BackupCoordinationStageSync::wait(const Strings & all_hosts, const String & stage_to_wait)
@ -83,14 +104,24 @@ struct BackupCoordinationStageSync::State
};

BackupCoordinationStageSync::State BackupCoordinationStageSync::readCurrentState(
    zkutil::ZooKeeperPtr zookeeper, const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const
    const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const
{
    std::unordered_set<std::string_view> zk_nodes_set{zk_nodes.begin(), zk_nodes.end()};

    State state;
    if (zk_nodes_set.contains("error"))
    {
        ReadBufferFromOwnString buf{zookeeper->get(zookeeper_path + "/error")};
        String errors;
        {
            auto holder = with_retries.createRetriesControlHolder("readCurrentState");
            holder.retries_ctl.retryLoop(
                [&, &zookeeper = holder.faulty_zookeeper]()
                {
                    with_retries.renewZooKeeper(zookeeper);
                    errors = zookeeper->get(zookeeper_path + "/error");
                });
        }
        ReadBufferFromOwnString buf{errors};
        String host;
        readStringBinary(host, buf);
        state.error = std::make_pair(host, readException(buf, fmt::format("Got error from {}", host)));
@ -102,8 +133,38 @@ BackupCoordinationStageSync::State BackupCoordinationStageSync::readCurrentState
        if (!zk_nodes_set.contains("current|" + host + "|" + stage_to_wait))
        {
            UnreadyHostState unready_host_state;
            unready_host_state.started = zk_nodes_set.contains("started|" + host);
            unready_host_state.alive = zk_nodes_set.contains("alive|" + host);
            const String started_node_name = "started|" + host;
            const String alive_node_name = "alive|" + host;
            const String alive_node_path = zookeeper_path + "/" + alive_node_name;
            unready_host_state.started = zk_nodes_set.contains(started_node_name);

            /// Because we retry everywhere, we can't fully rely on ephemeral nodes anymore.
            /// Though we recreate the "alive" node when reconnecting, that might not be enough, and a race condition is possible.
            /// All we can do here is retry.
            /// In the worst case, if we don't manage to see the alive node for a long time, we will just abort the backup.
            unready_host_state.alive = zk_nodes_set.contains(alive_node_name);
            if (!unready_host_state.alive)
            {
                LOG_TRACE(log, "Seems like host ({}) is dead. Will retry the check to confirm", host);
                auto holder = with_retries.createRetriesControlHolder("readCurrentState::checkAliveNode");
                holder.retries_ctl.retryLoop(
                    [&, &zookeeper = holder.faulty_zookeeper]()
                    {
                        with_retries.renewZooKeeper(zookeeper);

                        if (zookeeper->existsNoFailureInjection(alive_node_path))
                        {
                            unready_host_state.alive = true;
                            return;
                        }

                        // Retry with backoff. We also check whether this is the last retry, because we don't want to rethrow the exception on the last attempt.
                        if (!holder.retries_ctl.isLastRetry())
                            holder.retries_ctl.setKeeperError(Coordination::Error::ZNONODE, "There is no alive node for host {}. Will retry", host);
                    });
            }
            LOG_TRACE(log, "Host ({}) appeared to be {}", host, unready_host_state.alive ? "alive" : "dead");

            state.unready_hosts.emplace(host, unready_host_state);
            if (!unready_host_state.alive && unready_host_state.started && !state.host_terminated)
                state.host_terminated = host;
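The double check above trusts the previously fetched children list only for a positive liveness answer; before declaring a host dead it re-reads ZooKeeper with backoff. Reduced to its shape (the function name is hypothetical, the calls are the ones used above), the idiom looks like this; skipping setKeeperError() on the last attempt lets the loop finish quietly instead of rethrowing:

    bool confirmNodeExists(WithRetries & with_retries, const String & path)
    {
        bool exists = false;
        auto holder = with_retries.createRetriesControlHolder("confirmNodeExists");
        holder.retries_ctl.retryLoop(
            [&, &zookeeper = holder.faulty_zookeeper]()
            {
                with_retries.renewZooKeeper(zookeeper);
                if (zookeeper->existsNoFailureInjection(path))
                {
                    exists = true;
                    return;
                }
                /// Mark the attempt as retryable; on the last attempt just accept the negative answer.
                if (!holder.retries_ctl.isLastRetry())
                    holder.retries_ctl.setKeeperError(Coordination::Error::ZNONODE, "Node {} not seen yet. Will retry", path);
            });
        return exists;
    }
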
@ -113,51 +174,62 @@ BackupCoordinationStageSync::State BackupCoordinationStageSync::readCurrentState
    if (state.host_terminated || !state.unready_hosts.empty())
        return state;

    state.results.reserve(all_hosts.size());
    for (const auto & host : all_hosts)
        state.results.emplace_back(zookeeper->get(zookeeper_path + "/current|" + host + "|" + stage_to_wait));
    auto holder = with_retries.createRetriesControlHolder("waitImpl::collectStagesToWait");
    holder.retries_ctl.retryLoop(
        [&, &zookeeper = holder.faulty_zookeeper]()
        {
            with_retries.renewZooKeeper(zookeeper);
            Strings results;

            for (const auto & host : all_hosts)
                results.emplace_back(zookeeper->get(zookeeper_path + "/current|" + host + "|" + stage_to_wait));

            state.results = std::move(results);
        });

    return state;
}

Strings BackupCoordinationStageSync::waitImpl(const Strings & all_hosts, const String & stage_to_wait, std::optional<std::chrono::milliseconds> timeout) const
Strings BackupCoordinationStageSync::waitImpl(
    const Strings & all_hosts, const String & stage_to_wait, std::optional<std::chrono::milliseconds> timeout) const
{
    if (all_hosts.empty())
        return {};

    /// Wait until all hosts are ready, an error happens, or the timeout expires.

    auto zookeeper = get_zookeeper();

    /// Set by ZooKeeper when the list of zk nodes has changed.
    auto watch = std::make_shared<Poco::Event>();

    bool use_timeout = timeout.has_value();
    std::chrono::steady_clock::time_point end_of_timeout;
    if (use_timeout)
        end_of_timeout = std::chrono::steady_clock::now() + std::chrono::duration_cast<std::chrono::steady_clock::duration>(*timeout);

    State state;

    String previous_unready_host; /// Used for logging: we don't want to log the same unready host again.

    for (;;)
    {
        /// Get zk nodes and subscribe on their changes.
        Strings zk_nodes = zookeeper->getChildren(zookeeper_path, nullptr, watch);
        LOG_INFO(log, "Waiting for the stage {}", stage_to_wait);
        /// Set by ZooKeeper when the list of zk nodes has changed.
        auto watch = std::make_shared<Poco::Event>();
        Strings zk_nodes;
        {
            auto holder = with_retries.createRetriesControlHolder("waitImpl::getChildren");
            holder.retries_ctl.retryLoop(
                [&, &zookeeper = holder.faulty_zookeeper]()
                {
                    with_retries.renewZooKeeper(zookeeper);
                    watch->reset();
                    /// Get zk nodes and subscribe on their changes.
                    zk_nodes = zookeeper->getChildren(zookeeper_path, nullptr, watch);
                });
        }

        /// Read and analyze the current state of zk nodes.
        state = readCurrentState(zookeeper, zk_nodes, all_hosts, stage_to_wait);
        state = readCurrentState(zk_nodes, all_hosts, stage_to_wait);
        if (state.error || state.host_terminated || state.unready_hosts.empty())
            break; /// Error happened or everything is ready.

        /// Log that we will wait for another host.
        /// Log which host we are waiting for.
        const auto & unready_host = state.unready_hosts.begin()->first;
        if (unready_host != previous_unready_host)
        {
            LOG_TRACE(log, "Waiting for host {}", unready_host);
            previous_unready_host = unready_host;
        }
        LOG_INFO(log, "Waiting on ZooKeeper watch for any node to be changed (currently waiting for host {})", unready_host);

        /// Wait until `watch_callback` is called by ZooKeeper meaning that zk nodes have changed.
        {
@ -195,6 +267,7 @@ Strings BackupCoordinationStageSync::waitImpl(const Strings & all_hosts, const S
                unready_host_state.started ? "" : ": Operation didn't start");
    }

    LOG_TRACE(log, "Everything is Ok. All hosts achieved stage {}", stage_to_wait);
    return state.results;
}

@ -1,7 +1,6 @@
#pragma once

#include <Common/ZooKeeper/Common.h>

#include <Backups/WithRetries.h>

namespace DB
{
@ -10,7 +9,10 @@ namespace DB
class BackupCoordinationStageSync
{
public:
    BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_);
    BackupCoordinationStageSync(
        const String & root_zookeeper_path_,
        WithRetries & with_retries_,
        Poco::Logger * log_);

    /// Sets the stage of the current host and signals other hosts if they were waiting for that.
    void set(const String & current_host, const String & new_stage, const String & message);
@ -27,12 +29,13 @@ private:
    void createRootNodes();

    struct State;
    State readCurrentState(zkutil::ZooKeeperPtr zookeeper, const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const;
    State readCurrentState(const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const;

    Strings waitImpl(const Strings & all_hosts, const String & stage_to_wait, std::optional<std::chrono::milliseconds> timeout) const;

    String zookeeper_path;
    zkutil::GetZooKeeper get_zookeeper;
    /// A reference to a field of the parent object - BackupCoordinationRemote or RestoreCoordinationRemote.
    WithRetries & with_retries;
    Poco::Logger * log;
};

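Putting the new interface together, a participant/initiator exchange could look like the sketch below (root path, host names, and stage are illustrative; wait() is the public method whose definition appears in the .cpp above):

    BackupCoordinationStageSync stage_sync("/clickhouse/backups/backup-uuid", with_retries, log);

    /// Each host announces the stage it has reached:
    stage_sync.set("host_1", "gathering metadata", "");

    /// The initiator blocks until every host reaches the stage (or an error is set),
    /// and receives each host's message back:
    Strings messages = stage_sync.wait({"host_1", "host_2"}, "gathering metadata");
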
@ -84,6 +84,12 @@ BackupEntriesCollector::BackupEntriesCollector(
    , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000))
    , consistent_metadata_snapshot_timeout(context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000))
    , log(&Poco::Logger::get("BackupEntriesCollector"))
    , global_zookeeper_retries_info(
        "BackupEntriesCollector",
        log,
        context->getSettingsRef().backup_restore_keeper_max_retries,
        context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
        context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms)
{
}

@ -482,7 +488,10 @@ std::vector<std::pair<ASTPtr, StoragePtr>> BackupEntriesCollector::findTablesInD

    try
    {
        db_tables = database->getTablesForBackup(filter_by_table_name, context);
        /// The database or table could be replicated, and so may use ZooKeeper. We need to retry.
        auto zookeeper_retries_info = global_zookeeper_retries_info;
        ZooKeeperRetriesControl retries_ctl("getTablesForBackup", zookeeper_retries_info);
        retries_ctl.retryLoop([&](){ db_tables = database->getTablesForBackup(filter_by_table_name, context); });
    }
    catch (Exception & e)
    {
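The control object takes the retries info by reference, and findTablesInDatabase() above copies global_zookeeper_retries_info before each use - presumably so every operation starts with a fresh retry budget rather than sharing counters. The same pattern for any other retryable step would look like this (collectSomething() is hypothetical):

    auto zookeeper_retries_info = global_zookeeper_retries_info;   /// fresh copy => fresh retry counters
    ZooKeeperRetriesControl retries_ctl("collectSomething", zookeeper_retries_info);
    retries_ctl.retryLoop([&](){ collectSomething(); });    /// body must be safe to repeat
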
@ -745,6 +754,7 @@ void BackupEntriesCollector::addPostTask(std::function<void()> task)
/// Runs all the tasks added with addPostTask().
void BackupEntriesCollector::runPostTasks()
{
    LOG_TRACE(log, "Will run {} post tasks", post_tasks.size());
    /// Post collecting tasks can add other post collecting tasks; our code is fine with that.
    while (!post_tasks.empty())
    {
@ -752,6 +762,7 @@ void BackupEntriesCollector::runPostTasks()
        post_tasks.pop();
        std::move(task)();
    }
    LOG_TRACE(log, "All post tasks successfully executed");
}

size_t BackupEntriesCollector::getAccessCounter(AccessEntityType type)

@ -6,6 +6,7 @@
#include <Parsers/ASTBackupQuery.h>
#include <Storages/IStorage_fwd.h>
#include <Storages/TableLockHolder.h>
#include <Storages/MergeTree/ZooKeeperRetries.h>
#include <filesystem>
#include <queue>

@ -96,6 +97,9 @@ private:
    std::chrono::milliseconds on_cluster_first_sync_timeout;
    std::chrono::milliseconds consistent_metadata_snapshot_timeout;
    Poco::Logger * log;
    /// Unfortunately we may use ZooKeeper while collecting information for a backup,
    /// and so we need to retry...
    ZooKeeperRetriesInfo global_zookeeper_retries_info;

    Strings all_hosts;
    DDLRenamingMap renaming_map;

@ -20,14 +20,14 @@ BackupMutablePtr BackupFactory::createBackup(const CreateParams & params) const
    const String & engine_name = params.backup_info.backup_engine_name;
    auto it = creators.find(engine_name);
    if (it == creators.end())
        throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine {}", engine_name);
        throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine '{}'", engine_name);
    return (it->second)(params);
}

void BackupFactory::registerBackupEngine(const String & engine_name, const CreatorFn & creator_fn)
{
    if (creators.contains(engine_name))
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup engine {} was registered twice", engine_name);
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup engine '{}' was registered twice", engine_name);
    creators[engine_name] = creator_fn;
}


@ -66,12 +66,17 @@ namespace
        credentials.GetAWSSecretKey(),
        settings.auth_settings.server_side_encryption_customer_key_base64,
        std::move(headers),
        settings.auth_settings.use_environment_credentials.value_or(
            context->getConfigRef().getBool("s3.use_environment_credentials", false)),
        settings.auth_settings.use_insecure_imds_request.value_or(
            context->getConfigRef().getBool("s3.use_insecure_imds_request", false)),
        settings.auth_settings.expiration_window_seconds.value_or(
            context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)));
        S3::CredentialsConfiguration
        {
            settings.auth_settings.use_environment_credentials.value_or(
                context->getConfigRef().getBool("s3.use_environment_credentials", true)),
            settings.auth_settings.use_insecure_imds_request.value_or(
                context->getConfigRef().getBool("s3.use_insecure_imds_request", false)),
            settings.auth_settings.expiration_window_seconds.value_or(
                context->getConfigRef().getUInt64("s3.expiration_window_seconds", S3::DEFAULT_EXPIRATION_WINDOW_SECONDS)),
            settings.auth_settings.no_sign_request.value_or(
                context->getConfigRef().getBool("s3.no_sign_request", false)),
        });
}

Aws::Vector<Aws::S3::Model::Object> listObjects(S3::Client & client, const S3::URI & s3_uri, const String & file_name)

@ -58,10 +58,13 @@ namespace

    BackupCoordinationRemote::BackupKeeperSettings keeper_settings
    {
        .keeper_max_retries = context->getSettingsRef().backup_keeper_max_retries,
        .keeper_retry_initial_backoff_ms = context->getSettingsRef().backup_keeper_retry_initial_backoff_ms,
        .keeper_retry_max_backoff_ms = context->getSettingsRef().backup_keeper_retry_max_backoff_ms,
        .keeper_value_max_size = context->getSettingsRef().backup_keeper_value_max_size,
        .keeper_max_retries = context->getSettingsRef().backup_restore_keeper_max_retries,
        .keeper_retry_initial_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
        .keeper_retry_max_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms,
        .batch_size_for_keeper_multiread = context->getSettingsRef().backup_restore_batch_size_for_keeper_multiread,
        .keeper_fault_injection_probability = context->getSettingsRef().backup_restore_keeper_fault_injection_probability,
        .keeper_fault_injection_seed = context->getSettingsRef().backup_restore_keeper_fault_injection_seed,
        .keeper_value_max_size = context->getSettingsRef().backup_restore_keeper_value_max_size,
    };

    auto all_hosts = BackupSettings::Util::filterHostIDs(
@ -92,10 +95,27 @@ namespace

    auto get_zookeeper = [global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); };

    RestoreCoordinationRemote::RestoreKeeperSettings keeper_settings
    {
        .keeper_max_retries = context->getSettingsRef().backup_restore_keeper_max_retries,
        .keeper_retry_initial_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
        .keeper_retry_max_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms,
        .batch_size_for_keeper_multiread = context->getSettingsRef().backup_restore_batch_size_for_keeper_multiread,
        .keeper_fault_injection_probability = context->getSettingsRef().backup_restore_keeper_fault_injection_probability,
        .keeper_fault_injection_seed = context->getSettingsRef().backup_restore_keeper_fault_injection_seed
    };

    auto all_hosts = BackupSettings::Util::filterHostIDs(
        restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num);

    return std::make_shared<RestoreCoordinationRemote>(get_zookeeper, root_zk_path, toString(*restore_settings.restore_uuid), all_hosts, restore_settings.host_id, restore_settings.internal);
    return std::make_shared<RestoreCoordinationRemote>(
        get_zookeeper,
        root_zk_path,
        keeper_settings,
        toString(*restore_settings.restore_uuid),
        all_hosts,
        restore_settings.host_id,
        restore_settings.internal);
}
else
{
@ -660,7 +680,9 @@ void BackupsWorker::doRestore(
    restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ on_cluster);

    if (!allow_concurrent_restores && restore_coordination->hasConcurrentRestores(std::ref(num_active_restores)))
        throw Exception(ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED, "Concurrent restores not supported, turn on setting 'allow_concurrent_restores'");
        throw Exception(
            ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED,
            "Concurrent restores not supported, turn on setting 'allow_concurrent_restores'");

    /// Do RESTORE.
    if (on_cluster)

@ -1,10 +1,14 @@
#include <Backups/RestoreCoordinationLocal.h>
#include <Common/logger_useful.h>


namespace DB
{

RestoreCoordinationLocal::RestoreCoordinationLocal() = default;
RestoreCoordinationLocal::RestoreCoordinationLocal() : log(&Poco::Logger::get("RestoreCoordinationLocal"))
{
}

RestoreCoordinationLocal::~RestoreCoordinationLocal() = default;

void RestoreCoordinationLocal::setStage(const String &, const String &)
@ -49,7 +53,12 @@ bool RestoreCoordinationLocal::acquireReplicatedSQLObjects(const String &, UserD

bool RestoreCoordinationLocal::hasConcurrentRestores(const std::atomic<size_t> & num_active_restores) const
{
    return (num_active_restores > 1);
    if (num_active_restores > 1)
    {
        LOG_WARNING(log, "Found concurrent restores: num_active_restores={}", num_active_restores);
        return true;
    }
    return false;
}

}

Some files were not shown because too many files have changed in this diff.