Merge branch 'master' into compress-marks-primary-key-by-default

commit ef1e282d4c
.github/workflows/master.yml (vendored, 787 changes)
@@ -1131,7 +1131,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1167,6 +1167,114 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseDatabaseReplicated2:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_database_replicated
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, DatabaseReplicated)
+          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseDatabaseReplicated3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_database_replicated
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, DatabaseReplicated)
+          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseS3_0:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, s3 storage)
+          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=0
           RUN_BY_HASH_TOTAL=2
           EOF
       - name: Download json reports
@@ -1190,7 +1298,7 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseS3:
+  FunctionalStatelessTestReleaseS3_1:
     needs: [BuilderDebRelease]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -1202,6 +1310,8 @@ jobs:
           CHECK_NAME=Stateless tests (release, s3 storage)
           REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
           KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=2
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1271,7 +1381,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1307,7 +1417,79 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (asan)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (asan)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1343,7 +1525,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1379,7 +1561,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1415,7 +1597,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1438,7 +1620,79 @@ jobs:
           docker ps --quiet | xargs --no-run-if-empty docker kill ||:
           docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
           sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestUBsan:
+  FunctionalStatelessTestTsan3:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (tsan)
+          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestTsan4:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (tsan)
+          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestUBsan0:
     needs: [BuilderDebUBsan]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -1450,6 +1704,44 @@ jobs:
           CHECK_NAME=Stateless tests (ubsan)
           REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
           KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=2
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestUBsan1:
+    needs: [BuilderDebUBsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_ubsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (ubsan)
+          REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=1
+          RUN_BY_HASH_TOTAL=2
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1485,7 +1777,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1521,7 +1813,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1557,7 +1849,115 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestMsan3:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_memory
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (msan)
+          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestMsan4:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_memory
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (msan)
+          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestMsan5:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_memory
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (msan)
+          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1593,7 +1993,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1629,7 +2029,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -1665,7 +2065,79 @@ jobs:
           REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT=10800
           RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestDebug3:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (debug)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=5
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestDebug4:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (debug)
+          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT=10800
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=5
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2116,7 +2588,7 @@ jobs:
           CHECK_NAME=Integration tests (asan)
           REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2151,7 +2623,7 @@ jobs:
           CHECK_NAME=Integration tests (asan)
           REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2186,7 +2658,112 @@ jobs:
           CHECK_NAME=Integration tests (asan)
           REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
           RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAsan3:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAsan4:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsAsan5:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (asan)
+          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2221,7 +2798,7 @@ jobs:
           CHECK_NAME=Integration tests (tsan)
           REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2256,7 +2833,7 @@ jobs:
           CHECK_NAME=Integration tests (tsan)
           REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2291,7 +2868,7 @@ jobs:
           CHECK_NAME=Integration tests (tsan)
           REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
           RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2326,7 +2903,77 @@ jobs:
           CHECK_NAME=Integration tests (tsan)
           REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
           RUN_BY_HASH_NUM=3
-          RUN_BY_HASH_TOTAL=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsTsan4:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (tsan)
+          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM=4
+          RUN_BY_HASH_TOTAL=6
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsTsan5:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (tsan)
+          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM=5
+          RUN_BY_HASH_TOTAL=6
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2361,7 +3008,7 @@ jobs:
           CHECK_NAME=Integration tests (release)
           REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
           RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -2396,7 +3043,77 @@ jobs:
           CHECK_NAME=Integration tests (release)
           REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
           RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsRelease2:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_release
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (release)
+          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM=2
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v3
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+      - name: Integration test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  IntegrationTestsRelease3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/integration_tests_release
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Integration tests (release)
+          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
           EOF
       - name: Download json reports
         uses: actions/download-artifact@v3
@@ -3116,23 +3833,36 @@ jobs:
       - FunctionalStatelessTestDebug0
       - FunctionalStatelessTestDebug1
       - FunctionalStatelessTestDebug2
+      - FunctionalStatelessTestDebug3
+      - FunctionalStatelessTestDebug4
       - FunctionalStatelessTestRelease
       - FunctionalStatelessTestReleaseDatabaseOrdinary
       - FunctionalStatelessTestReleaseDatabaseReplicated0
       - FunctionalStatelessTestReleaseDatabaseReplicated1
+      - FunctionalStatelessTestReleaseDatabaseReplicated2
+      - FunctionalStatelessTestReleaseDatabaseReplicated3
       - FunctionalStatelessTestAarch64
       - FunctionalStatelessTestAsan0
       - FunctionalStatelessTestAsan1
+      - FunctionalStatelessTestAsan2
+      - FunctionalStatelessTestAsan3
       - FunctionalStatelessTestTsan0
       - FunctionalStatelessTestTsan1
       - FunctionalStatelessTestTsan2
+      - FunctionalStatelessTestTsan3
+      - FunctionalStatelessTestTsan4
       - FunctionalStatelessTestMsan0
       - FunctionalStatelessTestMsan1
       - FunctionalStatelessTestMsan2
-      - FunctionalStatelessTestUBsan
+      - FunctionalStatelessTestMsan3
+      - FunctionalStatelessTestMsan4
+      - FunctionalStatelessTestMsan5
+      - FunctionalStatelessTestUBsan0
+      - FunctionalStatelessTestUBsan1
       - FunctionalStatefulTestDebug
       - FunctionalStatefulTestRelease
-      - FunctionalStatelessTestReleaseS3
+      - FunctionalStatelessTestReleaseS3_0
+      - FunctionalStatelessTestReleaseS3_1
       - FunctionalStatefulTestAarch64
       - FunctionalStatefulTestAsan
       - FunctionalStatefulTestTsan
@@ -3146,12 +3876,19 @@ jobs:
       - IntegrationTestsAsan0
       - IntegrationTestsAsan1
       - IntegrationTestsAsan2
+      - IntegrationTestsAsan3
+      - IntegrationTestsAsan4
+      - IntegrationTestsAsan5
       - IntegrationTestsRelease0
       - IntegrationTestsRelease1
+      - IntegrationTestsRelease2
+      - IntegrationTestsRelease3
       - IntegrationTestsTsan0
       - IntegrationTestsTsan1
       - IntegrationTestsTsan2
       - IntegrationTestsTsan3
+      - IntegrationTestsTsan4
+      - IntegrationTestsTsan5
       - PerformanceComparisonX86-0
       - PerformanceComparisonX86-1
       - PerformanceComparisonX86-2
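
The workflow changes above all follow one pattern: each check is fanned out over RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL buckets, every parallel job receives the same full test list, and each job keeps only the tests whose stable name hash falls into its bucket, so raising the totals (2 to 4, 3 to 6, and so on) simply adds disjoint shards. A minimal Python sketch of that selection logic, assuming CRC32 as the stable hash; the helper name and example test names below are illustrative, not the repository's actual code:

import zlib

def tests_for_bucket(all_tests, run_by_hash_num, run_by_hash_total):
    # The hash depends only on the test name, so every job computes the same
    # value; the buckets are disjoint and together cover the whole suite.
    if not 0 <= run_by_hash_num < run_by_hash_total:
        raise ValueError("RUN_BY_HASH_NUM must be in [0, RUN_BY_HASH_TOTAL)")
    return [
        name for name in all_tests
        if zlib.crc32(name.encode("utf-8")) % run_by_hash_total == run_by_hash_num
    ]

# Example: the four "Stateless tests (release, DatabaseReplicated)" jobs above
# would each run one disjoint quarter of the suite.
suite = ["00001_select_one", "00002_system_numbers", "00004_shard_format"]
print(tests_for_bucket(suite, run_by_hash_num=0, run_by_hash_total=4))
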
@@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s

 | Version | Supported |
 |:-|:-|
+| 23.3 | ✔️ |
 | 23.2 | ✔️ |
 | 23.1 | ✔️ |
-| 22.12 | ✔️ |
+| 22.12 | ❌ |
 | 22.11 | ❌ |
 | 22.10 | ❌ |
 | 22.9 | ❌ |
@@ -24,7 +25,7 @@ The following versions of ClickHouse server are currently being supported with s
 | 22.6 | ❌ |
 | 22.5 | ❌ |
 | 22.4 | ❌ |
-| 22.3 | ✔️ |
+| 22.3 | ❌ |
 | 22.2 | ❌ |
 | 22.1 | ❌ |
 | 21.* | ❌ |

@@ -35,7 +35,7 @@
     Self & operator=(T && rhs) { t = std::move(rhs); return *this;}

     // NOLINTBEGIN(google-explicit-constructor)
-    operator const T & () const { return t; }
+    constexpr operator const T & () const { return t; }
     operator T & () { return t; }
     // NOLINTEND(google-explicit-constructor)

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
     esac

 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.2.4.12"
+ARG VERSION="23.3.1.2823"
 ARG PACKAGES="clickhouse-keeper"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.2.4.12"
+ARG VERSION="23.3.1.2823"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.2.4.12"
+ARG VERSION="23.3.1.2823"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image

docs/changelogs/v22.12.6.22-stable.md (new file, 26 lines)
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v22.12.6.22-stable (10d87f90261) FIXME as compared to v22.12.5.34-stable (b82d6401ca1)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
+* Fix NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
+* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).
+

docs/changelogs/v22.3.20.29-lts.md (new file, 29 lines)
@@ -0,0 +1,29 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v22.3.20.29-lts (297b4dd5e55) FIXME as compared to v22.3.19.6-lts (467e0a7bd77)
+
+#### Improvement
+* Backported in [#46979](https://github.com/ClickHouse/ClickHouse/issues/46979): Apply `ALTER TABLE table_name ON CLUSTER cluster MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name'` to all replicas. Because `ALTER TABLE t MOVE` is not replicated. [#46402](https://github.com/ClickHouse/ClickHouse/pull/46402) ([lizhuoyu5](https://github.com/lzydmxy)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix incorrect alias recursion in QueryNormalizer [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
+* Fix MSan report in `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix wrong results of some LIKE searches when the LIKE pattern contains quoted non-quotable characters [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix possible deadlock in QueryStatus [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).
+
545
docs/changelogs/v23.3.1.2823-lts.md
Normal file
545
docs/changelogs/v23.3.1.2823-lts.md
Normal file
@ -0,0 +1,545 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2023
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2023 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v23.3.1.2823-lts (46e85357ce2) FIXME as compared to v23.2.1.2537-stable (52bf836e03a)
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Relax symbols that are allowed in URL authority in *domain*RFC()/netloc(). [#46841](https://github.com/ClickHouse/ClickHouse/pull/46841) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Prohibit create tables based on KafkaEngine with DEFAULT/EPHEMERAL/ALIAS/MATERIALIZED statements for columns. [#47138](https://github.com/ClickHouse/ClickHouse/pull/47138) ([Aleksandr Musorin](https://github.com/AVMusorin)).
|
||||||
|
* An "asynchronous connection drain" feature is removed. Related settings and metrics are removed as well. It was an internal feature, so the removal should not affect users who had never heard about that feature. [#47486](https://github.com/ClickHouse/ClickHouse/pull/47486) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Support 256-bit Decimal data type (more than 38 digits) in `arraySum`/`Min`/`Max`/`Avg`/`Product`, `arrayCumSum`/`CumSumNonNegative`, `arrayDifference`, array construction, IN operator, query parameters, `groupArrayMovingSum`, statistical functions, `min`/`max`/`any`/`argMin`/`argMax`, PostgreSQL wire protocol, MySQL table engine and function, `sumMap`, `mapAdd`, `mapSubtract`, `arrayIntersect`. Add support for big integers in `arrayIntersect`. Statistical aggregate functions involving moments (such as `corr` or various `TTest`s) will use `Float64` as their internal representation (they were using `Decimal128` before this change, but it was pointless), and these functions can return `nan` instead of `inf` in case of infinite variance. Some functions were allowed on `Decimal256` data types but returned `Decimal128` in previous versions - now it is fixed. This closes [#47569](https://github.com/ClickHouse/ClickHouse/issues/47569). This closes [#44864](https://github.com/ClickHouse/ClickHouse/issues/44864). This closes [#28335](https://github.com/ClickHouse/ClickHouse/issues/28335). [#47594](https://github.com/ClickHouse/ClickHouse/pull/47594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Make backup_threads/restore_threads server settings. [#47881](https://github.com/ClickHouse/ClickHouse/pull/47881) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix the isIPv6String function which could have outputted a false positive result in the case of an incorrect IPv6 address. For example `1234::1234:` was considered a valid IPv6 address. [#47895](https://github.com/ClickHouse/ClickHouse/pull/47895) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Add new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Added query setting `partial_result_on_first_cancel` allowing the canceled query (e.g. due to Ctrl-C) to return a partial result. [#45689](https://github.com/ClickHouse/ClickHouse/pull/45689) ([Alexey Perevyshin](https://github.com/alexX512)).
* Added support for arbitrary table engines for temporary tables, except for the Replicated and KeeperMap engines. Partially closes [#31497](https://github.com/ClickHouse/ClickHouse/issues/31497). [#46071](https://github.com/ClickHouse/ClickHouse/pull/46071) ([Roman Vasin](https://github.com/rvasin)).
* Add replication of user-defined SQL functions using ZooKeeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Implement `system.server_settings` (similar to `system.settings`), which will contain server configurations. [#46550](https://github.com/ClickHouse/ClickHouse/pull/46550) ([pufit](https://github.com/pufit)).
* Introduce a function `WIDTH_BUCKET`. [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
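  For illustration, a sketch of the usual `WIDTH_BUCKET(operand, low, high, count)` semantics (the signature here is taken from the standard SQL function of the same name, not from this entry):

  ```sql
  -- 4 equal-width buckets over [0, 20); 10.15 falls into the 3rd one.
  SELECT WIDTH_BUCKET(10.15, 0, 20, 4);  -- returns 3
  ```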
* Add new functions `parseDateTime`/`parseDateTimeInJodaSyntax` that parse a string into a DateTime according to a specified format string: `parseDateTime` uses MySQL format syntax, while `parseDateTimeInJodaSyntax` uses Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
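  A small sketch of the two syntaxes (the format strings follow the MySQL and Joda conventions, respectively):

  ```sql
  SELECT parseDateTime('2023-03-04 12:01:02', '%Y-%m-%d %H:%i:%s');                -- MySQL-style format
  SELECT parseDateTimeInJodaSyntax('2023-03-04 12:01:02', 'yyyy-MM-dd HH:mm:ss');  -- Joda-style format
  ```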
* Use `dummy UInt8` for default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
* Support date formats like `Dec 15, 2021` in the `parseDateTimeBestEffort` function. Closes [#46816](https://github.com/ClickHouse/ClickHouse/issues/46816). [#47071](https://github.com/ClickHouse/ClickHouse/pull/47071) ([chen](https://github.com/xiedeyantu)).
* Add function ULIDStringToDateTime(). Closes [#46945](https://github.com/ClickHouse/ClickHouse/issues/46945). [#47087](https://github.com/ClickHouse/ClickHouse/pull/47087) ([Nikolay Degterinsky](https://github.com/evillique)).
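  A minimal sketch, assuming the companion `generateULID()` function is available (a ULID encodes its creation timestamp in its leading characters):

  ```sql
  SELECT generateULID() AS ulid, ULIDStringToDateTime(ulid) AS created_at;
  ```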
* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that correspond to the URL parameters `wait_end_of_query` and `buffer_size` for the HTTP interface. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
* Support for `UNDROP TABLE` query. Closes [#46811](https://github.com/ClickHouse/ClickHouse/issues/46811). [#47241](https://github.com/ClickHouse/ClickHouse/pull/47241) ([chen](https://github.com/xiedeyantu)).
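  A sketch of the intended flow, assuming table `t` lives in an `Atomic` database and the delayed cleanup (`database_atomic_delay_before_drop_table_sec`) has not yet run; in early versions this statement may sit behind an experimental-feature setting:

  ```sql
  DROP TABLE t;
  -- The data is still on disk until the delayed drop fires, so it can be restored:
  UNDROP TABLE t;
  ```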
* Add `system.marked_dropped_tables` table that shows tables that were dropped from `Atomic` databases but were not completely removed yet. [#47364](https://github.com/ClickHouse/ClickHouse/pull/47364) ([chen](https://github.com/xiedeyantu)).
* Add `INSTR` as alias of `positionCaseInsensitive` for MySQL compatibility. Closes [#47529](https://github.com/ClickHouse/ClickHouse/issues/47529). [#47535](https://github.com/ClickHouse/ClickHouse/pull/47535) ([flynn](https://github.com/ucasfl)).
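  Since it aliases `positionCaseInsensitive`, the search is case-insensitive and positions are 1-based:

  ```sql
  SELECT INSTR('Hello, ClickHouse!', 'clickhouse');  -- returns 8; 0 would mean "not found"
  ```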
* Added the `toDecimalString` function, allowing conversion of numbers to a string with fixed precision. [#47838](https://github.com/ClickHouse/ClickHouse/pull/47838) ([Andrey Zvonov](https://github.com/zvonand)).
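  A minimal sketch (the second argument is the number of fractional digits in the result):

  ```sql
  SELECT toDecimalString(64.32, 5);  -- '64.32000'
  ```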
* Added operator "REGEXP" (similar to operators "LIKE", "IN", "MOD" etc.) for better compatibility with MySQL. [#47869](https://github.com/ClickHouse/ClickHouse/pull/47869) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow executing the reading pipeline for a DIRECT dictionary with a CLICKHOUSE source in multiple threads. To enable, set `dictionary_use_async_executor=1` in the `SETTINGS` section of the source in the `CREATE DICTIONARY` statement. [#47986](https://github.com/ClickHouse/ClickHouse/pull/47986) ([Vladimir C](https://github.com/vdimir)).
* Add merge tree setting `max_number_of_mutations_for_replica`. It limits the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings). [#48047](https://github.com/ClickHouse/ClickHouse/pull/48047) ([Vladimir C](https://github.com/vdimir)).

#### Performance Improvement
* Optimize the performance of aggregation with a single nullable key. [#45772](https://github.com/ClickHouse/ClickHouse/pull/45772) ([LiuNeng](https://github.com/liuneng1994)).
* Implemented lowercase tokenbf_v1 index utilization for hasTokenOrNull, hasTokenCaseInsensitive and hasTokenCaseInsensitiveOrNull. [#46252](https://github.com/ClickHouse/ClickHouse/pull/46252) ([ltrk2](https://github.com/ltrk2)).
* Optimize the generic SIMD StringSearcher by searching for the first two characters. [#46289](https://github.com/ClickHouse/ClickHouse/pull/46289) ([Jiebin Sun](https://github.com/jiebinn)).
* `system.detached_parts` could be significantly large. The data is now produced in several blocks that respect the block size limitation, and within each block an IO thread pool is used to calculate part sizes, i.e. to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
* Increase the default value of `max_replicated_merges_in_queue` for ReplicatedMergeTree tables from 16 to 1000. It allows faster background merge operation on clusters with a very large number of replicas, such as clusters with shared storage in ClickHouse Cloud. [#47050](https://github.com/ClickHouse/ClickHouse/pull/47050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backups for large numbers of files were unbelievably slow in previous versions. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support filter push down to left table for JOIN with StorageJoin, StorageDictionary, StorageEmbeddedRocksDB. [#47280](https://github.com/ClickHouse/ClickHouse/pull/47280) ([Maksim Kita](https://github.com/kitaisreal)).
* Marks in memory are now compressed, using 3-6x less memory. [#47290](https://github.com/ClickHouse/ClickHouse/pull/47290) ([Michael Kolupaev](https://github.com/al13n321)).
* Updated copier to use `GROUP BY` instead of `DISTINCT` to get the list of partitions. For large tables, this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
* Address https://github.com/clickhouse/clickhouse/issues/46453. Bisect marked https://github.com/clickhouse/clickhouse/pull/35525 as the bad change; this PR reverses the changes from that PR. [#47544](https://github.com/ClickHouse/ClickHouse/pull/47544) ([Ongkong](https://github.com/ongkong)).
* Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
* The setting `max_final_threads` is now set to the number of cores at server startup (by the same algorithm as used for `max_threads`). This improves the concurrency of `FINAL` execution on servers with a high number of CPUs. [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
* Avoid breaking batches on read requests to improve performance. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).

#### Improvement
* Add a map-related function `mapFromArrays`, which allows creating a map from a pair of arrays. [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
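  A minimal sketch: keys come from the first array, values from the second, matched by position:

  ```sql
  SELECT mapFromArrays(['a', 'b', 'c'], [1, 2, 3]);  -- {'a':1,'b':2,'c':3}
  ```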
* Rewrite distributed sends to avoid using filesystem as a queue, use in-memory queue instead. [#45491](https://github.com/ClickHouse/ClickHouse/pull/45491) ([Azat Khuzhin](https://github.com/azat)).
* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add new access type `NAMED_COLLECTION_CONTROL`, which is not given to the default user unless explicitly added to the user config (it is required to be able to do `GRANT ALL`); also, `show_named_collections` no longer has to be specified manually for the default user to have full access rights, as was the case in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Now `X-ClickHouse-Query-Id` and `X-ClickHouse-Timezone` headers are added to response in all queries via http protocol. Previously it was done only for `SELECT` queries. [#46364](https://github.com/ClickHouse/ClickHouse/pull/46364) ([Anton Popov](https://github.com/CurtizJ)).
* Support for connection to a replica set via a URI with a host:port enum and support for the readPreference option in MongoDB dictionaries. Example URI: mongodb://db0.example.com:27017,db1.example.com:27017,db2.example.com:27017/?replicaSet=myRepl&readPreference=primary. [#46524](https://github.com/ClickHouse/ClickHouse/pull/46524) ([artem-yadr](https://github.com/artem-yadr)).
* Re-implement projection analysis on top of query plan. Added setting `query_plan_optimize_projection=1` to switch between old and new version. Fixes [#44963](https://github.com/ClickHouse/ClickHouse/issues/44963). [#46537](https://github.com/ClickHouse/ClickHouse/pull/46537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use parquet format v2 instead of v1 in output format by default. Add setting `output_format_parquet_version` to control parquet version, possible values `v1_0`, `v2_4`, `v2_6`, `v2_latest` (default). [#46617](https://github.com/ClickHouse/ClickHouse/pull/46617) ([Kruglov Pavel](https://github.com/Avogar)).
* Not for changelog - part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#46632](https://github.com/ClickHouse/ClickHouse/pull/46632) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Allow ignoring errors while pushing to a MATERIALIZED VIEW (add new setting `materialized_views_ignore_errors`, `false` by default, but set to `true` for flushing logs to `system.*_log` tables unconditionally). [#46658](https://github.com/ClickHouse/ClickHouse/pull/46658) ([Azat Khuzhin](https://github.com/azat)).
* Enable input_format_json_ignore_unknown_keys_in_named_tuple by default. [#46742](https://github.com/ClickHouse/ClickHouse/pull/46742) ([Kruglov Pavel](https://github.com/Avogar)).
* It is now possible, using the new configuration syntax, to configure Kafka topics with periods in their names. [#46752](https://github.com/ClickHouse/ClickHouse/pull/46752) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix heuristics that check hyperscan patterns for problematic repeats. [#46819](https://github.com/ClickHouse/ClickHouse/pull/46819) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't report ZK node exists to system.errors when a block was created concurrently by a different replica. [#46820](https://github.com/ClickHouse/ClickHouse/pull/46820) ([Raúl Marín](https://github.com/Algunenano)).
* Allow PREWHERE for Merge tables with different DEFAULT expressions for a column. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
* Increase the limit for opened files in `clickhouse-local`. It will be able to read from `web` tables on servers with a huge number of CPU cores. Do not back off reading from the URL table engine in case of too many opened files. This closes [#46852](https://github.com/ClickHouse/ClickHouse/issues/46852). [#46853](https://github.com/ClickHouse/ClickHouse/pull/46853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exceptions thrown when numbers cannot be parsed now have an easier-to-read exception message. [#46917](https://github.com/ClickHouse/ClickHouse/pull/46917) ([Robert Schulze](https://github.com/rschu1ze)).
* `system.backups` is now updated after every processed task. [#46989](https://github.com/ClickHouse/ClickHouse/pull/46989) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Allow type conversions in the Native input format. Add setting `input_format_native_allow_types_conversion` that controls it (enabled by default). [#46990](https://github.com/ClickHouse/ClickHouse/pull/46990) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow IPv4 in the `range` function to generate IP ranges. [#46995](https://github.com/ClickHouse/ClickHouse/pull/46995) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Role changes were sometimes not propagated before https://github.com/ClickHouse/ClickHouse/pull/46772; this PR just adds tests. [#47002](https://github.com/ClickHouse/ClickHouse/pull/47002) ([Ilya Golshtein](https://github.com/ilejn)).
* Improve the exception message when it is impossible to move a part from one volume/disk to another. [#47032](https://github.com/ClickHouse/ClickHouse/pull/47032) ([alesapin](https://github.com/alesapin)).
* Support `Bool` type in `JSONType` function. Previously `Null` type was mistakenly returned for bool values. [#47046](https://github.com/ClickHouse/ClickHouse/pull/47046) ([Anton Popov](https://github.com/CurtizJ)).
* Use the `_request_body` parameter to configure predefined HTTP queries. [#47086](https://github.com/ClickHouse/ClickHouse/pull/47086) ([Constantine Peresypkin](https://github.com/pkit)).
* Remove logging of the custom disk structure. [#47103](https://github.com/ClickHouse/ClickHouse/pull/47103) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow nested custom disks. Previously custom disks supported only flat disk structure. [#47106](https://github.com/ClickHouse/ClickHouse/pull/47106) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Automatic indentation in the built-in UI SQL editor when Enter is pressed. [#47113](https://github.com/ClickHouse/ClickHouse/pull/47113) ([Alexey Korepanov](https://github.com/alexkorep)).
* Allow controlling compression in Parquet/ORC/Arrow output formats, and support more compression types for input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
* Self-extraction with 'sudo' will attempt to set uid and gid of extracted files to running user. [#47116](https://github.com/ClickHouse/ClickHouse/pull/47116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Previously, the second argument of the `repeat` function had to be an unsigned integer and could not accept a negative value like -1, unlike the corresponding Spark function. This is fixed to behave the same as in Spark, and tests were added. [#47134](https://github.com/ClickHouse/ClickHouse/pull/47134) ([KevinyhZou](https://github.com/KevinyhZou)).
* Remove `::__1` part from stacktraces. Display `std::basic_string<char, ...` as `String` in stacktraces. [#47171](https://github.com/ClickHouse/ClickHouse/pull/47171) ([Mike Kot](https://github.com/myrrc)).
* Introduced a separate thread pool for backup IO operations. This allows scaling it independently of other pools and increases performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Reimplement interserver mode to avoid replay attacks (note, that change is backward compatible with older servers). [#47213](https://github.com/ClickHouse/ClickHouse/pull/47213) ([Azat Khuzhin](https://github.com/azat)).
* Make the function `OptimizeRegularExpression` recognize regexp groups and refine the regexp tree dictionary. [#47218](https://github.com/ClickHouse/ClickHouse/pull/47218) ([Han Fei](https://github.com/hanfei1991)).
* Use MultiRead request and retries for collecting metadata at final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Keeper improvement: Add new 4LW `clrs` to clean resources used by Keeper (e.g. release unused memory). [#47256](https://github.com/ClickHouse/ClickHouse/pull/47256) ([Antonio Andelic](https://github.com/antonio2368)).
* Add optional arguments to codecs `DoubleDelta(bytes_size)`, `Gorilla(bytes_size)`, `FPC(level, float_size)`; this allows using these codecs without a column type in `clickhouse-compressor`. Fix possible aborts and arithmetic errors in `clickhouse-compressor` with these codecs. Fixes: https://github.com/ClickHouse/ClickHouse/discussions/47262. [#47271](https://github.com/ClickHouse/ClickHouse/pull/47271) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for big int types to runningDifference() function. Closes [#47194](https://github.com/ClickHouse/ClickHouse/issues/47194). [#47322](https://github.com/ClickHouse/ClickHouse/pull/47322) ([Nikolay Degterinsky](https://github.com/evillique)).
* PostgreSQL replication has been adjusted to use "FROM ONLY" clause while performing initial synchronization. This prevents double-fetching the same data in case the target PostgreSQL database uses table inheritance. [#47387](https://github.com/ClickHouse/ClickHouse/pull/47387) ([Maksym Sobolyev](https://github.com/sobomax)).
* Add an expiration window for S3 credentials that have an expiration time to avoid `ExpiredToken` errors in some edge cases. It can be controlled with `expiration_window_seconds` config, the default is 120 seconds. [#47423](https://github.com/ClickHouse/ClickHouse/pull/47423) ([Antonio Andelic](https://github.com/antonio2368)).
* Support Decimals and Date32 in Avro format. [#47434](https://github.com/ClickHouse/ClickHouse/pull/47434) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not start the server if an interrupted conversion from `Ordinary` to `Atomic` was detected, print a better error message with troubleshooting instructions. [#47487](https://github.com/ClickHouse/ClickHouse/pull/47487) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a new column `kind` to `system.opentelemetry_span_log`. This column holds the value of [SpanKind](https://opentelemetry.io/docs/reference/specification/trace/api/#spankind) defined in OpenTelemetry. [#47499](https://github.com/ClickHouse/ClickHouse/pull/47499) ([Frank Chen](https://github.com/FrankChen021)).
* If both the backup and the data being restored are in S3, server-side copy is used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add SSL User Certificate authentication to the native protocol. Closes [#47077](https://github.com/ClickHouse/ClickHouse/issues/47077). [#47596](https://github.com/ClickHouse/ClickHouse/pull/47596) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow reading/writing nested arrays in Protobuf with only the root field name as the column name. Previously, the column name had to contain all nested field names (like `a.b.c Array(Array(Array(UInt32)))`); now you can use just `a Array(Array(Array(UInt32)))`. [#47650](https://github.com/ClickHouse/ClickHouse/pull/47650) ([Kruglov Pavel](https://github.com/Avogar)).
* Added an optional `STRICT` modifier for `SYSTEM SYNC REPLICA` which makes the query wait for replication queue to become empty (just like it worked before https://github.com/ClickHouse/ClickHouse/pull/45648). [#47659](https://github.com/ClickHouse/ClickHouse/pull/47659) ([Alexander Tokmakov](https://github.com/tavplubix)).
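  A sketch with a hypothetical replicated table `db.events_replicated`:

  ```sql
  -- Without STRICT the query waits only for the entries already in the queue;
  -- with STRICT it waits until the replication queue becomes completely empty.
  SYSTEM SYNC REPLICA db.events_replicated STRICT;
  ```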
* Improve the names of some span logs. [#47667](https://github.com/ClickHouse/ClickHouse/pull/47667) ([Frank Chen](https://github.com/FrankChen021)).
* ReplicatedMergeTree with zero-copy replication now puts less load on ZooKeeper. [#47676](https://github.com/ClickHouse/ClickHouse/pull/47676) ([alesapin](https://github.com/alesapin)).
* Prevent using too long chains of aggregate function combinators (they can lead to slow queries in the analysis stage). This closes [#47715](https://github.com/ClickHouse/ClickHouse/issues/47715). [#47716](https://github.com/ClickHouse/ClickHouse/pull/47716) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support for subqueries in parameterized views. Resolves [#46741](https://github.com/ClickHouse/ClickHouse/issues/46741). Implementation: updated to pass the parameter `is_create_parameterized_view` to subquery processing. Testing: added a test case with a subquery for a parameterized view. [#47725](https://github.com/ClickHouse/ClickHouse/pull/47725) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix memory leak in MySQL integration (reproduces with `connection_auto_close=1`). [#47732](https://github.com/ClickHouse/ClickHouse/pull/47732) ([Kseniia Sumarokova](https://github.com/kssenii)).
* The AST fuzzer now supports fuzzing `EXPLAIN` queries. [#47803](https://github.com/ClickHouse/ClickHouse/pull/47803) ([flynn](https://github.com/ucasfl)).
* Fixed the error message printed when Decimal parameters are incorrect. [#47812](https://github.com/ClickHouse/ClickHouse/pull/47812) ([Yu Feng](https://github.com/Vigor-jpg)).
* Add `X-ClickHouse-Query-Id` to the HTTP response when a query fails to execute. [#47813](https://github.com/ClickHouse/ClickHouse/pull/47813) ([Frank Chen](https://github.com/FrankChen021)).
* The AST fuzzer now randomly turns `SELECT` queries into `EXPLAIN` queries during fuzzing. [#47852](https://github.com/ClickHouse/ClickHouse/pull/47852) ([flynn](https://github.com/ucasfl)).
* Improved the overall performance by better utilizing the local replica, and forbade reading with parallel replicas from non-replicated MergeTree by default. [#47858](https://github.com/ClickHouse/ClickHouse/pull/47858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* More accurate CPU usage indication for client: account for usage in some long-living server threads (Segmentator) and do regular CPU accounting for every thread. [#47870](https://github.com/ClickHouse/ClickHouse/pull/47870) ([Sergei Trifonov](https://github.com/serxa)).
* The setting `exact_rows_before_limit` makes `rows_before_limit_at_least` accurately reflect the number of rows returned before the limit is reached. This pull request addresses issues encountered when the query involves distributed processing across multiple shards or sorting operations; prior to this update, those scenarios did not function as intended. [#47874](https://github.com/ClickHouse/ClickHouse/pull/47874) ([Amos Bird](https://github.com/amosbird)).
* ThreadPool metrics introspection. [#47880](https://github.com/ClickHouse/ClickHouse/pull/47880) ([Azat Khuzhin](https://github.com/azat)).
* Add `WriteBufferFromS3Microseconds` and `WriteBufferFromS3RequestsErrors` profile events. [#47885](https://github.com/ClickHouse/ClickHouse/pull/47885) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `--link` and `--noninteractive` (`-y`) options to clickhouse install. Closes [#47750](https://github.com/ClickHouse/ClickHouse/issues/47750). [#47887](https://github.com/ClickHouse/ClickHouse/pull/47887) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix decimal-256 text output issue on s390x. [#47932](https://github.com/ClickHouse/ClickHouse/pull/47932) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Fixed `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix a case when the (optional) path is not added to the encrypted disk configuration. [#47981](https://github.com/ClickHouse/ClickHouse/pull/47981) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add *OrNull() and *OrZero() variants for parseDateTime(), add alias "str_to_date" for MySQL parity. [#48000](https://github.com/ClickHouse/ClickHouse/pull/48000) ([Robert Schulze](https://github.com/rschu1ze)).
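  A short sketch of the new variants and the alias:

  ```sql
  SELECT parseDateTimeOrNull('not a date', '%Y-%m-%d');  -- NULL instead of an exception
  SELECT parseDateTimeOrZero('not a date', '%Y-%m-%d');  -- zero date instead of an exception
  SELECT str_to_date('2023-04-01', '%Y-%m-%d');          -- MySQL-compatible alias of parseDateTime
  ```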
* Improve the code around reading the `background_..._pool_size` settings; they should be configured via the main server configuration file. [#48055](https://github.com/ClickHouse/ClickHouse/pull/48055) ([filimonov](https://github.com/filimonov)).
* Support for CTEs in parameterized views. Implementation: updated to allow query parameters while evaluating scalar subqueries. Testing: added a test case with a CTE for a parameterized view. [#48065](https://github.com/ClickHouse/ClickHouse/pull/48065) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add `NOSIGN` keyword for S3 table function and storage engine to avoid signing requests with provided credentials. Add `no_sign_request` config for all functionalities using S3. [#48092](https://github.com/ClickHouse/ClickHouse/pull/48092) ([Antonio Andelic](https://github.com/antonio2368)).
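  A sketch with a hypothetical public bucket (with `NOSIGN` in place of the credentials, requests are sent anonymously):

  ```sql
  SELECT count()
  FROM s3('https://mybucket.s3.amazonaws.com/data.csv', NOSIGN, 'CSVWithNames');
  ```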
* Support big integers `(U)Int128`/`(U)Int256`, `Map` with any key type, and `DateTime64` with any precision (not only 3 and 6). [#48119](https://github.com/ClickHouse/ClickHouse/pull/48119) ([Kruglov Pavel](https://github.com/Avogar)).
* Support more ClickHouse types in MsgPack format: (U)Int128/(U)Int256, Enum8(16), Date32, Decimal(32|64|128|256), Tuples. [#48124](https://github.com/ClickHouse/ClickHouse/pull/48124) ([Kruglov Pavel](https://github.com/Avogar)).
* The output of some SHOW ... statements is now sorted. [#48127](https://github.com/ClickHouse/ClickHouse/pull/48127) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow skipping errors related to unknown enum values in row input formats. [#48133](https://github.com/ClickHouse/ClickHouse/pull/48133) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `allow_distributed_ddl_queries` option to disallow distributed DDL queries for the cluster in the config. [#48171](https://github.com/ClickHouse/ClickHouse/pull/48171) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Determine the hosts' order in `SHOW CLUSTER` query, a followup for [#48127](https://github.com/ClickHouse/ClickHouse/issues/48127) and [#46240](https://github.com/ClickHouse/ClickHouse/issues/46240). [#48235](https://github.com/ClickHouse/ClickHouse/pull/48235) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Build/Testing/Packaging Improvement
* Split stress test and backward compatibility check (now Upgrade check). [#44879](https://github.com/ClickHouse/ClickHouse/pull/44879) ([Kruglov Pavel](https://github.com/Avogar)).
* Use sccache as a replacement for ccache, with S3 as the cache backend. [#46240](https://github.com/ClickHouse/ClickHouse/pull/46240) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Updated Ubuntu Image. [#46784](https://github.com/ClickHouse/ClickHouse/pull/46784) ([Julio Jimenez](https://github.com/juliojimenez)).
* Adds a prompt to allow the removal of an existing `clickhouse` binary when using the "curl | sh" download of ClickHouse. The prompt is "ClickHouse binary clickhouse already exists. Overwrite? [y/N] ". [#46859](https://github.com/ClickHouse/ClickHouse/pull/46859) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Fix an error during server startup on old distros (e.g. Amazon Linux 2) and on ARM where glibc 2.28 symbols were not found. [#47008](https://github.com/ClickHouse/ClickHouse/pull/47008) ([Robert Schulze](https://github.com/rschu1ze)).
* Clang 16 is set to release in the next few days, making it an opportune time to update. [#47027](https://github.com/ClickHouse/ClickHouse/pull/47027) ([Amos Bird](https://github.com/amosbird)).
* Added a CI check which ensures ClickHouse can run with an old glibc on ARM. [#47063](https://github.com/ClickHouse/ClickHouse/pull/47063) ([Robert Schulze](https://github.com/rschu1ze)).
* ClickHouse now builds with C++23. [#47424](https://github.com/ClickHouse/ClickHouse/pull/47424) ([Robert Schulze](https://github.com/rschu1ze)).
* Fixed an issue with starting `clickhouse-test` against a custom clickhouse binary with `-b`. [#47578](https://github.com/ClickHouse/ClickHouse/pull/47578) ([Vasily Nemkov](https://github.com/Enmk)).
* Add a style check to prevent incorrect usage of the `NDEBUG` macro. [#47699](https://github.com/ClickHouse/ClickHouse/pull/47699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build a little. [#47714](https://github.com/ClickHouse/ClickHouse/pull/47714) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump vectorscan to 5.4.9. [#47955](https://github.com/ClickHouse/ClickHouse/pull/47955) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a unit test to assert arrow fatal logging does not abort. It covers the changes in https://github.com/ClickHouse/arrow/pull/16. [#47958](https://github.com/ClickHouse/ClickHouse/pull/47958) ([Arthur Passos](https://github.com/arthurpassos)).
* Restore the ability of the native macOS debug server build to start (this time for real). [#48050](https://github.com/ClickHouse/ClickHouse/pull/48050) ([Robert Schulze](https://github.com/rschu1ze)).
* Functional tests will trigger JIT compilation more frequently, in a randomized fashion. See [#48120](https://github.com/ClickHouse/ClickHouse/issues/48120). [#48196](https://github.com/ClickHouse/ClickHouse/pull/48196) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `clickhouse/clickhouse-keeper` image used to be pushed only with tags `-alpine`, e.g. `latest-alpine`. As it was suggested in https://github.com/ClickHouse/examples/pull/2, now it will be pushed as suffixless too. [#48236](https://github.com/ClickHouse/ClickHouse/pull/48236) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix create materialized view with MaterializedPostgreSQL [#40807](https://github.com/ClickHouse/ClickHouse/pull/40807) ([Maksim Buren](https://github.com/maks-buren630501)).
* Fix formats parser resetting, test processing bad messages in kafka [#45693](https://github.com/ClickHouse/ClickHouse/pull/45693) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix several `RENAME COLUMN` bugs. [#45911](https://github.com/ClickHouse/ClickHouse/pull/45911) ([alesapin](https://github.com/alesapin)).
* Fix data size calculation in Keeper [#46086](https://github.com/ClickHouse/ClickHouse/pull/46086) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixes for 993 [#46384](https://github.com/ClickHouse/ClickHouse/pull/46384) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect alias recursion in QueryNormalizer [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Fix IPv4/IPv6 serialization/deserialization in binary formats [#46616](https://github.com/ClickHouse/ClickHouse/pull/46616) ([Kruglov Pavel](https://github.com/Avogar)).
* ActionsDAG: do not change result of and() during optimization [#46653](https://github.com/ClickHouse/ClickHouse/pull/46653) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix queries cancellation when a client dies [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible clickhouse-local abort on JSONEachRow schema inference [#46731](https://github.com/ClickHouse/ClickHouse/pull/46731) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix combined PREWHERE column accumulated from multiple steps [#46785](https://github.com/ClickHouse/ClickHouse/pull/46785) ([Alexander Gololobov](https://github.com/davenger)).
* Use initial range for fetching file size in HTTP read buffer [#46824](https://github.com/ClickHouse/ClickHouse/pull/46824) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix progress bar with URL [#46830](https://github.com/ClickHouse/ClickHouse/pull/46830) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not allow const and non-deterministic secondary indexes [#46839](https://github.com/ClickHouse/ClickHouse/pull/46839) ([Anton Popov](https://github.com/CurtizJ)).
* Fix MSan report in `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a bug in `Map` data type [#46856](https://github.com/ClickHouse/ClickHouse/pull/46856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong results of some LIKE searches when the LIKE pattern contains quoted non-quotable characters [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix - WITH FILL would produce abort when FillingTransform processing empty block [#46897](https://github.com/ClickHouse/ClickHouse/pull/46897) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix date and int inference from string in JSON [#46972](https://github.com/ClickHouse/ClickHouse/pull/46972) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix typo in systemd service definition [#47051](https://github.com/ClickHouse/ClickHouse/pull/47051) ([Palash Goel](https://github.com/palash-goel)).
* Fix NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Fix a problem where a `ReplicatedMergeTree` table failed to insert two similar pieces of data when `part_type` was configured as `InMemory`. [#47121](https://github.com/ClickHouse/ClickHouse/pull/47121) ([liding1992](https://github.com/liding1992)).
* External dictionaries / library-bridge: Fix error "unknown library method 'extDict_libClone'" [#47136](https://github.com/ClickHouse/ClickHouse/pull/47136) ([alex filatov](https://github.com/phil-88)).
* Fix race in grace hash join with limit [#47153](https://github.com/ClickHouse/ClickHouse/pull/47153) ([Vladimir C](https://github.com/vdimir)).
* Fix concrete columns PREWHERE support [#47154](https://github.com/ClickHouse/ClickHouse/pull/47154) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible deadlock in QueryStatus [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).
* Backup_Restore_concurrency_check_node [#47216](https://github.com/ClickHouse/ClickHouse/pull/47216) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Forbid insert select for the same StorageJoin [#47260](https://github.com/ClickHouse/ClickHouse/pull/47260) ([Vladimir C](https://github.com/vdimir)).
* Skip merged partitions for `min_age_to_force_merge_seconds` merges [#47303](https://github.com/ClickHouse/ClickHouse/pull/47303) ([Antonio Andelic](https://github.com/antonio2368)).
* Modify find_first_symbols so it works as expected for find_first_not_symbols [#47304](https://github.com/ClickHouse/ClickHouse/pull/47304) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix big numbers inference in CSV [#47410](https://github.com/ClickHouse/ClickHouse/pull/47410) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable logical expression optimizer for expression with aliases. [#47451](https://github.com/ClickHouse/ClickHouse/pull/47451) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove a feature [#47456](https://github.com/ClickHouse/ClickHouse/pull/47456) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error in `decodeURLComponent` [#47457](https://github.com/ClickHouse/ClickHouse/pull/47457) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parameterized view bug fix 47287 47247 [#47495](https://github.com/ClickHouse/ClickHouse/pull/47495) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fuzzer of data formats [#47519](https://github.com/ClickHouse/ClickHouse/pull/47519) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix monotonicity check for DateTime64 [#47526](https://github.com/ClickHouse/ClickHouse/pull/47526) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix block structure mismatch for nullable LowCardinality column [#47537](https://github.com/ClickHouse/ClickHouse/pull/47537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Proper fix for bug in parquet, revert reverted [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) [#47538](https://github.com/ClickHouse/ClickHouse/pull/47538) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix BSONEachRow parallel parsing when document size is invalid [#47540](https://github.com/ClickHouse/ClickHouse/pull/47540) ([Kruglov Pavel](https://github.com/Avogar)).
* Preserve error in system.distribution_queue on SYSTEM FLUSH DISTRIBUTED [#47541](https://github.com/ClickHouse/ClickHouse/pull/47541) ([Azat Khuzhin](https://github.com/azat)).
* Revert "Revert "Backup_Restore_concurrency_check_node"" [#47586](https://github.com/ClickHouse/ClickHouse/pull/47586) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Check for duplicate column in BSONEachRow format [#47609](https://github.com/ClickHouse/ClickHouse/pull/47609) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wait for zero copy lock during move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Fix aggregation by partitions [#47634](https://github.com/ClickHouse/ClickHouse/pull/47634) ([Nikita Taranov](https://github.com/nickitat)).
* Fix bug in tuple as array serialization in BSONEachRow format [#47690](https://github.com/ClickHouse/ClickHouse/pull/47690) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in polygonsSymDifferenceCartesian [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Fix reading from storage `File` compressed files with `zlib` and `gzip` compression [#47796](https://github.com/ClickHouse/ClickHouse/pull/47796) ([Anton Popov](https://github.com/CurtizJ)).
* Improve empty query detection for PostgreSQL (for pgx golang driver) [#47854](https://github.com/ClickHouse/ClickHouse/pull/47854) ([Azat Khuzhin](https://github.com/azat)).
* Fix DateTime monotonicity check for LowCardinality [#47860](https://github.com/ClickHouse/ClickHouse/pull/47860) ([Antonio Andelic](https://github.com/antonio2368)).
* Use restore_threads (not backup_threads) for RESTORE ASYNC [#47861](https://github.com/ClickHouse/ClickHouse/pull/47861) ([Azat Khuzhin](https://github.com/azat)).
* Fix DROP COLUMN with ReplicatedMergeTree containing projections [#47883](https://github.com/ClickHouse/ClickHouse/pull/47883) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix for Replicated database recovery [#47901](https://github.com/ClickHouse/ClickHouse/pull/47901) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix "Field value too long" in catboostEvaluate() [#47970](https://github.com/ClickHouse/ClickHouse/pull/47970) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix [#36971](https://github.com/ClickHouse/ClickHouse/issues/36971): Watchdog: exit with non-zero code if child process exits [#47973](https://github.com/ClickHouse/ClickHouse/pull/47973) ([Коренберг Марк](https://github.com/socketpair)).
* Fix for "index file cidx is unexpectedly long" [#48010](https://github.com/ClickHouse/ClickHouse/pull/48010) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix MaterializedPostgreSQL query to get attributes (replica-identity) [#48015](https://github.com/ClickHouse/ClickHouse/pull/48015) ([Solomatov Sergei](https://github.com/solomatovs)).
* parseDateTime(): Fix UB (signed integer overflow) [#48019](https://github.com/ClickHouse/ClickHouse/pull/48019) ([Robert Schulze](https://github.com/rschu1ze)).
* Use unique names for Records in Avro to avoid reusing their schema [#48057](https://github.com/ClickHouse/ClickHouse/pull/48057) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in explain graph with StorageMerge [#48102](https://github.com/ClickHouse/ClickHouse/pull/48102) ([Vladimir C](https://github.com/vdimir)).
* Correctly set TCP/HTTP socket timeouts in Keeper [#48108](https://github.com/ClickHouse/ClickHouse/pull/48108) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible member call on null pointer in Avro format [#48184](https://github.com/ClickHouse/ClickHouse/pull/48184) ([Kruglov Pavel](https://github.com/Avogar)).

#### Build Improvement
* Update krb5 to 1.20.1-final to mitigate CVE-2022-42898. [#46485](https://github.com/ClickHouse/ClickHouse/pull/46485) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Fixed random crash issues caused by bad pointers in libunwind for s390x. [#46755](https://github.com/ClickHouse/ClickHouse/pull/46755) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed http xz compression issue for s390x. [#46832](https://github.com/ClickHouse/ClickHouse/pull/46832) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed murmurhash function for s390x. [#47036](https://github.com/ClickHouse/ClickHouse/pull/47036) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed halfMD5 and broken cityHash function for s390x. [#47115](https://github.com/ClickHouse/ClickHouse/pull/47115) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed farmhash functions for s390x. [#47223](https://github.com/ClickHouse/ClickHouse/pull/47223) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed endian issue in hashing tuples for s390x. [#47371](https://github.com/ClickHouse/ClickHouse/pull/47371) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed SipHash integer hashing issue and byte order issue in random integer data from GenerateRandom storage engine for s390x. [#47576](https://github.com/ClickHouse/ClickHouse/pull/47576) ([Harry Lee](https://github.com/HarryLeeIBM)).

#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Fix several `RENAME COLUMN` bugs."'. [#46909](https://github.com/ClickHouse/ClickHouse/pull/46909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add join_algorithm='grace_hash' to stress tests"'. [#46988](https://github.com/ClickHouse/ClickHouse/pull/46988) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* NO CL ENTRY: 'Revert "Give users option of overwriting"'. [#47169](https://github.com/ClickHouse/ClickHouse/pull/47169) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "standardize admonitions"'. [#47413](https://github.com/ClickHouse/ClickHouse/pull/47413) ([Rich Raposa](https://github.com/rfraposa)).
* NO CL ENTRY: 'Revert "Backup_Restore_concurrency_check_node"'. [#47581](https://github.com/ClickHouse/ClickHouse/pull/47581) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Update storing-data.md'. [#47598](https://github.com/ClickHouse/ClickHouse/pull/47598) ([San](https://github.com/santrancisco)).
* NO CL ENTRY: 'Revert "Fix BSONEachRow parallel parsing when document size is invalid"'. [#47672](https://github.com/ClickHouse/ClickHouse/pull/47672) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "New navigation"'. [#47694](https://github.com/ClickHouse/ClickHouse/pull/47694) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Analyzer planner fixes before enable by default"'. [#47721](https://github.com/ClickHouse/ClickHouse/pull/47721) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Analyzer planner fixes before enable by default""'. [#47748](https://github.com/ClickHouse/ClickHouse/pull/47748) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Add sanity checks for writing number in variable length format"'. [#47850](https://github.com/ClickHouse/ClickHouse/pull/47850) ([Robert Schulze](https://github.com/rschu1ze)).
* NO CL ENTRY: 'Revert "Revert "Revert "Backup_Restore_concurrency_check_node"""'. [#47963](https://github.com/ClickHouse/ClickHouse/pull/47963) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Test differences between using materialize_ttl_recalculate_only=1/0 [#45304](https://github.com/ClickHouse/ClickHouse/pull/45304) ([Jordi Villar](https://github.com/jrdi)).
* Fix query in stress script [#45480](https://github.com/ClickHouse/ClickHouse/pull/45480) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Add join_algorithm='grace_hash' to stress tests [#45607](https://github.com/ClickHouse/ClickHouse/pull/45607) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Support `group_by_use_nulls` setting in new analyzer [#45910](https://github.com/ClickHouse/ClickHouse/pull/45910) ([Dmitry Novik](https://github.com/novikd)).
* Randomize setting `ratio_of_defaults_for_sparse_serialization` [#46118](https://github.com/ClickHouse/ClickHouse/pull/46118) ([Anton Popov](https://github.com/CurtizJ)).
* Add CrossToInnerJoinPass [#46408](https://github.com/ClickHouse/ClickHouse/pull/46408) ([Vladimir C](https://github.com/vdimir)).
* Fix flakiness of test_backup_restore_on_cluster/test_disallow_concurrency [#46517](https://github.com/ClickHouse/ClickHouse/pull/46517) ([Azat Khuzhin](https://github.com/azat)).
* Map field to string fix [#46618](https://github.com/ClickHouse/ClickHouse/pull/46618) ([Maksim Kita](https://github.com/kitaisreal)).
* Enable perf tests added in [#45364](https://github.com/ClickHouse/ClickHouse/issues/45364) [#46623](https://github.com/ClickHouse/ClickHouse/pull/46623) ([Nikita Taranov](https://github.com/nickitat)).
* Logical expression optimizer in new analyzer [#46644](https://github.com/ClickHouse/ClickHouse/pull/46644) ([Antonio Andelic](https://github.com/antonio2368)).
* Named collections: finish replacing old code for storages [#46647](https://github.com/ClickHouse/ClickHouse/pull/46647) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make tiny improvements [#46659](https://github.com/ClickHouse/ClickHouse/pull/46659) ([ltrk2](https://github.com/ltrk2)).
* Fix openssl/s390x build (setenv + link order) [#46684](https://github.com/ClickHouse/ClickHouse/pull/46684) ([Boris Kuschel](https://github.com/bkuschel)).
* Analyzer AutoFinalOnQueryPass fix [#46729](https://github.com/ClickHouse/ClickHouse/pull/46729) ([Maksim Kita](https://github.com/kitaisreal)).
* Mark failed build reports as pending on reruns [#46736](https://github.com/ClickHouse/ClickHouse/pull/46736) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Do not reanalyze expressions from aggregation in projection [#46738](https://github.com/ClickHouse/ClickHouse/pull/46738) ([Vladimir C](https://github.com/vdimir)).
* Update CHANGELOG.md [#46766](https://github.com/ClickHouse/ClickHouse/pull/46766) ([Ilya Yatsishin](https://github.com/qoega)).
* Poco: Remove some dead code [#46768](https://github.com/ClickHouse/ClickHouse/pull/46768) ([Robert Schulze](https://github.com/rschu1ze)).
* More concise logging at trace level for PREWHERE steps [#46771](https://github.com/ClickHouse/ClickHouse/pull/46771) ([Alexander Gololobov](https://github.com/davenger)).
* Follow-up to [#41534](https://github.com/ClickHouse/ClickHouse/issues/41534) [#46775](https://github.com/ClickHouse/ClickHouse/pull/46775) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix timeout for all expect tests (wrong usage of expect_after timeout) [#46779](https://github.com/ClickHouse/ClickHouse/pull/46779) ([Azat Khuzhin](https://github.com/azat)).
* Reduce updates of Mergeable Check [#46781](https://github.com/ClickHouse/ClickHouse/pull/46781) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Updated Slack invite link [#46783](https://github.com/ClickHouse/ClickHouse/pull/46783) ([clickhouse-adrianfraguela](https://github.com/clickhouse-adrianfraguela)).
* Print all stacktraces in hung check [#46787](https://github.com/ClickHouse/ClickHouse/pull/46787) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Quick temporary fix for stress tests [#46789](https://github.com/ClickHouse/ClickHouse/pull/46789) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update version after release [#46792](https://github.com/ClickHouse/ClickHouse/pull/46792) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v23.2.1.2537-stable [#46794](https://github.com/ClickHouse/ClickHouse/pull/46794) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Remove ZSTD version from CMake output [#46796](https://github.com/ClickHouse/ClickHouse/pull/46796) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v22.11.6.44-stable [#46801](https://github.com/ClickHouse/ClickHouse/pull/46801) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* CMake: Add best effort checks that the build machine isn't too old [#46803](https://github.com/ClickHouse/ClickHouse/pull/46803) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix async reading pipeline when small limit is present [#46804](https://github.com/ClickHouse/ClickHouse/pull/46804) ([Nikita Taranov](https://github.com/nickitat)).
* Cleanup string search code [#46814](https://github.com/ClickHouse/ClickHouse/pull/46814) ([Robert Schulze](https://github.com/rschu1ze)).
* Stateless cmake version [#46821](https://github.com/ClickHouse/ClickHouse/pull/46821) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Refine regexp tree dictionary [#46822](https://github.com/ClickHouse/ClickHouse/pull/46822) ([Han Fei](https://github.com/hanfei1991)).
* Non-significant change [#46844](https://github.com/ClickHouse/ClickHouse/pull/46844) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a trap [#46845](https://github.com/ClickHouse/ClickHouse/pull/46845) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better handling of fatal errors [#46846](https://github.com/ClickHouse/ClickHouse/pull/46846) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#43184](https://github.com/ClickHouse/ClickHouse/issues/43184) [#46848](https://github.com/ClickHouse/ClickHouse/pull/46848) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong function name [#46849](https://github.com/ClickHouse/ClickHouse/pull/46849) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#45214](https://github.com/ClickHouse/ClickHouse/issues/45214) [#46850](https://github.com/ClickHouse/ClickHouse/pull/46850) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Final fixes for expect tests [#46857](https://github.com/ClickHouse/ClickHouse/pull/46857) ([Azat Khuzhin](https://github.com/azat)).
* Small optimization of LIKE patterns with > 1 trailing % [#46869](https://github.com/ClickHouse/ClickHouse/pull/46869) ([Robert Schulze](https://github.com/rschu1ze)).
* Add new metrics to system.asynchronous_metrics [#46886](https://github.com/ClickHouse/ClickHouse/pull/46886) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky `test_concurrent_queries_restriction_by_query_kind` [#46887](https://github.com/ClickHouse/ClickHouse/pull/46887) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test test_async_backups_to_same_destination. [#46888](https://github.com/ClickHouse/ClickHouse/pull/46888) ([Vitaly Baranov](https://github.com/vitlibar)).
* Make ASTSelectQuery::formatImpl() more robust [#46889](https://github.com/ClickHouse/ClickHouse/pull/46889) ([Robert Schulze](https://github.com/rschu1ze)).
* tests: fix 02116_interactive_hello for "official build" [#46911](https://github.com/ClickHouse/ClickHouse/pull/46911) ([Azat Khuzhin](https://github.com/azat)).
* Fix some expect tests leftovers and enable them in fasttest [#46915](https://github.com/ClickHouse/ClickHouse/pull/46915) ([Azat Khuzhin](https://github.com/azat)).
* Increase ddl timeout for DROP statement in backup restore tests [#46920](https://github.com/ClickHouse/ClickHouse/pull/46920) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* A better alternative to [#46344](https://github.com/ClickHouse/ClickHouse/issues/46344) [#46921](https://github.com/ClickHouse/ClickHouse/pull/46921) ([Robert Schulze](https://github.com/rschu1ze)).
* Code review from @tavplubix [#46922](https://github.com/ClickHouse/ClickHouse/pull/46922) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Planner: trivial count optimization [#46923](https://github.com/ClickHouse/ClickHouse/pull/46923) ([Igor Nikonov](https://github.com/devcrafter)).
* Typo: SIZES_OF_ARRAYS_DOESNT_MATCH --> SIZES_OF_ARRAYS_DONT_MATCH [#46940](https://github.com/ClickHouse/ClickHouse/pull/46940) ([Robert Schulze](https://github.com/rschu1ze)).
* Another fix for clone() for ASTColumnMatchers [#46947](https://github.com/ClickHouse/ClickHouse/pull/46947) ([Nikolay Degterinsky](https://github.com/evillique)).
* Un-inline likePatternToRegexp() [#46950](https://github.com/ClickHouse/ClickHouse/pull/46950) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix missing format_description [#46959](https://github.com/ClickHouse/ClickHouse/pull/46959) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* ARM: Activate LDAPR with -march flag instead via -XClang [#46960](https://github.com/ClickHouse/ClickHouse/pull/46960) ([Robert Schulze](https://github.com/rschu1ze)).
* Preset description on the tweak reset [#46963](https://github.com/ClickHouse/ClickHouse/pull/46963) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version_date.tsv and changelogs after v22.3.19.6-lts [#46964](https://github.com/ClickHouse/ClickHouse/pull/46964) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.14.53-lts [#46969](https://github.com/ClickHouse/ClickHouse/pull/46969) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Better exception messages when schema_inference_hints is ill-formatted [#46971](https://github.com/ClickHouse/ClickHouse/pull/46971) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Decrease log level in "disks" [#46976](https://github.com/ClickHouse/ClickHouse/pull/46976) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Change the cherry-pick PR body [#46977](https://github.com/ClickHouse/ClickHouse/pull/46977) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Rename recent stateless tests to fix order [#46991](https://github.com/ClickHouse/ClickHouse/pull/46991) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Pass headers from StorageURL to WriteBufferFromHTTP [#46996](https://github.com/ClickHouse/ClickHouse/pull/46996) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Change level log in executeQuery [#46997](https://github.com/ClickHouse/ClickHouse/pull/46997) ([Andrey Bystrov](https://github.com/AndyBys)).
|
||||||
|
* Add thevar1able to trusted contributors [#46998](https://github.com/ClickHouse/ClickHouse/pull/46998) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Use /etc/default/clickhouse in systemd too [#47003](https://github.com/ClickHouse/ClickHouse/pull/47003) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix tmp_path_template in HTTPHandler::processQuery [#47007](https://github.com/ClickHouse/ClickHouse/pull/47007) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix flaky azure test [#47011](https://github.com/ClickHouse/ClickHouse/pull/47011) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Temporary enable force_sync for keeper in CI [#47024](https://github.com/ClickHouse/ClickHouse/pull/47024) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* ActionsDAG: do not change result of and() during optimization - part 2 [#47028](https://github.com/ClickHouse/ClickHouse/pull/47028) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||||
|
* Add upgrade check to stateful dependent field [#47031](https://github.com/ClickHouse/ClickHouse/pull/47031) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Disable path check in SQLite storage for clickhouse-local [#47052](https://github.com/ClickHouse/ClickHouse/pull/47052) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Terminate long-running offline non-busy runners in EC2 [#47064](https://github.com/ClickHouse/ClickHouse/pull/47064) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix Keeper with `force_sync = false` [#47065](https://github.com/ClickHouse/ClickHouse/pull/47065) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.2.2.20-stable [#47069](https://github.com/ClickHouse/ClickHouse/pull/47069) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.1.4.58-stable [#47070](https://github.com/ClickHouse/ClickHouse/pull/47070) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.12.4.76-stable [#47074](https://github.com/ClickHouse/ClickHouse/pull/47074) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix empty result when selection from only one side of join in analyzer [#47093](https://github.com/ClickHouse/ClickHouse/pull/47093) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Suppress "Cannot flush" for Distributed tables in upgrade check [#47095](https://github.com/ClickHouse/ClickHouse/pull/47095) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Make stacktraces in hung check more readable [#47096](https://github.com/ClickHouse/ClickHouse/pull/47096) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* release lambda resources before detaching thread group [#47098](https://github.com/ClickHouse/ClickHouse/pull/47098) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Analyzer Planner fixes before enable by default [#47101](https://github.com/ClickHouse/ClickHouse/pull/47101) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* do flushUntrackedMemory when context switches [#47102](https://github.com/ClickHouse/ClickHouse/pull/47102) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* fix: keeper systemd service file include invalid inline comment [#47105](https://github.com/ClickHouse/ClickHouse/pull/47105) ([SuperDJY](https://github.com/cmsxbc)).
|
||||||
|
* Add code for autoscaling lambda [#47107](https://github.com/ClickHouse/ClickHouse/pull/47107) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Enable lightweight delete support by default [#47109](https://github.com/ClickHouse/ClickHouse/pull/47109) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Shorten some code with CTAD [#47139](https://github.com/ClickHouse/ClickHouse/pull/47139) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Make 01710_projections more stable. [#47145](https://github.com/ClickHouse/ClickHouse/pull/47145) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* fix_JSON_searchField [#47147](https://github.com/ClickHouse/ClickHouse/pull/47147) ([Aleksei Tikhomirov](https://github.com/aletik256)).
|
||||||
|
* Mark 01771_bloom_filter_not_has as no-parallel and long [#47148](https://github.com/ClickHouse/ClickHouse/pull/47148) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Use unique names and paths in `test_replicated_database` [#47152](https://github.com/ClickHouse/ClickHouse/pull/47152) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add stupid retries in clickhouse-test health check. [#47158](https://github.com/ClickHouse/ClickHouse/pull/47158) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* 02346_full_text_search.sql: Add result separators to simplify analysis [#47166](https://github.com/ClickHouse/ClickHouse/pull/47166) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* More correct handling of fatal errors [#47175](https://github.com/ClickHouse/ClickHouse/pull/47175) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Update read in StorageMemory [#47180](https://github.com/ClickHouse/ClickHouse/pull/47180) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||||
|
* Doc update for mapFromArrays() [#47183](https://github.com/ClickHouse/ClickHouse/pull/47183) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix failure context for Upgrade check [#47191](https://github.com/ClickHouse/ClickHouse/pull/47191) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add support for different expected errors [#47196](https://github.com/ClickHouse/ClickHouse/pull/47196) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix ip coding on s390x [#47208](https://github.com/ClickHouse/ClickHouse/pull/47208) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
|
||||||
|
* Add real client (initiator server) address into the logs for interserver mode [#47214](https://github.com/ClickHouse/ClickHouse/pull/47214) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix 01019_alter_materialized_view_consistent [#47215](https://github.com/ClickHouse/ClickHouse/pull/47215) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix RewriteArrayExistsToHasPass [#47225](https://github.com/ClickHouse/ClickHouse/pull/47225) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Release shared ptrs after finishing a transaction [#47245](https://github.com/ClickHouse/ClickHouse/pull/47245) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add default constructor for `MultiReadResponse` [#47254](https://github.com/ClickHouse/ClickHouse/pull/47254) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Join threads if exception happened in `ZooKeeperImpl` constructor [#47261](https://github.com/ClickHouse/ClickHouse/pull/47261) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* use std::lerp, constexpr hex.h [#47268](https://github.com/ClickHouse/ClickHouse/pull/47268) ([Mike Kot](https://github.com/myrrc)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.2.3.17-stable [#47269](https://github.com/ClickHouse/ClickHouse/pull/47269) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix bug in zero copy replica which can lead to dataloss [#47274](https://github.com/ClickHouse/ClickHouse/pull/47274) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix typo [#47282](https://github.com/ClickHouse/ClickHouse/pull/47282) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Follow-up to [#46681](https://github.com/ClickHouse/ClickHouse/issues/46681) [#47284](https://github.com/ClickHouse/ClickHouse/pull/47284) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix test 02566_ipv4_ipv6_binary_formats [#47295](https://github.com/ClickHouse/ClickHouse/pull/47295) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Set fixed index_granularity for test 00636 [#47298](https://github.com/ClickHouse/ClickHouse/pull/47298) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix 02570_fallback_from_async_insert [#47308](https://github.com/ClickHouse/ClickHouse/pull/47308) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Catch exceptions in LiveViewPeriodicRefreshTask [#47309](https://github.com/ClickHouse/ClickHouse/pull/47309) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix MergeTreeTransaction::isReadOnly [#47310](https://github.com/ClickHouse/ClickHouse/pull/47310) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix an assertion with implicit transactions in interserver mode [#47312](https://github.com/ClickHouse/ClickHouse/pull/47312) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `File exists` error in Upgrade check [#47314](https://github.com/ClickHouse/ClickHouse/pull/47314) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Support transformQueryForExternalDatabase for analyzer [#47316](https://github.com/ClickHouse/ClickHouse/pull/47316) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Disable parallel format in health check [#47318](https://github.com/ClickHouse/ClickHouse/pull/47318) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Analyzer - fix combine logic for limit expression and limit setting [#47324](https://github.com/ClickHouse/ClickHouse/pull/47324) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Suppress expected errors from test 01111 in Upgrade check [#47365](https://github.com/ClickHouse/ClickHouse/pull/47365) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix GROUPING function initialization for grouping sets [#47370](https://github.com/ClickHouse/ClickHouse/pull/47370) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Add join_algorithm='grace_hash' to stress tests [#47372](https://github.com/ClickHouse/ClickHouse/pull/47372) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
|
||||||
|
* Fix 02343_group_by_use_nulls test in new analyzer [#47373](https://github.com/ClickHouse/ClickHouse/pull/47373) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Disable 02368_cancel_write_into_hdfs in stress tests [#47382](https://github.com/ClickHouse/ClickHouse/pull/47382) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Analyzer planner fixes before enable by default [#47383](https://github.com/ClickHouse/ClickHouse/pull/47383) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix `ALTER CLEAR COLUMN` with sparse columns [#47384](https://github.com/ClickHouse/ClickHouse/pull/47384) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix: apply reading in order for distinct [#47385](https://github.com/ClickHouse/ClickHouse/pull/47385) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* add checks for ptr [#47398](https://github.com/ClickHouse/ClickHouse/pull/47398) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Remove distinct on top of MergingAggregatedStep [#47399](https://github.com/ClickHouse/ClickHouse/pull/47399) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Update LRUFileCachePriority.cpp [#47411](https://github.com/ClickHouse/ClickHouse/pull/47411) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Make test 02473_optimize_old_parts less flaky [#47416](https://github.com/ClickHouse/ClickHouse/pull/47416) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Add test to prevent regressions when using bitmapHasAny [#47419](https://github.com/ClickHouse/ClickHouse/pull/47419) ([Jordi Villar](https://github.com/jrdi)).
|
||||||
|
* Update README.md [#47421](https://github.com/ClickHouse/ClickHouse/pull/47421) ([Tyler Hannan](https://github.com/tylerhannan)).
|
||||||
|
* Refactor query cache (make use of CacheBase) [#47428](https://github.com/ClickHouse/ClickHouse/pull/47428) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Suppress Hung Check with UBsan [#47429](https://github.com/ClickHouse/ClickHouse/pull/47429) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* [docs] Document add async_insert_max_query_number [#47431](https://github.com/ClickHouse/ClickHouse/pull/47431) ([Antonio Bonuccelli](https://github.com/nellicus)).
|
||||||
|
* Apply settings for EXPLAIN earlier (in the same way we do for SELECT). [#47433](https://github.com/ClickHouse/ClickHouse/pull/47433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.2.4.12-stable [#47448](https://github.com/ClickHouse/ClickHouse/pull/47448) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix aggregation-in-order with aliases. [#47449](https://github.com/ClickHouse/ClickHouse/pull/47449) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix 01429_join_on_error_messages [#47450](https://github.com/ClickHouse/ClickHouse/pull/47450) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Update version_date.tsv and changelogs after v23.1.5.24-stable [#47452](https://github.com/ClickHouse/ClickHouse/pull/47452) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.12.5.34-stable [#47453](https://github.com/ClickHouse/ClickHouse/pull/47453) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.15.23-lts [#47455](https://github.com/ClickHouse/ClickHouse/pull/47455) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Disable grace hash join in upgrade check [#47474](https://github.com/ClickHouse/ClickHouse/pull/47474) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Revert [#46622](https://github.com/ClickHouse/ClickHouse/issues/46622) (test_async_insert_memory) [#47476](https://github.com/ClickHouse/ClickHouse/pull/47476) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix `00933_test_fix_extra_seek_on_compressed_cache` in releases. [#47490](https://github.com/ClickHouse/ClickHouse/pull/47490) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix long test `02371_select_projection_normal_agg.sql` [#47491](https://github.com/ClickHouse/ClickHouse/pull/47491) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Revert [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) and add a test [#47492](https://github.com/ClickHouse/ClickHouse/pull/47492) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Planner JOIN TREE build fix [#47498](https://github.com/ClickHouse/ClickHouse/pull/47498) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Better support of identifiers from compound expressions in analyzer [#47506](https://github.com/ClickHouse/ClickHouse/pull/47506) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Adapt some tests to pass with and without the analyzer [#47525](https://github.com/ClickHouse/ClickHouse/pull/47525) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Small enhancements [#47534](https://github.com/ClickHouse/ClickHouse/pull/47534) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Support constants in INTERPOLATE clause (new analyzer) [#47539](https://github.com/ClickHouse/ClickHouse/pull/47539) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Remove TOTALS handling in FillingTransform [#47542](https://github.com/ClickHouse/ClickHouse/pull/47542) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Hide too noisy log messages, fix some tests [#47547](https://github.com/ClickHouse/ClickHouse/pull/47547) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix some flaky tests [#47553](https://github.com/ClickHouse/ClickHouse/pull/47553) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* remove counters for threads, fix negative counters [#47564](https://github.com/ClickHouse/ClickHouse/pull/47564) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix typo [#47565](https://github.com/ClickHouse/ClickHouse/pull/47565) ([hq1](https://github.com/aerosol)).
|
||||||
|
* Fixes for upgrade check [#47570](https://github.com/ClickHouse/ClickHouse/pull/47570) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Change error code in case of columns definitions was empty in ODBC [#47573](https://github.com/ClickHouse/ClickHouse/pull/47573) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add missing SYSTEM FLUSH LOGS for log messages statistics [#47575](https://github.com/ClickHouse/ClickHouse/pull/47575) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix performance regression in aggregation [#47582](https://github.com/ClickHouse/ClickHouse/pull/47582) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* ReadFromMergeTree explain prewhere and row policy actions [#47583](https://github.com/ClickHouse/ClickHouse/pull/47583) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix possible failures of 01300_client_save_history_when_terminated_long [#47606](https://github.com/ClickHouse/ClickHouse/pull/47606) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* checksum: do not check inverted index files [#47607](https://github.com/ClickHouse/ClickHouse/pull/47607) ([save-my-heart](https://github.com/save-my-heart)).
|
||||||
|
* Add sanity checks for writing number in variable length format [#47608](https://github.com/ClickHouse/ClickHouse/pull/47608) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Analyzer planner fixes before enable by default [#47622](https://github.com/ClickHouse/ClickHouse/pull/47622) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix exception message in clickhouse-test [#47625](https://github.com/ClickHouse/ClickHouse/pull/47625) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* FillingTransform: remove unnecessary indirection when accessing columns [#47632](https://github.com/ClickHouse/ClickHouse/pull/47632) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* fix typo in HashJoin insertion that enables debug code in release build [#46726](https://github.com/ClickHouse/ClickHouse/issues/46726) [#47647](https://github.com/ClickHouse/ClickHouse/pull/47647) ([jorisgio](https://github.com/jorisgio)).
|
||||||
|
* clang-tidy >= 15: write CheckOptions in dictionary format [#47648](https://github.com/ClickHouse/ClickHouse/pull/47648) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* CMake: Build ClickHouse w/o GNU extensions [#47651](https://github.com/ClickHouse/ClickHouse/pull/47651) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Faster fasttest [#47654](https://github.com/ClickHouse/ClickHouse/pull/47654) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add background pools size metrics [#47656](https://github.com/ClickHouse/ClickHouse/pull/47656) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
|
* Improve ThreadPool [#47657](https://github.com/ClickHouse/ClickHouse/pull/47657) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* cmake: remove support for gold linker [#47660](https://github.com/ClickHouse/ClickHouse/pull/47660) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Updated events and recordings [#47668](https://github.com/ClickHouse/ClickHouse/pull/47668) ([clickhouse-adrianfraguela](https://github.com/clickhouse-adrianfraguela)).
|
||||||
|
* Follow-up to [#47660](https://github.com/ClickHouse/ClickHouse/issues/47660): Further removal of gold linker support [#47669](https://github.com/ClickHouse/ClickHouse/pull/47669) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Enable parallel execution for two tests [#47670](https://github.com/ClickHouse/ClickHouse/pull/47670) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Restore native macos build [#47673](https://github.com/ClickHouse/ClickHouse/pull/47673) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* CMake: Remove further cruft from build [#47680](https://github.com/ClickHouse/ClickHouse/pull/47680) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* fix test / remove hardcoded database [#47682](https://github.com/ClickHouse/ClickHouse/pull/47682) ([Denny Crane](https://github.com/den-crane)).
|
||||||
|
* Apply log_queries_cut_to_length in MergeTreeWhereOptimizer [#47684](https://github.com/ClickHouse/ClickHouse/pull/47684) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix logical error in evaluate constant expression [#47685](https://github.com/ClickHouse/ClickHouse/pull/47685) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Try making `test_keeper_mntr_data_size` less flaky [#47687](https://github.com/ClickHouse/ClickHouse/pull/47687) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix limit offset [#47688](https://github.com/ClickHouse/ClickHouse/pull/47688) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Fix startup on older systemd versions [#47689](https://github.com/ClickHouse/ClickHouse/pull/47689) ([Thomas Casteleyn](https://github.com/Hipska)).
|
||||||
|
* More random query id in tests [#47700](https://github.com/ClickHouse/ClickHouse/pull/47700) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add a style check for unsafe code [#47703](https://github.com/ClickHouse/ClickHouse/pull/47703) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Make the code in Join less disgusting [#47712](https://github.com/ClickHouse/ClickHouse/pull/47712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixup git reference to LLVM [#47719](https://github.com/ClickHouse/ClickHouse/pull/47719) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Preparation for libcxx(abi), llvm, clang-tidy 16 [#47722](https://github.com/ClickHouse/ClickHouse/pull/47722) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Rename cfg parameter query_cache.size to query_cache.max_size [#47724](https://github.com/ClickHouse/ClickHouse/pull/47724) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add optimization for MemoryStorageStep [#47726](https://github.com/ClickHouse/ClickHouse/pull/47726) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||||
|
* Fix aggregation with constant key in planner [#47727](https://github.com/ClickHouse/ClickHouse/pull/47727) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Disable setting in 02343_group_by_use_nulls_distributed (for new analyzer) [#47728](https://github.com/ClickHouse/ClickHouse/pull/47728) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Add a test for [#21469](https://github.com/ClickHouse/ClickHouse/issues/21469) [#47736](https://github.com/ClickHouse/ClickHouse/pull/47736) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#23804](https://github.com/ClickHouse/ClickHouse/issues/23804) [#47737](https://github.com/ClickHouse/ClickHouse/pull/47737) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#18937](https://github.com/ClickHouse/ClickHouse/issues/18937) [#47738](https://github.com/ClickHouse/ClickHouse/pull/47738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#17756](https://github.com/ClickHouse/ClickHouse/issues/17756) [#47739](https://github.com/ClickHouse/ClickHouse/pull/47739) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#23162](https://github.com/ClickHouse/ClickHouse/issues/23162) [#47740](https://github.com/ClickHouse/ClickHouse/pull/47740) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* remove unused code [#47743](https://github.com/ClickHouse/ClickHouse/pull/47743) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Fix broken cross-compiled macos builds [#47744](https://github.com/ClickHouse/ClickHouse/pull/47744) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Randomize query cache settings [#47749](https://github.com/ClickHouse/ClickHouse/pull/47749) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Clarify steps for reopened cherry-pick PRs [#47755](https://github.com/ClickHouse/ClickHouse/pull/47755) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix ZK exception error message [#47757](https://github.com/ClickHouse/ClickHouse/pull/47757) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Add ComparisonTupleEliminationVisitor [#47758](https://github.com/ClickHouse/ClickHouse/pull/47758) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix partition ID byte order for s390x [#47769](https://github.com/ClickHouse/ClickHouse/pull/47769) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
|
* Stop `wait for quorum` retries on shutdown [#47770](https://github.com/ClickHouse/ClickHouse/pull/47770) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* More preparation for upgrade to libcxx(abi), llvm, clang-tidy 16 [#47771](https://github.com/ClickHouse/ClickHouse/pull/47771) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Apply black formatter to all python scripts [#47790](https://github.com/ClickHouse/ClickHouse/pull/47790) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Try fix window view test [#47791](https://github.com/ClickHouse/ClickHouse/pull/47791) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Update test for nested lambdas [#47795](https://github.com/ClickHouse/ClickHouse/pull/47795) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
|
* Decrease scale_down ratio for faster deflation [#47798](https://github.com/ClickHouse/ClickHouse/pull/47798) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix 993 and two other tests [#47802](https://github.com/ClickHouse/ClickHouse/pull/47802) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix flaky test 02417_opentelemetry_insert_on_distributed_table [#47811](https://github.com/ClickHouse/ClickHouse/pull/47811) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Make 01086_odbc_roundtrip less flaky [#47820](https://github.com/ClickHouse/ClickHouse/pull/47820) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* [FixTests] Remove wrong chassert() in UserDefinedSQLObjectsLoaderFromZooKeeper.cpp [#47839](https://github.com/ClickHouse/ClickHouse/pull/47839) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fix test test_replicated_merge_tree_encryption_codec [#47851](https://github.com/ClickHouse/ClickHouse/pull/47851) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Allow injecting timeout errors on Keeper [#47856](https://github.com/ClickHouse/ClickHouse/pull/47856) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Comment stale cherry-pick PRs once a day to remind for resolving conflicts [#47857](https://github.com/ClickHouse/ClickHouse/pull/47857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Followup to [#47802](https://github.com/ClickHouse/ClickHouse/issues/47802) [#47864](https://github.com/ClickHouse/ClickHouse/pull/47864) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Slightly better error message [#47868](https://github.com/ClickHouse/ClickHouse/pull/47868) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Make test_server_reload non-parallel [#47871](https://github.com/ClickHouse/ClickHouse/pull/47871) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* aspell-dict.txt: keep sorted things sorted [#47878](https://github.com/ClickHouse/ClickHouse/pull/47878) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* throw exception when all retries exhausted [#47902](https://github.com/ClickHouse/ClickHouse/pull/47902) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Fix GRANT query formatting [#47908](https://github.com/ClickHouse/ClickHouse/pull/47908) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix exception type in arrayElement function [#47909](https://github.com/ClickHouse/ClickHouse/pull/47909) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix logical error in DistributedSink [#47916](https://github.com/ClickHouse/ClickHouse/pull/47916) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix terminate in parts check thread [#47917](https://github.com/ClickHouse/ClickHouse/pull/47917) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Limit keeper request batching by size in bytes [#47918](https://github.com/ClickHouse/ClickHouse/pull/47918) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
|
* Improve replicated user defined functions [#47919](https://github.com/ClickHouse/ClickHouse/pull/47919) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Update 01072_window_view_multiple_columns_groupby.sh [#47928](https://github.com/ClickHouse/ClickHouse/pull/47928) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Added test. Closes [#12264](https://github.com/ClickHouse/ClickHouse/issues/12264) [#47931](https://github.com/ClickHouse/ClickHouse/pull/47931) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Disallow concurrent backup restore test - removed SYSTEM SYNC [#47944](https://github.com/ClickHouse/ClickHouse/pull/47944) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||||
|
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Set content-length for empty POST requests [#47950](https://github.com/ClickHouse/ClickHouse/pull/47950) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix test `02050_client_profile_events` [#47951](https://github.com/ClickHouse/ClickHouse/pull/47951) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Update docs for parseDateTime() (follow-up to [#46815](https://github.com/ClickHouse/ClickHouse/issues/46815)) [#47959](https://github.com/ClickHouse/ClickHouse/pull/47959) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Docs: Update secondary index example [#47961](https://github.com/ClickHouse/ClickHouse/pull/47961) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix compilation on MacOS [#47967](https://github.com/ClickHouse/ClickHouse/pull/47967) ([Jordi Villar](https://github.com/jrdi)).
|
||||||
|
* [Refactoring] Move information about current hosts and list of all hosts to BackupCoordination [#47971](https://github.com/ClickHouse/ClickHouse/pull/47971) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Stabilize tests for new function parseDateTimeInJodaSyntax [#47974](https://github.com/ClickHouse/ClickHouse/pull/47974) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Docs: Fix links [#47976](https://github.com/ClickHouse/ClickHouse/pull/47976) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Try fix rabbitmq test [#47987](https://github.com/ClickHouse/ClickHouse/pull/47987) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Better type check in arrayElement function [#47989](https://github.com/ClickHouse/ClickHouse/pull/47989) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix incorrect code indentation [#48011](https://github.com/ClickHouse/ClickHouse/pull/48011) ([exmy](https://github.com/exmy)).
|
||||||
|
* CMake: Remove configuration of CMAKE_SHARED_LINKER_FLAGS [#48018](https://github.com/ClickHouse/ClickHouse/pull/48018) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Remove the old changelog script [#48042](https://github.com/ClickHouse/ClickHouse/pull/48042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix automatic indentation in the built-in UI SQL editor [#48045](https://github.com/ClickHouse/ClickHouse/pull/48045) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Rename `system.marked_dropped_tables` to `dropped_tables` [#48048](https://github.com/ClickHouse/ClickHouse/pull/48048) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Automatically correct some mistakes in the changelog [#48052](https://github.com/ClickHouse/ClickHouse/pull/48052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Docs: Document [FULL] keyword in SHOW TABLES [#48061](https://github.com/ClickHouse/ClickHouse/pull/48061) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix stateless tests numbers [#48063](https://github.com/ClickHouse/ClickHouse/pull/48063) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Docs: Update syntax of some SHOW queries [#48064](https://github.com/ClickHouse/ClickHouse/pull/48064) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Simplify backup coordination for file infos [#48095](https://github.com/ClickHouse/ClickHouse/pull/48095) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* materialized pg small fix [#48098](https://github.com/ClickHouse/ClickHouse/pull/48098) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Update SQLite to 3.41.2 [#48101](https://github.com/ClickHouse/ClickHouse/pull/48101) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix test numbers again and enforce it with style [#48106](https://github.com/ClickHouse/ClickHouse/pull/48106) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* s390x reinterpret as float64 [#48112](https://github.com/ClickHouse/ClickHouse/pull/48112) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
|
||||||
|
* Remove slow outdated test [#48114](https://github.com/ClickHouse/ClickHouse/pull/48114) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Cosmetic follow-up to [#46252](https://github.com/ClickHouse/ClickHouse/issues/46252) [#48128](https://github.com/ClickHouse/ClickHouse/pull/48128) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Merging "Support undrop table" [#48130](https://github.com/ClickHouse/ClickHouse/pull/48130) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix double whitespace in exception message [#48132](https://github.com/ClickHouse/ClickHouse/pull/48132) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve script for updating clickhouse-docs [#48135](https://github.com/ClickHouse/ClickHouse/pull/48135) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix stdlib compatibility issues [#48150](https://github.com/ClickHouse/ClickHouse/pull/48150) ([DimasKovas](https://github.com/DimasKovas)).
|
||||||
|
* Make test test_disallow_concurrency less flaky [#48152](https://github.com/ClickHouse/ClickHouse/pull/48152) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Remove unused mockSystemDatabase from gtest_transform_query_for_exter… [#48162](https://github.com/ClickHouse/ClickHouse/pull/48162) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Update environmental-sensors.md [#48166](https://github.com/ClickHouse/ClickHouse/pull/48166) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Correctly handle NULL constants in logical optimizer for new analyzer [#48168](https://github.com/ClickHouse/ClickHouse/pull/48168) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Try making KeeperMap test more stable [#48170](https://github.com/ClickHouse/ClickHouse/pull/48170) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Deprecate EXPLAIN QUERY TREE with disabled analyzer. [#48177](https://github.com/ClickHouse/ClickHouse/pull/48177) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Use uniq file names in 02149_* tests to avoid SIGBUS in stress tests [#48187](https://github.com/ClickHouse/ClickHouse/pull/48187) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Update style in ParserKQLSort.cpp [#48199](https://github.com/ClickHouse/ClickHouse/pull/48199) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Remove support for std::unary/binary_function (removed in C++17) [#48204](https://github.com/ClickHouse/ClickHouse/pull/48204) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Remove unused setting [#48208](https://github.com/ClickHouse/ClickHouse/pull/48208) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove wrong assert from LogicalExpressionOptimizerPass [#48214](https://github.com/ClickHouse/ClickHouse/pull/48214) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* MySQL compatibility: Make str_to_date alias case-insensitive [#48220](https://github.com/ClickHouse/ClickHouse/pull/48220) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Disable AST optimizations for projection analysis. [#48221](https://github.com/ClickHouse/ClickHouse/pull/48221) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix Too big of a difference between test numbers [#48224](https://github.com/ClickHouse/ClickHouse/pull/48224) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Stabilize 02477_age [#48225](https://github.com/ClickHouse/ClickHouse/pull/48225) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Rename setting stop_reading_on_first_cancel [#48226](https://github.com/ClickHouse/ClickHouse/pull/48226) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Address flaky 02346_full_text_search [#48227](https://github.com/ClickHouse/ClickHouse/pull/48227) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix incorrect ThreadPool usage after ThreadPool introspection [#48244](https://github.com/ClickHouse/ClickHouse/pull/48244) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* fix test numbers again [#48264](https://github.com/ClickHouse/ClickHouse/pull/48264) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
|
||||||
|
#### Testing Improvement
|
||||||
|
|
||||||
|
* Fixed functional test 02534_keyed_siphash and 02552_siphash128_reference for s390x. [#47615](https://github.com/ClickHouse/ClickHouse/pull/47615) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
|
|
@ -67,7 +67,8 @@ CREATE TABLE youtube
 (
     `id` String,
     `fetch_date` DateTime,
-    `upload_date` String,
+    `upload_date_str` String,
+    `upload_date` Date,
     `title` String,
     `uploader_id` String,
     `uploader` String,
@ -87,7 +88,7 @@ CREATE TABLE youtube
     `video_badges` String
 )
 ENGINE = MergeTree
-ORDER BY (upload_date, uploader);
+ORDER BY (uploader, upload_date);
 ```

 3. The following command streams the records from the S3 files into the `youtube` table.
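
A quick aside on the reordered sort key: putting `uploader` before `upload_date` favors the per-uploader queries used later in this guide. A minimal sketch of the access pattern that benefits (the uploader value here is illustrative, not from the dataset):

```sql
-- Equality on the leading sort-key column plus a range on the next one
-- lets MergeTree prune most granules before reading:
SELECT count()
FROM youtube
WHERE (uploader = 'ClickHouse') AND (upload_date >= '2020-01-01');
```

With the old key `(upload_date, uploader)`, the same filter could only prune by date.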
@ -101,8 +102,9 @@ INSERT INTO youtube
 SETTINGS input_format_null_as_default = 1
 SELECT
     id,
-    parseDateTimeBestEffortUS(toString(fetch_date)) AS fetch_date,
-    upload_date,
+    parseDateTimeBestEffortUSOrZero(toString(fetch_date)) AS fetch_date,
+    upload_date AS upload_date_str,
+    toDate(parseDateTimeBestEffortUSOrZero(upload_date::String)) AS upload_date,
     ifNull(title, '') AS title,
     uploader_id,
     ifNull(uploader, '') AS uploader,
@ -124,10 +126,23 @@ FROM s3Cluster(
     'default',
     'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst',
     'JSONLines'
-);
+)
+SETTINGS
+    max_download_threads = 24,
+    max_insert_threads = 64,
+    max_insert_block_size = 100000000,
+    min_insert_block_size_rows = 100000000,
+    min_insert_block_size_bytes = 500000000;
 ```

-4. Open a new tab in the SQL Console of ClickHouse Cloud (or a new `clickhouse-client` window) and watch the count increase. It will take a while to insert 4.56B rows, depending on your server resources. (Withtout any tweaking of settings, it takes about 4.5 hours.)
+Some comments about our `INSERT` command:
+
+- The `parseDateTimeBestEffortUSOrZero` function is handy when the incoming date fields may not be in the proper format. If `fetch_date` does not get parsed properly, it will be set to `0`
+- The `upload_date` column contains valid dates, but it also contains strings like "4 hours ago" - which is certainly not a valid date. We decided to store the original value in `upload_date_str` and attempt to parse it with `toDate(parseDateTimeBestEffortUSOrZero(upload_date::String))`. If the parsing fails we just get `0`
+- We used `ifNull` to avoid getting `NULL` values in our table. If an incoming value is `NULL`, the `ifNull` function is setting the value to an empty string
+- It takes a long time to download the data, so we added a `SETTINGS` clause to spread out the work over more threads while making sure the block sizes stayed fairly large
+
+4. Open a new tab in the SQL Console of ClickHouse Cloud (or a new `clickhouse-client` window) and watch the count increase. It will take a while to insert 4.56B rows, depending on your server resources. (Without any tweaking of settings, it takes about 4.5 hours.)

 ```sql
 SELECT formatReadableQuantity(count())
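
The `OrZero` behavior called out in the comments above is easy to sanity-check in isolation; a small sketch with made-up input literals:

```sql
SELECT
    parseDateTimeBestEffortUSOrZero('4 hours ago')        AS bad_input,  -- should fall back to 1970-01-01 00:00:00
    toDate(parseDateTimeBestEffortUSOrZero('2017-02-01')) AS good_input; -- should parse to 2017-02-01
```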
@ -200,7 +215,7 @@ FROM youtube
 WHERE (title ILIKE '%ClickHouse%') OR (description ILIKE '%ClickHouse%')
 ORDER BY
     like_count DESC,
-    view_count DESC
+    view_count DESC;
 ```

 This query has to process every row, and also parse through two columns of strings. Even then, we get decent performance at 4.15M rows/second:
@ -224,7 +239,6 @@ The results look like:

 When commenting is disabled, are people more likely to like or dislike to express their feelings about a video?

-
 ```sql
 SELECT
     concat('< ', formatReadableQuantity(view_range)) AS views,
@ -276,6 +290,127 @@ ORDER BY

 Enabling comments seems to be correlated with a higher rate of engagement.

+
+### How does the number of videos change over time - notable events?
+
+```sql
+SELECT
+    toStartOfMonth(toDateTime(upload_date)) AS month,
+    uniq(uploader_id) AS uploaders,
+    count() as num_videos,
+    sum(view_count) as view_count
+FROM youtube
+GROUP BY month
+ORDER BY month ASC;
+```
+
+```response
+┌──────month─┬─uploaders─┬─num_videos─┬───view_count─┐
+│ 2005-04-01 │         5 │          6 │    213597737 │
+│ 2005-05-01 │         6 │          9 │      2944005 │
+│ 2005-06-01 │       165 │        351 │     18624981 │
+│ 2005-07-01 │       395 │       1168 │     94164872 │
+│ 2005-08-01 │      1171 │       3128 │    124540774 │
+│ 2005-09-01 │      2418 │       5206 │    475536249 │
+│ 2005-10-01 │      6750 │      13747 │    737593613 │
+│ 2005-11-01 │     13706 │      28078 │   1896116976 │
+│ 2005-12-01 │     24756 │      49885 │   2478418930 │
+│ 2006-01-01 │     49992 │     100447 │   4532656581 │
+│ 2006-02-01 │     67882 │     138485 │   5677516317 │
+│ 2006-03-01 │    103358 │     212237 │   8430301366 │
+│ 2006-04-01 │    114615 │     234174 │   9980760440 │
+│ 2006-05-01 │    152682 │     332076 │  14129117212 │
+│ 2006-06-01 │    193962 │     429538 │  17014143263 │
+│ 2006-07-01 │    234401 │     530311 │  18721143410 │
+│ 2006-08-01 │    281280 │     614128 │  20473502342 │
+│ 2006-09-01 │    312434 │     679906 │  23158422265 │
+│ 2006-10-01 │    404873 │     897590 │  27357846117 │
+└────────────┴───────────┴────────────┴──────────────┘
+```
+
+A spike of uploaders [around covid is noticeable](https://www.theverge.com/2020/3/27/21197642/youtube-with-me-style-videos-views-coronavirus-cook-workout-study-home-beauty).
+
+### More subtitles over time and when
+
+With advances in speech recognition, it's easier than ever to create subtitles for videos, and YouTube added auto-captioning in late 2009 - did the jump happen then?
+
+```sql
+SELECT
+    toStartOfMonth(upload_date) AS month,
+    countIf(has_subtitles) / count() AS percent_subtitles,
+    percent_subtitles - any(percent_subtitles) OVER (
+        ORDER BY month ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING
+    ) AS previous
+FROM youtube
+GROUP BY month
+ORDER BY month ASC;
+```
+
+```response
+┌──────month─┬───percent_subtitles─┬────────────────previous─┐
+│ 2015-01-01 │  0.2652653881082824 │      0.2652653881082824 │
+│ 2015-02-01 │  0.3147556050309162 │    0.049490216922633834 │
+│ 2015-03-01 │ 0.32460464492371877 │    0.009849039892802558 │
+│ 2015-04-01 │ 0.33471963051468445 │    0.010114985590965686 │
+│ 2015-05-01 │  0.3168087575501062 │   -0.017910872964578273 │
+│ 2015-06-01 │  0.3162609788438222 │  -0.0005477787062839745 │
+│ 2015-07-01 │ 0.31828767677518033 │   0.0020266979313581235 │
+│ 2015-08-01 │  0.3045551564286859 │   -0.013732520346494415 │
+│ 2015-09-01 │   0.311221133995152 │    0.006665977566466086 │
+│ 2015-10-01 │ 0.30574870926812175 │   -0.005472424727030245 │
+│ 2015-11-01 │ 0.31125409712077234 │   0.0055053878526505895 │
+│ 2015-12-01 │  0.3190967954651779 │    0.007842698344405541 │
+│ 2016-01-01 │ 0.32636021432496176 │    0.007263418859783877 │
+└────────────┴─────────────────────┴─────────────────────────┘
+```
+
+The data results show a spike in 2009. Apparently at that time, YouTube was removing their community captions feature, which allowed you to upload captions for other people's videos.
+This prompted a very successful campaign to have creators add captions to their videos for hard of hearing and deaf viewers.
+
+### Top uploaders over time
+
+```sql
+WITH uploaders AS
+    (
+        SELECT uploader
+        FROM youtube
+        GROUP BY uploader
+        ORDER BY sum(view_count) DESC
+        LIMIT 10
+    )
+SELECT
+    month,
+    uploader,
+    sum(view_count) AS total_views,
+    avg(dislike_count / like_count) AS like_to_dislike_ratio
+FROM youtube
+WHERE uploader IN (uploaders)
+GROUP BY
+    toStartOfMonth(upload_date) AS month,
+    uploader
+ORDER BY
+    month ASC,
+    total_views DESC;
+```
+
+```response
+┌──────month─┬─uploader───────────────────┬─total_views─┬─like_to_dislike_ratio─┐
+│ 1970-01-01 │ T-Series                   │    10957099 │  0.022784656361208206 │
+│ 1970-01-01 │ Ryan's World               │           0 │  0.003035559410234172 │
+│ 1970-01-01 │ SET India                  │           0 │                   nan │
+│ 2006-09-01 │ Cocomelon - Nursery Rhymes │   256406497 │    0.7005566715978622 │
+│ 2007-06-01 │ Cocomelon - Nursery Rhymes │    33641320 │    0.7088650914344298 │
+│ 2008-02-01 │ WWE                        │    43733469 │   0.07198856488734842 │
+│ 2008-03-01 │ WWE                        │    16514541 │    0.1230603715431997 │
+│ 2008-04-01 │ WWE                        │     5907295 │    0.2089399470159618 │
+│ 2008-05-01 │ WWE                        │     7779627 │   0.09101676560436774 │
+│ 2008-06-01 │ WWE                        │     7018780 │    0.0974184753155297 │
+│ 2008-07-01 │ WWE                        │     4686447 │    0.1263845422065158 │
+│ 2008-08-01 │ WWE                        │     4514312 │   0.08384574274791441 │
+│ 2008-09-01 │ WWE                        │     3717092 │   0.07872802579349912 │
+└────────────┴────────────────────────────┴─────────────┴───────────────────────┘
+```

 ### How does the like ratio change as views go up?

 ```sql
@ -322,8 +457,6 @@ ORDER BY
 │ < 10.00 billion   │ false               │       1.77 │
 │ < 10.00 billion   │ true                │       19.5 │
 └───────────────────┴─────────────────────┴────────────┘
-
-20 rows in set. Elapsed: 63.664 sec. Processed 4.56 billion rows, 113.93 GB (71.59 million rows/s., 1.79 GB/s.)
 ```

 ### How are views distributed?
@ -359,6 +492,4 @@ ARRAY JOIN
 │ 20th       │      16 │
 │ 10th       │       6 │
 └────────────┴─────────┘
-
-12 rows in set. Elapsed: 1.864 sec. Processed 4.56 billion rows, 36.46 GB (2.45 billion rows/s., 19.56 GB/s.)
 ```
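
For reference, percentile tables like the one above typically come from a `quantiles` plus `ARRAY JOIN` pattern. A hedged sketch of that shape (the percentile levels and labels are assumptions, not the guide's exact query):

```sql
SELECT
    label AS percentile,
    round(value) AS views
FROM
(
    -- quantiles(...) returns one array; pair it with an array of labels:
    SELECT
        quantiles(0.1, 0.2, 0.5, 0.9)(view_count) AS qs,
        ['10th', '20th', '50th', '90th'] AS labels
    FROM youtube
)
ARRAY JOIN
    qs AS value,
    labels AS label;
```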
@ -244,10 +244,12 @@ Example of configuration:
         <database>system</database>
         <user>foo</user>
         <password>secret</password>
+        <secure>1</secure>
     </remote1>
 </named_collections>
 </clickhouse>
 ```
+`secure` is not needed for connection because of `remoteSecure`, but it can be used for dictionaries.

 ### Example of using named collections with the `remote`/`remoteSecure` functions

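A hedged usage sketch for the collection above; the `table` override and the implied `system.one` target are assumptions for illustration:

```sql
-- Keys from the named collection can be supplied or overridden inline:
SELECT * FROM remote(remote1, table = one);
```

Because the collection carries `database = system` and now `secure = 1`, the same collection can also back a dictionary source that needs a secure connection.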
@ -1047,7 +1047,7 @@ Default value: `0`.

 Sets the number of threads performing background merges and mutations for tables with MergeTree engines. This setting can also be applied at server startup from the `default` profile configuration for backward compatibility at the ClickHouse server start. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.

-Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`.
+Before changing it, please also take a look at related MergeTree settings, such as [number_of_free_entries_in_pool_to_lower_max_size_of_merge](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-lower-max-size-of-merge) and [number_of_free_entries_in_pool_to_execute_mutation](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-execute-mutation).

 Possible values:

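Since the value can only be raised at runtime, it helps to check the effective value first. A sketch; where this setting is surfaced varies by version, so the table name here is an assumption to verify on your build:

```sql
-- Newer releases may expose server-level settings elsewhere (e.g. a dedicated
-- server-settings table) rather than in system.settings:
SELECT name, value, changed
FROM system.settings
WHERE name = 'background_pool_size';
```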
@ -553,6 +553,32 @@ Default value: 8192
|
|||||||
|
|
||||||
Merge reads rows from parts in blocks of `merge_max_block_size` rows, then merges and writes the result into a new part. The read block is placed in RAM, so `merge_max_block_size` affects the size of the RAM required for the merge. Thus, merges can consume a large amount of RAM for tables with very wide rows (if the average row size is 100kb, then when merging 10 parts, (100kb * 10 * 8192) = ~ 8GB of RAM). By decreasing `merge_max_block_size`, you can reduce the amount of RAM required for a merge but slow down a merge.
|
Merge reads rows from parts in blocks of `merge_max_block_size` rows, then merges and writes the result into a new part. The read block is placed in RAM, so `merge_max_block_size` affects the size of the RAM required for the merge. Thus, merges can consume a large amount of RAM for tables with very wide rows (if the average row size is 100kb, then when merging 10 parts, (100kb * 10 * 8192) = ~ 8GB of RAM). By decreasing `merge_max_block_size`, you can reduce the amount of RAM required for a merge but slow down a merge.
|
||||||
|
|
||||||

## number_of_free_entries_in_pool_to_lower_max_size_of_merge {#number-of-free-entries-in-pool-to-lower-max-size-of-merge}

When there are fewer than the specified number of free entries in the pool (or the replicated queue), ClickHouse starts to lower the maximum size of merges to process (or to put in the queue). This allows small merges to proceed rather than filling the pool with long-running merges.

Possible values:

- Any positive integer.

Default value: 8
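
A minimal sketch for reviewing this threshold together with the mutation threshold described next, assuming the standard `system.merge_tree_settings` table:

```sql
SELECT name, value
FROM system.merge_tree_settings
WHERE name IN ('number_of_free_entries_in_pool_to_lower_max_size_of_merge',
               'number_of_free_entries_in_pool_to_execute_mutation');
```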

## number_of_free_entries_in_pool_to_execute_mutation {#number-of-free-entries-in-pool-to-execute-mutation}

When there are fewer than the specified number of free entries in the pool, part mutations are not executed. This leaves free threads for regular merges and helps to avoid "Too many parts" errors.

Possible values:

- Any positive integer.

Default value: 20

**Usage**

The value of the `number_of_free_entries_in_pool_to_execute_mutation` setting should be less than the value of [background_pool_size](/docs/en/operations/server-configuration-parameters/settings#background_pool_size) * [background_merges_mutations_concurrency_ratio](/docs/en/operations/server-configuration-parameters/settings#background_merges_mutations_concurrency_ratio). Otherwise, ClickHouse throws an exception.
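
For example, with hypothetical values `background_pool_size = 16` and `background_merges_mutations_concurrency_ratio = 2`, the pool can schedule up to 16 * 2 = 32 background tasks, so `number_of_free_entries_in_pool_to_execute_mutation` must be set below 32.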

## max_part_loading_threads {#max-part-loading-threads}

The maximum number of threads that read parts when ClickHouse starts.

@ -51,10 +51,14 @@ But for storing archives with rare queries, shelves will work.

## RAID {#raid}

When using HDDs, you can combine them into RAID-10, RAID-5, RAID-6 or RAID-50.
For Linux, software RAID is better (with `mdadm`).
When creating RAID-10, select the `far` layout.
If your budget allows, choose RAID-10.

LVM by itself (without RAID or `mdadm`) is ok, but making RAID with it or combining it with `mdadm` is a less explored option, and there will be more chances for mistakes (selecting a wrong chunk size; misalignment of chunks; choosing a wrong raid type; forgetting to clean up disks). If you are confident in using LVM, there is nothing against using it.

If you have more than 4 disks, use RAID-6 (preferred) or RAID-50, instead of RAID-5.
When using RAID-5, RAID-6 or RAID-50, always increase stripe_cache_size, since the default value is usually not the best choice.

@ -91,7 +91,7 @@ The command must read arguments from `STDIN` and must output the result to `STDO

**Example**

Creating `test_function` using XML configuration.
File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml
<functions>
    <function>
@ -108,7 +108,7 @@ File test_function.xml.
    </function>
</functions>
```

Script file inside `user_scripts` folder `test_function.py` (`/var/lib/clickhouse/user_scripts/test_function.py` with default path settings).

```python
#!/usr/bin/python3
@ -136,7 +136,7 @@ Result:
```

Creating `test_function_sum` manually specifying `execute_direct` to `0` using XML configuration.
File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml
<functions>
    <function>
@ -173,7 +173,7 @@ Result:
```

Creating `test_function_sum_json` with named arguments and format [JSONEachRow](../../interfaces/formats.md#jsoneachrow) using XML configuration.
File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml
<functions>
    <function>
@ -195,7 +195,7 @@ File test_function.xml.
    </function>
</functions>
```

Script file inside `user_scripts` folder `test_function_sum_json.py` (`/var/lib/clickhouse/user_scripts/test_function_sum_json.py` with default path settings).

```python
#!/usr/bin/python3
@ -228,7 +228,7 @@ Result:
```

Executable user defined functions can take constant parameters configured in the `command` setting (this works only for user defined functions with the `executable` type).
File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings).
```xml
<functions>
    <function>
@ -244,7 +244,7 @@ File test_function_parameter_python.xml.
    </function>
</functions>
```

Script file inside `user_scripts` folder `test_function_parameter_python.py` (`/var/lib/clickhouse/user_scripts/test_function_parameter_python.py` with default path settings).

```python
#!/usr/bin/python3

@ -30,7 +30,7 @@ This statement is identical to the query:

SELECT name FROM system.databases [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE filename] [FORMAT format]
```

**Examples**

Getting database names that contain the symbol sequence 'de' in their names:
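
The example query itself is elided at the hunk boundary; an illustrative sketch of such a pattern query (not the original):

```sql
SHOW DATABASES LIKE '%de%'
```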

@ -92,7 +92,7 @@ Result:

└────────────────────────────────┘
```

**See also**

- [CREATE DATABASE](https://clickhouse.com/docs/en/sql-reference/statements/create/database/#query-language-create-database)

@ -128,7 +128,7 @@ This statement is identical to the query:

SELECT name FROM system.tables [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
```

**Examples**

Getting table names that contain the symbol sequence 'user' in their names:
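
The example query is likewise elided; an illustrative sketch (the `system` database is assumed here, since it contains tables such as `users`):

```sql
SHOW TABLES FROM system LIKE '%user%'
```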
@ -191,11 +191,59 @@ Result:
|
|||||||
└────────────────────────────────┘
|
└────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
### See Also
|
**See also**
|
||||||
|
|
||||||
- [Create Tables](https://clickhouse.com/docs/en/getting-started/tutorial/#create-tables)
|
- [Create Tables](https://clickhouse.com/docs/en/getting-started/tutorial/#create-tables)
|
||||||
- [SHOW CREATE TABLE](https://clickhouse.com/docs/en/sql-reference/statements/show/#show-create-table)
|
- [SHOW CREATE TABLE](https://clickhouse.com/docs/en/sql-reference/statements/show/#show-create-table)
|
||||||
|
|
||||||
|

## SHOW COLUMNS

Displays a list of columns.

```sql
SHOW [EXTENDED] [FULL] COLUMNS {FROM | IN} <table> [{FROM | IN} <db>] [{[NOT] {LIKE | ILIKE} '<pattern>' | WHERE <expr>}] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
```

The database and table name can be specified in abbreviated form as `<db>.<table>`, i.e. `FROM tab FROM db` and `FROM db.tab` are equivalent. If no database is specified, the query returns the list of columns from the current database.
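
For example, the following two statements are equivalent (database and table names are placeholders):

```sql
SHOW COLUMNS FROM tab FROM db;
SHOW COLUMNS FROM db.tab;
```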

The optional keyword `EXTENDED` currently has no effect; it only exists for MySQL compatibility.

The optional keyword `FULL` causes the output to include the collation, comment and privilege columns.

`SHOW COLUMNS` produces a result table with the following structure:

- field - The name of the column (String)
- type - The column data type (String)
- null - Whether the column data type is Nullable (UInt8)
- key - `PRI` if the column is part of the primary key, `SOR` if the column is part of the sorting key, empty otherwise (String)
- default - Default expression of the column if it is of type `ALIAS`, `DEFAULT`, or `MATERIALIZED`, otherwise `NULL`. (Nullable(String))
- extra - Additional information, currently unused (String)
- collation - (only if `FULL` keyword was specified) Collation of the column, always `NULL` because ClickHouse has no per-column collations (Nullable(String))
- comment - (only if `FULL` keyword was specified) Comment on the column (String)
- privilege - (only if `FULL` keyword was specified) The privilege you have on this column, currently not available (String)

**Examples**

Getting information about all columns in table 'orders' starting with 'delivery_':

```sql
SHOW COLUMNS FROM 'orders' LIKE 'delivery_%'
```

Result:

``` text
┌─field───────────┬─type─────┬─null─┬─key─────┬─default─┬─extra─┐
│ delivery_date   │ DateTime │    0 │ PRI SOR │ ᴺᵁᴸᴸ    │       │
│ delivery_status │ Bool     │    0 │         │ ᴺᵁᴸᴸ    │       │
└─────────────────┴──────────┴──────┴─────────┴─────────┴───────┘
```

**See also**

- [system.columns](https://clickhouse.com/docs/en/operations/system-tables/columns)

## SHOW DICTIONARIES

Displays a list of [Dictionaries](../../sql-reference/dictionaries/index.md).

@ -212,7 +260,7 @@ You can get the same results as the `SHOW DICTIONARIES` query in the following w

SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
```

**Examples**

The following query selects the first two rows from the list of dictionaries in the `db` database, whose names contain `reg`.
|
|||||||
|
|
||||||
Shows privileges for a user.
|
Shows privileges for a user.
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW GRANTS [FOR user1 [, user2 ...]]
|
SHOW GRANTS [FOR user1 [, user2 ...]]
|
||||||
@ -245,7 +293,7 @@ Shows parameters that were used at a [user creation](../../sql-reference/stateme
|
|||||||
|
|
||||||
`SHOW CREATE USER` does not output user passwords.
|
`SHOW CREATE USER` does not output user passwords.
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
|
SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
|
||||||
@ -255,7 +303,7 @@ SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
|
|||||||
|
|
||||||
Shows parameters that were used at a [role creation](../../sql-reference/statements/create/role.md).
|
Shows parameters that were used at a [role creation](../../sql-reference/statements/create/role.md).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CREATE ROLE name1 [, name2 ...]
|
SHOW CREATE ROLE name1 [, name2 ...]
|
||||||
@ -265,7 +313,7 @@ SHOW CREATE ROLE name1 [, name2 ...]
|
|||||||
|
|
||||||
Shows parameters that were used at a [row policy creation](../../sql-reference/statements/create/row-policy.md).
|
Shows parameters that were used at a [row policy creation](../../sql-reference/statements/create/row-policy.md).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
|
SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
|
||||||
@ -275,7 +323,7 @@ SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
|
|||||||
|
|
||||||
Shows parameters that were used at a [quota creation](../../sql-reference/statements/create/quota.md).
|
Shows parameters that were used at a [quota creation](../../sql-reference/statements/create/quota.md).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
|
SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
|
||||||
@ -285,7 +333,7 @@ SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
|
|||||||
|
|
||||||
Shows parameters that were used at a [settings profile creation](../../sql-reference/statements/create/settings-profile.md).
|
Shows parameters that were used at a [settings profile creation](../../sql-reference/statements/create/settings-profile.md).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
|
SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
|
||||||
@ -295,7 +343,7 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
|
|||||||
|
|
||||||
Returns a list of [user account](../../guides/sre/user-management/index.md#user-account-management) names. To view user accounts parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users).
|
Returns a list of [user account](../../guides/sre/user-management/index.md#user-account-management) names. To view user accounts parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW USERS
|
SHOW USERS
|
||||||
@ -305,7 +353,7 @@ SHOW USERS
|
|||||||
|
|
||||||
Returns a list of [roles](../../guides/sre/user-management/index.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).
|
Returns a list of [roles](../../guides/sre/user-management/index.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW [CURRENT|ENABLED] ROLES
|
SHOW [CURRENT|ENABLED] ROLES
|
||||||
@ -314,7 +362,7 @@ SHOW [CURRENT|ENABLED] ROLES
|
|||||||
|
|
||||||
Returns a list of [setting profiles](../../guides/sre/user-management/index.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles).
|
Returns a list of [setting profiles](../../guides/sre/user-management/index.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW [SETTINGS] PROFILES
|
SHOW [SETTINGS] PROFILES
|
||||||
@ -324,7 +372,7 @@ SHOW [SETTINGS] PROFILES
|
|||||||
|
|
||||||
Returns a list of [row policies](../../guides/sre/user-management/index.md#row-policy-management) for the specified table. To view user accounts parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies).
|
Returns a list of [row policies](../../guides/sre/user-management/index.md#row-policy-management) for the specified table. To view user accounts parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW [ROW] POLICIES [ON [db.]table]
|
SHOW [ROW] POLICIES [ON [db.]table]
|
||||||
@ -334,7 +382,7 @@ SHOW [ROW] POLICIES [ON [db.]table]
|
|||||||
|
|
||||||
Returns a list of [quotas](../../guides/sre/user-management/index.md#quotas-management). To view quotas parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas).
|
Returns a list of [quotas](../../guides/sre/user-management/index.md#quotas-management). To view quotas parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW QUOTAS
|
SHOW QUOTAS
|
||||||
@ -344,7 +392,7 @@ SHOW QUOTAS
|
|||||||
|
|
||||||
Returns a [quota](../../operations/quotas.md) consumption for all users or for current user. To view another parameters, see system tables [system.quotas_usage](../../operations/system-tables/quotas_usage.md#system_tables-quotas_usage) and [system.quota_usage](../../operations/system-tables/quota_usage.md#system_tables-quota_usage).
|
Returns a [quota](../../operations/quotas.md) consumption for all users or for current user. To view another parameters, see system tables [system.quotas_usage](../../operations/system-tables/quotas_usage.md#system_tables-quotas_usage) and [system.quota_usage](../../operations/system-tables/quota_usage.md#system_tables-quota_usage).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW [CURRENT] QUOTA
|
SHOW [CURRENT] QUOTA
|
||||||
@ -353,7 +401,7 @@ SHOW [CURRENT] QUOTA
|
|||||||
|
|
||||||
Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges).
|
Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges).
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW ACCESS
|
SHOW ACCESS
|
||||||
@ -366,13 +414,14 @@ Returns a list of clusters. All available clusters are listed in the [system.clu
|
|||||||
`SHOW CLUSTER name` query displays the contents of system.clusters table for this cluster.
|
`SHOW CLUSTER name` query displays the contents of system.clusters table for this cluster.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
### Syntax
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SHOW CLUSTER '<name>'
|
SHOW CLUSTER '<name>'
|
||||||
SHOW CLUSTERS [[NOT] LIKE|ILIKE '<pattern>'] [LIMIT <N>]
|
SHOW CLUSTERS [[NOT] LIKE|ILIKE '<pattern>'] [LIMIT <N>]
|
||||||
```
|
```
|
||||||
### Examples
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
|
|
||||||
|

@ -283,10 +283,14 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]

Waits until a `ReplicatedMergeTree` table is synced with the other replicas in a cluster, but for no more than `receive_timeout` seconds.

``` sql
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
```

After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands. The following modifiers are supported (see the usage sketch after this list):

- If a `STRICT` modifier was specified then the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue.
- If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed.
- If a `PULL` modifier was specified then the query pulls new replication queue entries from ZooKeeper, but does not wait for anything to be processed.
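
A minimal usage sketch (the table name is a placeholder):

```sql
-- Wait only for GET_PART/ATTACH_PART/DROP_RANGE/REPLACE_RANGE/DROP_PART entries:
SYSTEM SYNC REPLICA db.replicated_table LIGHTWEIGHT
```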

### RESTART REPLICA

@ -272,10 +272,14 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]

Waits until a `ReplicatedMergeTree` table is synced with the other replicas in a cluster, but for no more than `receive_timeout` seconds:

``` sql
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
```

After running this query, the `[db.]replicated_merge_tree_family_table_name` table loads commands from the common replicated log into its own replication queue. The query then waits until the replica processes all of the loaded commands. The following modifiers are supported:

- If the `STRICT` modifier is specified, the query waits until the replication queue becomes empty. The strict variant may never finish successfully if new entries constantly appear in the replication queue.
- If the `LIGHTWEIGHT` modifier is specified, the query waits until the `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries are processed.
- If the `PULL` modifier is specified, the query only pulls new replication queue entries from ZooKeeper and does not wait for anything to be processed.

### RESTART REPLICA {#query_language-system-restart-replica}

@ -240,7 +240,7 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]

``` sql
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT | PULL]
```

### RESTART REPLICA {#query_language-system-restart-replica}

@ -87,7 +87,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count

    if (send_events)
    {
        for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i)
        {
            const auto counter = ProfileEvents::global_counters[i].load(std::memory_order_relaxed);
            const auto counter_increment = counter - prev_counters[i];
@ -100,7 +100,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count

    if (send_events_cumulative)
    {
        for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i)
        {
            const auto counter = ProfileEvents::global_counters[i].load(std::memory_order_relaxed);
            std::string key{ProfileEvents::getName(static_cast<ProfileEvents::Event>(i))};
@ -110,7 +110,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count

    if (send_metrics)
    {
        for (CurrentMetrics::Metric i = CurrentMetrics::Metric(0), end = CurrentMetrics::end(); i < end; ++i)
        {
            const auto value = CurrentMetrics::values[i].load(std::memory_order_relaxed);

@ -288,7 +288,8 @@ public:
        readVarUInt(size, buf);

        if (unlikely(size > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);

        if (limit_num_elems && unlikely(size > max_elems))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elems);
@ -367,7 +368,8 @@ struct GroupArrayNodeBase
        UInt64 size;
        readVarUInt(size, buf);
        if (unlikely(size > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);

        Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
        node->size = size;
@ -621,7 +623,8 @@ public:
            return;

        if (unlikely(elems > AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_MAX_ARRAY_SIZE);

        if (limit_num_elems && unlikely(elems > max_elems))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size, it should not exceed {}", max_elems);
@ -79,7 +79,8 @@ public:
        {
            length_to_resize = applyVisitor(FieldVisitorConvertToNumber<UInt64>(), params[1]);
            if (length_to_resize > AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE)
                throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                                "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE);
        }
    }

@ -167,7 +168,8 @@ public:
        readVarUInt(size, buf);

        if (size > AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE)
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", AGGREGATE_FUNCTION_GROUP_ARRAY_INSERT_AT_MAX_SIZE);

        Array & arr = data(place).value;

@ -144,7 +144,8 @@ public:
        readVarUInt(size, buf);

        if (unlikely(size > AGGREGATE_FUNCTION_MOVING_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", AGGREGATE_FUNCTION_MOVING_MAX_ARRAY_SIZE);

        if (size > 0)
        {
@ -127,7 +127,8 @@ public:
        if (size == 0)
            throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect size (0) in groupBitmap.");
        if (size > max_size)
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size in groupBitmap (maximum: {})", max_size);

        /// TODO: this is unnecessary copying - it will be better to read and deserialize in one pass.
        std::unique_ptr<char[]> buf(new char[size]);
@ -294,7 +294,8 @@ public:
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too many bins");
        static constexpr size_t max_size = 1_GiB;
        if (size > max_size)
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size in histogram (maximum: {})", max_size);

        buf.readStrict(reinterpret_cast<char *>(points), size * sizeof(WeightedValue));
    }
@ -117,7 +117,7 @@ struct AggregateFunctionIntervalLengthSumData
        readBinary(size, buf);

        if (unlikely(size > MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size (maximum: {})", MAX_ARRAY_SIZE);

        segments.clear();
        segments.reserve(size);
@ -140,7 +140,8 @@ public:
        readVarUInt(size, buf);

        if (unlikely(size > AGGREGATE_FUNCTION_MAX_INTERSECTIONS_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", AGGREGATE_FUNCTION_MAX_INTERSECTIONS_MAX_ARRAY_SIZE);

        auto & value = this->data(place).value;

@ -324,7 +324,8 @@ public:
            return;

        if (unlikely(size > max_node_size_deserialize))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", max_node_size_deserialize);

        auto & value = data(place).value;

@ -1,5 +1,7 @@
#pragma once

#include <base/arithmeticOverflow.h>

#include <array>
#include <string_view>
#include <DataTypes/DataTypeString.h>
@ -43,9 +45,21 @@ struct AggregateFunctionSparkbarData

        auto [it, inserted] = points.insert({x, y});
        if (!inserted)
        {
            if constexpr (std::is_floating_point_v<Y>)
            {
                it->getMapped() += y;
                return it->getMapped();
            }
            else
            {
                Y res;
                bool has_overfllow = common::addOverflow(it->getMapped(), y, res);
                it->getMapped() = has_overfllow ? std::numeric_limits<Y>::max() : res;
            }
        }
        return it->getMapped();
    }

    void add(X x, Y y)
    {
@ -117,6 +131,7 @@ class AggregateFunctionSparkbar final
{
private:
    static constexpr size_t BAR_LEVELS = 8;
    const size_t width = 0;

    /// Range for x specified in parameters.
@ -126,8 +141,8 @@ private:

    size_t updateFrame(ColumnString::Chars & frame, Y value) const
    {
        static constexpr std::array<std::string_view, BAR_LEVELS + 1> bars{" ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
        const auto & bar = (isNaN(value) || value < 1 || static_cast<Y>(BAR_LEVELS) < value) ? bars[0] : bars[static_cast<UInt8>(value)];
        frame.insert(bar.begin(), bar.end());
        return bar.size();
    }
@ -161,7 +176,7 @@ private:
        }

        PaddedPODArray<Y> histogram(width, 0);
        PaddedPODArray<UInt64> count_histogram(width, 0); /// The number of points in each bucket

        for (const auto & point : data.points)
        {
@ -176,22 +191,30 @@ private:
            Float64 w = histogram.size();
            size_t index = std::min<size_t>(static_cast<size_t>(w / delta * value), histogram.size() - 1);

            Y res;
            bool has_overfllow = false;
            if constexpr (std::is_floating_point_v<Y>)
                res = histogram[index] + point.getMapped();
            else
                has_overfllow = common::addOverflow(histogram[index], point.getMapped(), res);

            if (unlikely(has_overfllow))
            {
                /// In case of overflow, just saturate
                /// Do not count new values, because we do not know how many of them were added
                histogram[index] = std::numeric_limits<Y>::max();
            }
            else
            {
                histogram[index] = res;
                count_histogram[index] += 1;
            }
        }

        for (size_t i = 0; i < histogram.size(); ++i)
        {
            if (count_histogram[i] > 0)
                histogram[i] /= count_histogram[i];
        }

        Y y_max = 0;
@ -209,12 +232,30 @@ private:
            return;
        }

        /// Scale the histogram to the range [0, BAR_LEVELS]
        for (auto & y : histogram)
        {
            if (isNaN(y) || y <= 0)
            {
                y = 0;
                continue;
            }

            constexpr auto levels_num = static_cast<Y>(BAR_LEVELS - 1);
            if constexpr (std::is_floating_point_v<Y>)
            {
                y = y / (y_max / levels_num) + 1;
            }
            else
            {
                Y scaled;
                bool has_overfllow = common::mulOverflow<Y>(y, levels_num, scaled);

                if (has_overfllow)
                    y = y / (y_max / levels_num) + 1;
                else
                    y = scaled / y_max + 1;
            }
        }

        size_t sz = 0;
@ -58,7 +58,8 @@ struct QuantileExactBase
        size_t size = 0;
        readVarUInt(size, buf);
        if (unlikely(size > QUANTILE_EXACT_MAX_ARRAY_SIZE))
            throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                            "Too large array size (maximum: {})", QUANTILE_EXACT_MAX_ARRAY_SIZE);
        array.resize(size);
        buf.readStrict(reinterpret_cast<char *>(array.data()), size * sizeof(array[0]));
    }
@ -213,7 +213,8 @@ public:
        size_t size = std::min(total_values, sample_count);
        static constexpr size_t MAX_RESERVOIR_SIZE = 1_GiB;
        if (unlikely(size > MAX_RESERVOIR_SIZE))
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                                "Too large array size (maximum: {})", MAX_RESERVOIR_SIZE);

        samples.resize(size);

@ -166,7 +166,8 @@ public:

        static constexpr size_t MAX_RESERVOIR_SIZE = 1_GiB;
        if (unlikely(size > MAX_RESERVOIR_SIZE))
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE,
                                "Too large array size (maximum: {})", MAX_RESERVOIR_SIZE);

        samples.resize(size);
        for (size_t i = 0; i < size; ++i)

599 src/Analyzer/Passes/CNF.cpp Normal file
@ -0,0 +1,599 @@
#include <Analyzer/Passes/CNF.h>

#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/ConstantNode.h>

#include <Interpreters/TreeCNFConverter.h>

#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

#include <Functions/FunctionFactory.h>

#include <Common/checkStackSize.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int TOO_MANY_TEMPORARY_COLUMNS;
}

namespace Analyzer
{

namespace
{

bool isLogicalFunction(const FunctionNode & function_node)
{
    const std::string_view name = function_node.getFunctionName();
    return name == "and" || name == "or" || name == "not";
}

template <typename... Args>
QueryTreeNodePtr createFunctionNode(const FunctionOverloadResolverPtr & function_resolver, Args &&... args)
{
    auto function_node = std::make_shared<FunctionNode>(function_resolver->getName());
    auto & new_arguments = function_node->getArguments().getNodes();
    new_arguments.reserve(sizeof...(args));
    (new_arguments.push_back(std::forward<Args>(args)), ...);
    function_node->resolveAsFunction(function_resolver);
    return function_node;
}

size_t countAtoms(const QueryTreeNodePtr & node)
{
    checkStackSize();

    const auto * function_node = node->as<FunctionNode>();
    if (!function_node || !isLogicalFunction(*function_node))
        return 1;

    size_t atom_count = 0;
    const auto & arguments = function_node->getArguments().getNodes();
    for (const auto & argument : arguments)
        atom_count += countAtoms(argument);

    return atom_count;
}

class SplitMultiLogicVisitor
{
public:
    explicit SplitMultiLogicVisitor(ContextPtr context)
        : current_context(std::move(context))
    {}

    void visit(QueryTreeNodePtr & node)
    {
        checkStackSize();

        auto * function_node = node->as<FunctionNode>();
        if (!function_node || !isLogicalFunction(*function_node))
            return;

        const auto & name = function_node->getFunctionName();

        if (name == "and" || name == "or")
        {
            auto function_resolver = FunctionFactory::instance().get(name, current_context);

            const auto & arguments = function_node->getArguments().getNodes();
            if (arguments.size() > 2)
            {
                QueryTreeNodePtr current = arguments[0];
                for (size_t i = 1; i < arguments.size(); ++i)
                    current = createFunctionNode(function_resolver, std::move(current), arguments[i]);

                auto & new_function_node = current->as<FunctionNode &>();
                function_node->getArguments().getNodes() = std::move(new_function_node.getArguments().getNodes());
                function_node->resolveAsFunction(function_resolver);
            }
        }
        else
        {
            assert(name == "not");
        }

        auto & arguments = function_node->getArguments().getNodes();
        for (auto & argument : arguments)
            visit(argument);
    }

private:
    ContextPtr current_context;
};

class PushNotVisitor
{
public:
    explicit PushNotVisitor(const ContextPtr & context)
        : not_function_resolver(FunctionFactory::instance().get("not", context))
        , or_function_resolver(FunctionFactory::instance().get("or", context))
        , and_function_resolver(FunctionFactory::instance().get("and", context))
    {}

    void visit(QueryTreeNodePtr & node, bool add_negation)
    {
        checkStackSize();

        auto * function_node = node->as<FunctionNode>();

        if (!function_node || !isLogicalFunction(*function_node))
        {
            if (add_negation)
                node = createFunctionNode(not_function_resolver, std::move(node));
            return;
        }

        std::string_view function_name = function_node->getFunctionName();
        if (function_name == "and" || function_name == "or")
        {
            if (add_negation)
            {
                if (function_name == "and")
                    function_node->resolveAsFunction(or_function_resolver);
                else
                    function_node->resolveAsFunction(and_function_resolver);
            }

            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
                visit(argument, add_negation);
            return;
        }

        assert(function_name == "not");
        auto & arguments = function_node->getArguments().getNodes();
        assert(arguments.size() == 1);
        node = arguments[0];
        visit(node, !add_negation);
    }

private:
    const FunctionOverloadResolverPtr not_function_resolver;
    const FunctionOverloadResolverPtr or_function_resolver;
    const FunctionOverloadResolverPtr and_function_resolver;
};

class PushOrVisitor
{
public:
    PushOrVisitor(ContextPtr context, size_t max_atoms_, size_t num_atoms_)
        : max_atoms(max_atoms_)
        , num_atoms(num_atoms_)
        , and_resolver(FunctionFactory::instance().get("and", context))
        , or_resolver(FunctionFactory::instance().get("or", context))
    {}

    bool visit(QueryTreeNodePtr & node)
    {
        if (max_atoms && num_atoms > max_atoms)
            return false;

        checkStackSize();

        auto * function_node = node->as<FunctionNode>();

        if (!function_node)
            return true;

        std::string_view name = function_node->getFunctionName();

        if (name == "or" || name == "and")
        {
            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
                visit(argument);
        }

        if (name == "or")
        {
            auto & arguments = function_node->getArguments().getNodes();
            assert(arguments.size() == 2);

            size_t and_node_id = arguments.size();

            for (size_t i = 0; i < arguments.size(); ++i)
            {
                auto & argument = arguments[i];
                if (auto * argument_function_node = argument->as<FunctionNode>();
                    argument_function_node && argument_function_node->getFunctionName() == "and")
                    and_node_id = i;
            }

            if (and_node_id == arguments.size())
                return true;

            auto & other_node = arguments[1 - and_node_id];
            auto & and_function_arguments = arguments[and_node_id]->as<FunctionNode &>().getArguments().getNodes();

            auto lhs = createFunctionNode(or_resolver, other_node->clone(), std::move(and_function_arguments[0]));
            num_atoms += countAtoms(other_node);

            auto rhs = createFunctionNode(or_resolver, std::move(other_node), std::move(and_function_arguments[1]));
            node = createFunctionNode(and_resolver, std::move(lhs), std::move(rhs));

            visit(node);
        }

        return true;
    }

private:
    size_t max_atoms;
    size_t num_atoms;

    const FunctionOverloadResolverPtr and_resolver;
    const FunctionOverloadResolverPtr or_resolver;
};

class CollectGroupsVisitor
{
public:
    void visit(QueryTreeNodePtr & node)
    {
        CNF::OrGroup or_group;
        visitImpl(node, or_group);
        if (!or_group.empty())
            and_group.insert(std::move(or_group));
    }

    CNF::AndGroup and_group;

private:
    void visitImpl(QueryTreeNodePtr & node, CNF::OrGroup & or_group)
    {
        checkStackSize();

        auto * function_node = node->as<FunctionNode>();
        if (!function_node || !isLogicalFunction(*function_node))
        {
            or_group.insert(CNF::AtomicFormula{false, std::move(node)});
            return;
        }

        std::string_view name = function_node->getFunctionName();

        if (name == "and")
        {
            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
            {
                CNF::OrGroup argument_or_group;
                visitImpl(argument, argument_or_group);
                if (!argument_or_group.empty())
                    and_group.insert(std::move(argument_or_group));
            }
        }
        else if (name == "or")
        {
            auto & arguments = function_node->getArguments().getNodes();
            for (auto & argument : arguments)
                visitImpl(argument, or_group);
        }
        else
        {
            assert(name == "not");
            auto & arguments = function_node->getArguments().getNodes();
            or_group.insert(CNF::AtomicFormula{true, std::move(arguments[0])});
        }
    }
};

std::optional<CNF::AtomicFormula> tryInvertFunction(
    const CNF::AtomicFormula & atom, const ContextPtr & context, const std::unordered_map<std::string, std::string> & inverse_relations)
{
    auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
    if (!function_node)
        return std::nullopt;

    if (auto it = inverse_relations.find(function_node->getFunctionName()); it != inverse_relations.end())
    {
        auto inverse_function_resolver = FunctionFactory::instance().get(it->second, context);
        function_node->resolveAsFunction(inverse_function_resolver);
        return CNF::AtomicFormula{!atom.negative, atom.node_with_hash.node};
    }

    return std::nullopt;
}

}

bool CNF::AtomicFormula::operator==(const AtomicFormula & rhs) const
{
    return negative == rhs.negative && node_with_hash == rhs.node_with_hash;
}

bool CNF::AtomicFormula::operator<(const AtomicFormula & rhs) const
{
    if (node_with_hash.hash > rhs.node_with_hash.hash)
        return false;

    return node_with_hash.hash < rhs.node_with_hash.hash || negative < rhs.negative;
}

std::string CNF::dump() const
{
    WriteBufferFromOwnString res;
    bool first = true;
    for (const auto & group : statements)
    {
        if (!first)
            res << " AND ";
        first = false;
        res << "(";
        bool first_in_group = true;
        for (const auto & atom : group)
        {
            if (!first_in_group)
                res << " OR ";
            first_in_group = false;
            if (atom.negative)
                res << " NOT ";
            res << atom.node_with_hash.node->formatASTForErrorMessage();
        }
        res << ")";
    }

    return res.str();
}

CNF & CNF::transformGroups(std::function<OrGroup(const OrGroup &)> fn)
{
    AndGroup result;

    for (const auto & group : statements)
    {
        auto new_group = fn(group);
        if (!new_group.empty())
            result.insert(std::move(new_group));
    }

    statements = std::move(result);
    return *this;
}

CNF & CNF::transformAtoms(std::function<AtomicFormula(const AtomicFormula &)> fn)
{
    transformGroups([fn](const OrGroup & group)
    {
        OrGroup result;
        for (const auto & atom : group)
        {
            auto new_atom = fn(atom);
            if (new_atom.node_with_hash.node)
                result.insert(std::move(new_atom));
        }

        return result;
    });

    return *this;
}

CNF & CNF::pushNotIntoFunctions(const ContextPtr & context)
{
    transformAtoms([&](const AtomicFormula & atom)
    {
        return pushNotIntoFunction(atom, context);
    });

    return *this;
}

CNF::AtomicFormula CNF::pushNotIntoFunction(const AtomicFormula & atom, const ContextPtr & context)
{
    if (!atom.negative)
        return atom;

    static const std::unordered_map<std::string, std::string> inverse_relations = {
        {"equals", "notEquals"},
        {"less", "greaterOrEquals"},
        {"lessOrEquals", "greater"},
        {"in", "notIn"},
        {"like", "notLike"},
        {"empty", "notEmpty"},
        {"notEquals", "equals"},
        {"greaterOrEquals", "less"},
        {"greater", "lessOrEquals"},
        {"notIn", "in"},
        {"notLike", "like"},
        {"notEmpty", "empty"},
    };

    if (auto inverted_atom = tryInvertFunction(atom, context, inverse_relations);
        inverted_atom.has_value())
        return std::move(*inverted_atom);

    return atom;
}

CNF & CNF::pullNotOutFunctions(const ContextPtr & context)
{
    transformAtoms([&](const AtomicFormula & atom)
    {
        static const std::unordered_map<std::string, std::string> inverse_relations = {
            {"notEquals", "equals"},
            {"greaterOrEquals", "less"},
            {"greater", "lessOrEquals"},
            {"notIn", "in"},
            {"notLike", "like"},
            {"notEmpty", "empty"},
        };

        if (auto inverted_atom = tryInvertFunction(atom, context, inverse_relations);
            inverted_atom.has_value())
            return std::move(*inverted_atom);

        return atom;
    });

    return *this;
}

CNF & CNF::filterAlwaysTrueGroups(std::function<bool(const OrGroup &)> predicate)
{
    AndGroup filtered;
    for (const auto & or_group : statements)
    {
        if (predicate(or_group))
            filtered.insert(or_group);
    }

    statements = std::move(filtered);
    return *this;
}

CNF & CNF::filterAlwaysFalseAtoms(std::function<bool(const AtomicFormula &)> predicate)
{
    AndGroup filtered;
    for (const auto & or_group : statements)
    {
        OrGroup filtered_group;
        for (const auto & atom : or_group)
        {
            if (predicate(atom))
                filtered_group.insert(atom);
        }

        if (!filtered_group.empty())
            filtered.insert(std::move(filtered_group));
        else
        {
            filtered.clear();
            filtered_group.insert(AtomicFormula{false, QueryTreeNodePtrWithHash{std::make_shared<ConstantNode>(static_cast<UInt8>(0))}});
            filtered.insert(std::move(filtered_group));
            break;
        }
    }

    statements = std::move(filtered);
    return *this;
}

CNF & CNF::reduce()
{
    while (true)
    {
        AndGroup new_statements = reduceOnceCNFStatements(statements);
        if (statements == new_statements)
        {
            statements = filterCNFSubsets(statements);
            return *this;
        }
        else
            statements = new_statements;
    }
}

void CNF::appendGroup(const AndGroup & and_group)
{
    for (const auto & or_group : and_group)
        statements.emplace(or_group);
}

CNF::CNF(AndGroup statements_)
    : statements(std::move(statements_))
{}

std::optional<CNF> CNF::tryBuildCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier)
{
    auto node_cloned = node->clone();

    size_t atom_count = countAtoms(node_cloned);
    size_t max_atoms = max_growth_multiplier ? std::max(MAX_ATOMS_WITHOUT_CHECK, atom_count * max_growth_multiplier) : 0;

    {
        SplitMultiLogicVisitor visitor(context);
        visitor.visit(node_cloned);
    }
|
|
||||||
|
{
|
||||||
|
PushNotVisitor visitor(context);
|
||||||
|
visitor.visit(node_cloned, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (PushOrVisitor visitor(context, max_atoms, atom_count);
|
||||||
|
!visitor.visit(node_cloned))
|
||||||
|
return std::nullopt;
|
||||||
|
|
||||||
|
CollectGroupsVisitor collect_visitor;
|
||||||
|
collect_visitor.visit(node_cloned);
|
||||||
|
|
||||||
|
if (collect_visitor.and_group.empty())
|
||||||
|
return std::nullopt;
|
||||||
|
|
||||||
|
return CNF{std::move(collect_visitor.and_group)};
|
||||||
|
}
|
||||||
|
|
||||||
|
CNF CNF::toCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier)
|
||||||
|
{
|
||||||
|
auto cnf = tryBuildCNF(node, context, max_growth_multiplier);
|
||||||
|
if (!cnf)
|
||||||
|
        throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS,
                        "Cannot convert expression '{}' to CNF, because it produces too many clauses. "
                        "The size of a boolean formula in CNF can be exponential in the size of the source formula.",
                        node->formatASTForErrorMessage());

    return *cnf;
}

QueryTreeNodePtr CNF::toQueryTree(ContextPtr context) const
{
    if (statements.empty())
        return nullptr;

    QueryTreeNodes and_arguments;
    and_arguments.reserve(statements.size());

    auto not_resolver = FunctionFactory::instance().get("not", context);
    auto or_resolver = FunctionFactory::instance().get("or", context);
    auto and_resolver = FunctionFactory::instance().get("and", context);

    const auto function_node_from_atom = [&](const auto & atom) -> QueryTreeNodePtr
    {
        auto cloned_node = atom.node_with_hash.node->clone();
        if (atom.negative)
            return createFunctionNode(not_resolver, std::move(cloned_node));

        return std::move(cloned_node);
    };

    for (const auto & or_group : statements)
    {
        if (or_group.size() == 1)
        {
            const auto & atom = *or_group.begin();
            and_arguments.push_back(function_node_from_atom(atom));
        }
        else
        {
            QueryTreeNodes or_arguments;
            or_arguments.reserve(or_group.size());

            for (const auto & atom : or_group)
                or_arguments.push_back(function_node_from_atom(atom));

            auto or_function = std::make_shared<FunctionNode>("or");
            or_function->getArguments().getNodes() = std::move(or_arguments);
            or_function->resolveAsFunction(or_resolver);

            and_arguments.push_back(std::move(or_function));
        }
    }

    if (and_arguments.size() == 1)
        return std::move(and_arguments[0]);

    auto and_function = std::make_shared<FunctionNode>("and");
    and_function->getArguments().getNodes() = std::move(and_arguments);
    and_function->resolveAsFunction(and_resolver);

    return and_function;
}

}

}
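A brief aside on the max_atoms guard in tryBuildCNF above: distributing OR over AND can grow a formula exponentially, which is exactly what the budget is there to stop. The following standalone sketch (illustrative only, not part of the patch; the constants mirror DEFAULT_MAX_GROWTH_MULTIPLIER and MAX_ATOMS_WITHOUT_CHECK declared in CNF.h below) shows the arithmetic for a small disjunction of conjunctions:

#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
    /// (a AND b) OR (c AND d) OR (e AND f): 6 atoms, 3 two-atom conjunctions.
    const std::size_t atom_count = 6;
    const std::size_t conjunctions = 3;

    /// Distribution produces one OR-group per combination of picks: 2^3 = 8.
    const std::size_t or_groups_after_cnf = std::size_t(1) << conjunctions;

    /// The budget tryBuildCNF computes for the default multiplier of 20.
    const std::size_t max_atoms = std::max<std::size_t>(200, atom_count * 20);

    std::cout << or_groups_after_cnf << " OR-groups after distribution, "
              << max_atoms << " atoms allowed by the budget\n";
    return 0;
}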
67 src/Analyzer/Passes/CNF.h Normal file
@@ -0,0 +1,67 @@
#pragma once

#include <Analyzer/HashUtils.h>
#include <Analyzer/IQueryTreeNode.h>

#include <Common/SipHash.h>

#include <Interpreters/Context_fwd.h>

#include <unordered_set>

namespace DB::Analyzer
{

class CNF
{
public:
    struct AtomicFormula
    {
        bool negative = false;
        QueryTreeNodePtrWithHash node_with_hash;

        bool operator==(const AtomicFormula & rhs) const;
        bool operator<(const AtomicFormula & rhs) const;
    };

    // Different hash is generated for different order, so we use std::set
    using OrGroup = std::set<AtomicFormula>;
    using AndGroup = std::set<OrGroup>;

    std::string dump() const;

    static constexpr size_t DEFAULT_MAX_GROWTH_MULTIPLIER = 20;
    static constexpr size_t MAX_ATOMS_WITHOUT_CHECK = 200;

    CNF & transformAtoms(std::function<AtomicFormula(const AtomicFormula &)> fn);
    CNF & transformGroups(std::function<OrGroup(const OrGroup &)> fn);

    CNF & filterAlwaysTrueGroups(std::function<bool(const OrGroup &)> predicate);
    CNF & filterAlwaysFalseAtoms(std::function<bool(const AtomicFormula &)> predicate);

    CNF & reduce();

    void appendGroup(const AndGroup & and_group);

    /// Convert "NOT fn" to a single node representing inverse of "fn"
    CNF & pushNotIntoFunctions(const ContextPtr & context);
    CNF & pullNotOutFunctions(const ContextPtr & context);

    static AtomicFormula pushNotIntoFunction(const AtomicFormula & atom, const ContextPtr & context);

    explicit CNF(AndGroup statements_);

    static std::optional<CNF> tryBuildCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier = DEFAULT_MAX_GROWTH_MULTIPLIER);
    static CNF toCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_growth_multiplier = DEFAULT_MAX_GROWTH_MULTIPLIER);

    QueryTreeNodePtr toQueryTree(ContextPtr context) const;

    const auto & getStatements() const
    {
        return statements;
    }

private:
    AndGroup statements;
};

}
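For orientation, a hypothetical caller of this interface could look like the sketch below (illustrative only: normalizeFilter is not a function from this commit, and error handling is elided):

#include <Analyzer/Passes/CNF.h>

namespace DB
{

QueryTreeNodePtr normalizeFilter(const QueryTreeNodePtr & filter, ContextPtr context)
{
    /// Refuse to convert if the formula would grow past the budget.
    auto cnf = Analyzer::CNF::tryBuildCNF(filter, context);
    if (!cnf)
        return filter;

    cnf->pushNotIntoFunctions(context); /// e.g. NOT equals(a, b) -> notEquals(a, b)
    cnf->reduce();                      /// absorb redundant OR-groups
    return cnf->toQueryTree(context);
}

}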
733 src/Analyzer/Passes/ConvertQueryToCNFPass.cpp Normal file
@@ -0,0 +1,733 @@
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>

#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/TableNode.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/Passes/CNF.h>
#include <Analyzer/Utils.h>

#include <Storages/IStorage.h>

#include <Functions/FunctionFactory.h>
#include "Analyzer/HashUtils.h"
#include "Analyzer/IQueryTreeNode.h"
#include "Interpreters/ComparisonGraph.h"
#include "base/types.h"

namespace DB
{

namespace
{

std::optional<Analyzer::CNF> tryConvertQueryToCNF(const QueryTreeNodePtr & node, const ContextPtr & context)
{
    auto cnf_form = Analyzer::CNF::tryBuildCNF(node, context);
    if (!cnf_form)
        return std::nullopt;

    cnf_form->pushNotIntoFunctions(context);
    return cnf_form;
}

enum class MatchState : uint8_t
{
    FULL_MATCH, /// a = b
    PARTIAL_MATCH, /// a = not b
    NONE,
};

MatchState match(const Analyzer::CNF::AtomicFormula & a, const Analyzer::CNF::AtomicFormula & b)
{
    using enum MatchState;
    if (a.node_with_hash != b.node_with_hash)
        return NONE;

    return a.negative == b.negative ? FULL_MATCH : PARTIAL_MATCH;
}
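/// Illustrative examples for the states above (not from the patch):
///   match({false, a = b}, {false, a = b}) -> FULL_MATCH    (same atom, same sign)
///   match({true,  a = b}, {false, a = b}) -> PARTIAL_MATCH (same atom, opposite sign)
///   match({false, a = b}, {false, a = c}) -> NONE          (different atoms)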

bool checkIfGroupAlwaysTrueFullMatch(const Analyzer::CNF::OrGroup & group, const ConstraintsDescription::QueryTreeData & query_tree_constraints)
{
    /// We have the constraints in CNF.
    /// That CNF is always true => each OR group in it is always true.
    /// So, we check whether at least one OR group from the constraints is a subset of our group.
    /// If we've found one, then our group is always true too.

    const auto & constraints_data = query_tree_constraints.getConstraintData();
    std::vector<size_t> found(constraints_data.size());
    for (size_t i = 0; i < constraints_data.size(); ++i)
        found[i] = constraints_data[i].size();

    for (const auto & atom : group)
    {
        const auto constraint_atom_ids = query_tree_constraints.getAtomIds(atom.node_with_hash);
        if (constraint_atom_ids)
        {
            const auto constraint_atoms = query_tree_constraints.getAtomsById(*constraint_atom_ids);
            for (size_t i = 0; i < constraint_atoms.size(); ++i)
            {
                if (match(constraint_atoms[i], atom) == MatchState::FULL_MATCH)
                {
                    if ((--found[(*constraint_atom_ids)[i].group_id]) == 0)
                        return true;
                }
            }
        }
    }
    return false;
}

bool checkIfGroupAlwaysTrueGraph(const Analyzer::CNF::OrGroup & group, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    /// We try to find at least one atom that is always true by using comparison graph.
    for (const auto & atom : group)
    {
        const auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
        if (function_node)
        {
            const auto & arguments = function_node->getArguments().getNodes();
            if (arguments.size() == 2)
            {
                const auto expected = ComparisonGraph<QueryTreeNodePtr>::atomToCompareResult(atom);
                if (graph.isAlwaysCompare(expected, arguments[0], arguments[1]))
                    return true;
            }
        }
    }

    return false;
}

bool checkIfAtomAlwaysFalseFullMatch(const Analyzer::CNF::AtomicFormula & atom, const ConstraintsDescription::QueryTreeData & query_tree_constraints)
{
    const auto constraint_atom_ids = query_tree_constraints.getAtomIds(atom.node_with_hash);
    if (constraint_atom_ids)
    {
        for (const auto & constraint_atom : query_tree_constraints.getAtomsById(*constraint_atom_ids))
        {
            const auto match_result = match(constraint_atom, atom);
            if (match_result == MatchState::PARTIAL_MATCH)
                return true;
        }
    }

    return false;
}

bool checkIfAtomAlwaysFalseGraph(const Analyzer::CNF::AtomicFormula & atom, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    const auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
    if (!function_node)
        return false;

    const auto & arguments = function_node->getArguments().getNodes();
    if (arguments.size() != 2)
        return false;

    /// TODO: special support for !=
    const auto expected = ComparisonGraph<QueryTreeNodePtr>::atomToCompareResult(atom);
    return !graph.isPossibleCompare(expected, arguments[0], arguments[1]);
}

void replaceToConstants(QueryTreeNodePtr & term, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    const auto equal_constant = graph.getEqualConst(term);
    if (equal_constant)
    {
        term = (*equal_constant)->clone();
        return;
    }

    for (auto & child : term->getChildren())
    {
        if (child)
            replaceToConstants(child, graph);
    }
}

Analyzer::CNF::AtomicFormula replaceTermsToConstants(const Analyzer::CNF::AtomicFormula & atom, const ComparisonGraph<QueryTreeNodePtr> & graph)
{
    auto node = atom.node_with_hash.node->clone();
    replaceToConstants(node, graph);
    return {atom.negative, std::move(node)};
}

StorageSnapshotPtr getStorageSnapshot(const QueryTreeNodePtr & node)
{
    StorageSnapshotPtr storage_snapshot{nullptr};
    if (auto * table_node = node->as<TableNode>())
        return table_node->getStorageSnapshot();
    else if (auto * table_function_node = node->as<TableFunctionNode>())
        return table_function_node->getStorageSnapshot();

    return nullptr;
}

bool onlyIndexColumns(const QueryTreeNodePtr & node, const std::unordered_set<std::string_view> & primary_key_set)
{
    const auto * column_node = node->as<ColumnNode>();
    /// TODO: verify that full name is correct here
    if (column_node && !primary_key_set.contains(column_node->getColumnName()))
        return false;

    for (const auto & child : node->getChildren())
    {
        if (child && !onlyIndexColumns(child, primary_key_set))
            return false;
    }

    return true;
}

bool onlyConstants(const QueryTreeNodePtr & node)
{
    /// If it's only a constant, it will already have been calculated.
    return node->as<ConstantNode>() != nullptr;
}

const std::unordered_map<std::string_view, ComparisonGraphCompareResult> & getRelationMap()
{
    using enum ComparisonGraphCompareResult;
    static const std::unordered_map<std::string_view, ComparisonGraphCompareResult> relations =
    {
        {"equals", EQUAL},
        {"less", LESS},
        {"lessOrEquals", LESS_OR_EQUAL},
        {"greaterOrEquals", GREATER_OR_EQUAL},
        {"greater", GREATER},
    };
    return relations;
}

const std::unordered_map<ComparisonGraphCompareResult, std::string> & getReverseRelationMap()
{
    using enum ComparisonGraphCompareResult;
    static const std::unordered_map<ComparisonGraphCompareResult, std::string> relations =
    {
        {EQUAL, "equals"},
        {LESS, "less"},
        {LESS_OR_EQUAL, "lessOrEquals"},
        {GREATER_OR_EQUAL, "greaterOrEquals"},
        {GREATER, "greater"},
    };
    return relations;
}

bool canBeSequence(const ComparisonGraphCompareResult left, const ComparisonGraphCompareResult right)
{
    using enum ComparisonGraphCompareResult;
    if (left == UNKNOWN || right == UNKNOWN || left == NOT_EQUAL || right == NOT_EQUAL)
        return false;
    if ((left == GREATER || left == GREATER_OR_EQUAL) && (right == LESS || right == LESS_OR_EQUAL))
        return false;
    if ((right == GREATER || right == GREATER_OR_EQUAL) && (left == LESS || left == LESS_OR_EQUAL))
        return false;
    return true;
}

ComparisonGraphCompareResult mostStrict(const ComparisonGraphCompareResult left, const ComparisonGraphCompareResult right)
{
    using enum ComparisonGraphCompareResult;
    if (left == LESS || left == GREATER)
        return left;
    if (right == LESS || right == GREATER)
        return right;
    if (left == LESS_OR_EQUAL || left == GREATER_OR_EQUAL)
        return left;
    if (right == LESS_OR_EQUAL || right == GREATER_OR_EQUAL)
        return right;
    if (left == EQUAL)
        return left;
    if (right == EQUAL)
        return right;
    return UNKNOWN;
}

/// Create an OR-group for 'indexHint'.
/// Suppose we have an expression A <op1> C, where C is a constant,
/// and a constraint I <op2> A, where I depends only on columns from the primary key.
/// If op1 and op2 form a sequence of comparisons (e.g. A < C and I < A),
/// we can add the condition 'indexHint(I <op> C)' to the expression.
Analyzer::CNF::OrGroup createIndexHintGroup(
    const Analyzer::CNF::OrGroup & group,
    const ComparisonGraph<QueryTreeNodePtr> & graph,
    const QueryTreeNodes & primary_key_only_nodes,
    const ContextPtr & context)
{
    Analyzer::CNF::OrGroup result;
    for (const auto & atom : group)
    {
        const auto * function_node = atom.node_with_hash.node->as<FunctionNode>();
        if (!function_node || !getRelationMap().contains(function_node->getFunctionName()))
            continue;

        const auto & arguments = function_node->getArguments().getNodes();
        if (arguments.size() != 2)
            continue;

        auto check_and_insert = [&](const size_t index, const ComparisonGraphCompareResult expected_result)
        {
            if (!onlyConstants(arguments[1 - index]))
                return false;

            for (const auto & primary_key_node : primary_key_only_nodes)
            {
                ComparisonGraphCompareResult actual_result;
                if (index == 0)
                    actual_result = graph.compare(primary_key_node, arguments[index]);
                else
                    actual_result = graph.compare(arguments[index], primary_key_node);

                if (canBeSequence(expected_result, actual_result))
                {
                    auto helper_node = function_node->clone();
                    auto & helper_function_node = helper_node->as<FunctionNode &>();
                    helper_function_node.getArguments().getNodes()[index] = primary_key_node->clone();
                    auto reverse_function_name = getReverseRelationMap().at(mostStrict(expected_result, actual_result));
                    helper_function_node.resolveAsFunction(FunctionFactory::instance().get(reverse_function_name, context));
                    result.insert(Analyzer::CNF::AtomicFormula{atom.negative, std::move(helper_node)});
                    return true;
                }
            }

            return false;
        };

        auto expected = getRelationMap().at(function_node->getFunctionName());
        if (!check_and_insert(0, expected) && !check_and_insert(1, expected))
            return {};
    }

    return result;
}

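/// Worked example for createIndexHintGroup (illustrative values, not from the patch):
/// for a filter atom a < 10 (expected = LESS) and a graph fact pk <= a
/// (actual = LESS_OR_EQUAL), canBeSequence(LESS, LESS_OR_EQUAL) holds,
/// mostStrict picks LESS, and the emitted helper atom is less(pk, 10),
/// which later ends up inside indexHint(pk < 10).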
void addIndexConstraint(Analyzer::CNF & cnf, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    for (const auto & table_expression : table_expressions)
    {
        auto snapshot = getStorageSnapshot(table_expression);
        if (!snapshot || !snapshot->metadata)
            continue;

        const auto primary_key = snapshot->metadata->getColumnsRequiredForPrimaryKey();
        const std::unordered_set<std::string_view> primary_key_set(primary_key.begin(), primary_key.end());

        const auto & query_tree_constraint = snapshot->metadata->getConstraints().getQueryTreeData(context, table_expression);
        const auto & graph = query_tree_constraint.getGraph();

        QueryTreeNodes primary_key_only_nodes;
        for (const auto & vertex : graph.getVertices())
        {
            for (const auto & node : vertex)
            {
                if (onlyIndexColumns(node, primary_key_set))
                    primary_key_only_nodes.push_back(node);
            }
        }

        Analyzer::CNF::AndGroup and_group;
        const auto & statements = cnf.getStatements();
        for (const auto & group : statements)
        {
            auto new_group = createIndexHintGroup(group, graph, primary_key_only_nodes, context);
            if (!new_group.empty())
                and_group.emplace(std::move(new_group));
        }

        if (!and_group.empty())
        {
            Analyzer::CNF::OrGroup new_group;
            auto index_hint_node = std::make_shared<FunctionNode>("indexHint");
            index_hint_node->getArguments().getNodes().push_back(Analyzer::CNF{std::move(and_group)}.toQueryTree(context));
            index_hint_node->resolveAsFunction(FunctionFactory::instance().get("indexHint", context));
            new_group.insert({false, QueryTreeNodePtrWithHash{std::move(index_hint_node)}});

            cnf.appendGroup({new_group});
        }
    }
}

struct ColumnPrice
{
    Int64 compressed_size{0};
    Int64 uncompressed_size{0};

    ColumnPrice(const Int64 compressed_size_, const Int64 uncompressed_size_)
        : compressed_size(compressed_size_)
        , uncompressed_size(uncompressed_size_)
    {
    }

    bool operator<(const ColumnPrice & that) const
    {
        return std::tie(compressed_size, uncompressed_size) < std::tie(that.compressed_size, that.uncompressed_size);
    }

    ColumnPrice & operator+=(const ColumnPrice & that)
    {
        compressed_size += that.compressed_size;
        uncompressed_size += that.uncompressed_size;
        return *this;
    }

    ColumnPrice & operator-=(const ColumnPrice & that)
    {
        compressed_size -= that.compressed_size;
        uncompressed_size -= that.uncompressed_size;
        return *this;
    }
};

using ColumnPriceByName = std::unordered_map<String, ColumnPrice>;
using ColumnPriceByQueryNode = QueryTreeNodePtrWithHashMap<ColumnPrice>;

class ComponentCollectorVisitor : public ConstInDepthQueryTreeVisitor<ComponentCollectorVisitor>
{
public:
    ComponentCollectorVisitor(
        std::set<UInt64> & components_,
        QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component_,
        const ComparisonGraph<QueryTreeNodePtr> & graph_)
        : components(components_), query_node_to_component(query_node_to_component_), graph(graph_)
    {}

    void visitImpl(const QueryTreeNodePtr & node)
    {
        if (auto id = graph.getComponentId(node))
        {
            query_node_to_component.emplace(node, *id);
            components.insert(*id);
        }
    }

private:
    std::set<UInt64> & components;
    QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component;

    const ComparisonGraph<QueryTreeNodePtr> & graph;
};

class ColumnNameCollectorVisitor : public ConstInDepthQueryTreeVisitor<ColumnNameCollectorVisitor>
{
public:
    ColumnNameCollectorVisitor(
        std::unordered_set<std::string> & column_names_,
        const QueryTreeNodePtrWithHashMap<UInt64> * query_node_to_component_)
        : column_names(column_names_), query_node_to_component(query_node_to_component_)
    {}

    bool needChildVisit(const VisitQueryTreeNodeType & parent, const VisitQueryTreeNodeType &)
    {
        return !query_node_to_component || !query_node_to_component->contains(parent);
    }

    void visitImpl(const QueryTreeNodePtr & node)
    {
        if (query_node_to_component && query_node_to_component->contains(node))
            return;

        if (const auto * column_node = node->as<ColumnNode>())
            column_names.insert(column_node->getColumnName());
    }

private:
    std::unordered_set<std::string> & column_names;
    const QueryTreeNodePtrWithHashMap<UInt64> * query_node_to_component;
};

class SubstituteColumnVisitor : public InDepthQueryTreeVisitor<SubstituteColumnVisitor>
{
public:
    SubstituteColumnVisitor(
        const QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component_,
        const std::unordered_map<UInt64, QueryTreeNodePtr> & id_to_query_node_map_,
        ContextPtr context_)
        : query_node_to_component(query_node_to_component_), id_to_query_node_map(id_to_query_node_map_), context(std::move(context_))
    {}

    void visitImpl(QueryTreeNodePtr & node)
    {
        auto component_id_it = query_node_to_component.find(node);
        if (component_id_it == query_node_to_component.end())
            return;

        const auto component_id = component_id_it->second;
        auto new_node = id_to_query_node_map.at(component_id)->clone();

        if (!node->getResultType()->equals(*new_node->getResultType()))
        {
            node = buildCastFunction(new_node, node->getResultType(), context);
            return;
        }

        node = std::move(new_node);
    }

private:
    const QueryTreeNodePtrWithHashMap<UInt64> & query_node_to_component;
    const std::unordered_map<UInt64, QueryTreeNodePtr> & id_to_query_node_map;
    ContextPtr context;
};

ColumnPrice calculatePrice(
    const ColumnPriceByName & column_prices,
    const std::unordered_set<std::string> & column_names)
{
    ColumnPrice result(0, 0);

    for (const auto & column : column_names)
    {
        if (auto it = column_prices.find(column); it != column_prices.end())
            result += it->second;
    }

    return result;
}

void bruteForce(
    const ComparisonGraph<QueryTreeNodePtr> & graph,
    const std::vector<UInt64> & components,
    size_t current_component,
    const ColumnPriceByName & column_prices,
    ColumnPrice current_price,
    std::vector<QueryTreeNodePtr> & expressions_stack,
    ColumnPrice & min_price,
    std::vector<QueryTreeNodePtr> & min_expressions)
{
    if (current_component == components.size())
    {
        if (current_price < min_price)
        {
            min_price = current_price;
            min_expressions = expressions_stack;
        }
        return;
    }

    for (const auto & node : graph.getComponent(components[current_component]))
    {
        std::unordered_set<std::string> column_names;
        ColumnNameCollectorVisitor column_name_collector{column_names, nullptr};
        column_name_collector.visit(node);

        ColumnPrice expression_price = calculatePrice(column_prices, column_names);

        expressions_stack.push_back(node);
        current_price += expression_price;

        ColumnPriceByName new_prices(column_prices);
        for (const auto & column : column_names)
            new_prices.insert_or_assign(column, ColumnPrice(0, 0));

        bruteForce(graph,
            components,
            current_component + 1,
            new_prices,
            current_price,
            expressions_stack,
            min_price,
            min_expressions);

        current_price -= expression_price;
        expressions_stack.pop_back();
    }
}

void substituteColumns(QueryNode & query_node, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    static constexpr UInt64 COLUMN_PENALTY = 10 * 1024 * 1024;
    static constexpr Int64 INDEX_PRICE = -1'000'000'000'000'000'000;

    for (const auto & table_expression : table_expressions)
    {
        auto snapshot = getStorageSnapshot(table_expression);
        if (!snapshot || !snapshot->metadata)
            continue;

        const auto column_sizes = snapshot->storage.getColumnSizes();
        if (column_sizes.empty())
            return;

        auto query_tree_constraint = snapshot->metadata->getConstraints().getQueryTreeData(context, table_expression);
        const auto & graph = query_tree_constraint.getGraph();

        auto run_for_all = [&](const auto function)
        {
            function(query_node.getProjectionNode());

            if (query_node.hasWhere())
                function(query_node.getWhere());

            if (query_node.hasPrewhere())
                function(query_node.getPrewhere());

            if (query_node.hasHaving())
                function(query_node.getHaving());
        };

        std::set<UInt64> components;
        QueryTreeNodePtrWithHashMap<UInt64> query_node_to_component;
        std::unordered_set<std::string> column_names;

        run_for_all([&](QueryTreeNodePtr & node)
        {
            ComponentCollectorVisitor component_collector{components, query_node_to_component, graph};
            component_collector.visit(node);
            ColumnNameCollectorVisitor column_name_collector{column_names, &query_node_to_component};
            column_name_collector.visit(node);
        });

        ColumnPriceByName column_prices;
        const auto primary_key = snapshot->metadata->getColumnsRequiredForPrimaryKey();

        for (const auto & [column_name, column_size] : column_sizes)
            column_prices.insert_or_assign(column_name, ColumnPrice(column_size.data_compressed + COLUMN_PENALTY, column_size.data_uncompressed));

        for (const auto & column_name : primary_key)
            column_prices.insert_or_assign(column_name, ColumnPrice(INDEX_PRICE, INDEX_PRICE));

        for (const auto & column_name : column_names)
            column_prices.insert_or_assign(column_name, ColumnPrice(0, 0));

        std::unordered_map<UInt64, QueryTreeNodePtr> id_to_query_node_map;
        std::vector<UInt64> components_list;

        for (const auto component_id : components)
        {
            auto component = graph.getComponent(component_id);
            if (component.size() == 1)
                id_to_query_node_map[component_id] = component.front();
            else
                components_list.push_back(component_id);
        }

        std::vector<QueryTreeNodePtr> expressions_stack;
        ColumnPrice min_price(std::numeric_limits<Int64>::max(), std::numeric_limits<Int64>::max());
        std::vector<QueryTreeNodePtr> min_expressions;

        bruteForce(graph,
            components_list,
            0,
            column_prices,
            ColumnPrice(0, 0),
            expressions_stack,
            min_price,
            min_expressions);

        for (size_t i = 0; i < components_list.size(); ++i)
            id_to_query_node_map[components_list[i]] = min_expressions[i];

        SubstituteColumnVisitor substitute_column{query_node_to_component, id_to_query_node_map, context};

        run_for_all([&](QueryTreeNodePtr & node)
        {
            substitute_column.visit(node);
        });
    }
}

void optimizeWithConstraints(Analyzer::CNF & cnf, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    cnf.pullNotOutFunctions(context);

    for (const auto & table_expression : table_expressions)
    {
        auto snapshot = getStorageSnapshot(table_expression);
        if (!snapshot || !snapshot->metadata)
            continue;

        const auto & constraints = snapshot->metadata->getConstraints();
        const auto & query_tree_constraints = constraints.getQueryTreeData(context, table_expression);
        const auto & compare_graph = query_tree_constraints.getGraph();
        cnf.filterAlwaysTrueGroups([&](const auto & group)
        {
            /// remove always true groups from CNF
            return !checkIfGroupAlwaysTrueFullMatch(group, query_tree_constraints) && !checkIfGroupAlwaysTrueGraph(group, compare_graph);
        })
        .filterAlwaysFalseAtoms([&](const Analyzer::CNF::AtomicFormula & atom)
        {
            /// remove always false atoms from CNF
            return !checkIfAtomAlwaysFalseFullMatch(atom, query_tree_constraints) && !checkIfAtomAlwaysFalseGraph(atom, compare_graph);
        })
        .transformAtoms([&](const auto & atom)
        {
            return replaceTermsToConstants(atom, compare_graph);
        })
        .reduce();
    }

    cnf.pushNotIntoFunctions(context);

    const auto & settings = context->getSettingsRef();
    if (settings.optimize_append_index)
        addIndexConstraint(cnf, table_expressions, context);
}

void optimizeNode(QueryTreeNodePtr & node, const QueryTreeNodes & table_expressions, const ContextPtr & context)
{
    const auto & settings = context->getSettingsRef();

    auto cnf = tryConvertQueryToCNF(node, context);
    if (!cnf)
        return;

    if (settings.optimize_using_constraints)
        optimizeWithConstraints(*cnf, table_expressions, context);

    auto new_node = cnf->toQueryTree(context);
    node = std::move(new_node);
}

class ConvertQueryToCNFVisitor : public InDepthQueryTreeVisitorWithContext<ConvertQueryToCNFVisitor>
{
public:
    using Base = InDepthQueryTreeVisitorWithContext<ConvertQueryToCNFVisitor>;
    using Base::Base;

    void visitImpl(QueryTreeNodePtr & node)
    {
        auto * query_node = node->as<QueryNode>();
        if (!query_node)
            return;

        auto table_expressions = extractTableExpressions(query_node->getJoinTree());

        const auto & context = getContext();
        const auto & settings = context->getSettingsRef();

        bool has_filter = false;
        const auto optimize_filter = [&](QueryTreeNodePtr & filter_node)
        {
            if (filter_node == nullptr)
                return;

            optimizeNode(filter_node, table_expressions, context);
            has_filter = true;
        };

        optimize_filter(query_node->getWhere());
        optimize_filter(query_node->getPrewhere());
        optimize_filter(query_node->getHaving());

        if (has_filter && settings.optimize_substitute_columns)
            substituteColumns(*query_node, table_expressions, context);
    }
};

}

void ConvertLogicalExpressionToCNFPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
    const auto & settings = context->getSettingsRef();
    if (!settings.convert_query_to_cnf)
        return;

    ConvertQueryToCNFVisitor visitor(std::move(context));
    visitor.visit(query_tree_node);
}

}
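End to end, a rough trace of what optimizeNode does to a filter may help (an illustrative walk-through, not program output; it assumes a table constraint a = b plus convert_query_to_cnf = 1 and optimize_using_constraints = 1):

    /// WHERE NOT (a != b OR x < 1)
    ///   tryConvertQueryToCNF:     (a = b) AND (x >= 1)
    ///   filterAlwaysTrueGroups:   (x >= 1)   /// the (a = b) group matches the constraint and is dropped
    ///   toQueryTree:              WHERE greaterOrEquals(x, 1)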
18 src/Analyzer/Passes/ConvertQueryToCNFPass.h Normal file
@@ -0,0 +1,18 @@
#pragma once

#include <Analyzer/IQueryTreePass.h>

namespace DB
{

class ConvertLogicalExpressionToCNFPass final : public IQueryTreePass
{
public:
    String getName() override { return "ConvertLogicalExpressionToCNFPass"; }

    String getDescription() override { return "Convert logical expression to CNF and apply optimizations using constraints"; }

    void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;
};

}
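As a usage note, the pass is registered once with the pass manager and is then gated entirely by settings. A sketch under the pass-manager API this commit touches in the next hunk (the surrounding setup is assumed, not shown by the patch):

    QueryTreePassManager manager(context);
    manager.addPass(std::make_unique<ConvertLogicalExpressionToCNFPass>());
    manager.run(query_tree_node);
    /// run() above returns early unless convert_query_to_cnf = 1;
    /// optimize_using_constraints, optimize_substitute_columns and
    /// optimize_append_index then enable the individual rewrites.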
@ -41,7 +41,7 @@
|
|||||||
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
|
#include <Analyzer/Passes/LogicalExpressionOptimizerPass.h>
|
||||||
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
|
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
|
||||||
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
|
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
|
||||||
|
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -148,8 +148,6 @@ private:
|
|||||||
|
|
||||||
/** ClickHouse query tree pass manager.
|
/** ClickHouse query tree pass manager.
|
||||||
*
|
*
|
||||||
* TODO: Support setting convert_query_to_cnf.
|
|
||||||
* TODO: Support setting optimize_using_constraints.
|
|
||||||
* TODO: Support setting optimize_substitute_columns.
|
* TODO: Support setting optimize_substitute_columns.
|
||||||
* TODO: Support GROUP BY injective function elimination.
|
* TODO: Support GROUP BY injective function elimination.
|
||||||
* TODO: Support setting optimize_move_functions_out_of_any.
|
* TODO: Support setting optimize_move_functions_out_of_any.
|
||||||
@ -235,6 +233,8 @@ void addQueryTreePasses(QueryTreePassManager & manager)
|
|||||||
manager.addPass(std::make_unique<QueryAnalysisPass>());
|
manager.addPass(std::make_unique<QueryAnalysisPass>());
|
||||||
manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());
|
manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());
|
||||||
|
|
||||||
|
manager.addPass(std::make_unique<ConvertLogicalExpressionToCNFPass>());
|
||||||
|
|
||||||
manager.addPass(std::make_unique<CountDistinctPass>());
|
manager.addPass(std::make_unique<CountDistinctPass>());
|
||||||
manager.addPass(std::make_unique<RewriteAggregateFunctionWithIfPass>());
|
manager.addPass(std::make_unique<RewriteAggregateFunctionWithIfPass>());
|
||||||
manager.addPass(std::make_unique<SumIfToCountIfPass>());
|
manager.addPass(std::make_unique<SumIfToCountIfPass>());
|
||||||
|
@ -20,14 +20,14 @@ BackupMutablePtr BackupFactory::createBackup(const CreateParams & params) const
|
|||||||
const String & engine_name = params.backup_info.backup_engine_name;
|
const String & engine_name = params.backup_info.backup_engine_name;
|
||||||
auto it = creators.find(engine_name);
|
auto it = creators.find(engine_name);
|
||||||
if (it == creators.end())
|
if (it == creators.end())
|
||||||
throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine {}", engine_name);
|
throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine '{}'", engine_name);
|
||||||
return (it->second)(params);
|
return (it->second)(params);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BackupFactory::registerBackupEngine(const String & engine_name, const CreatorFn & creator_fn)
|
void BackupFactory::registerBackupEngine(const String & engine_name, const CreatorFn & creator_fn)
|
||||||
{
|
{
|
||||||
if (creators.contains(engine_name))
|
if (creators.contains(engine_name))
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup engine {} was registered twice", engine_name);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup engine '{}' was registered twice", engine_name);
|
||||||
creators[engine_name] = creator_fn;
|
creators[engine_name] = creator_fn;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -41,7 +41,7 @@ namespace
|
|||||||
key = "backups.allowed_disk[" + std::to_string(++counter) + "]";
|
key = "backups.allowed_disk[" + std::to_string(++counter) + "]";
|
||||||
if (!config.has(key))
|
if (!config.has(key))
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Disk {} is not allowed for backups, see the 'backups.allowed_disk' configuration parameter", quoteString(disk_name));
|
"Disk '{}' is not allowed for backups, see the 'backups.allowed_disk' configuration parameter", quoteString(disk_name));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -54,7 +54,7 @@ namespace
|
|||||||
|
|
||||||
bool path_ok = path.empty() || (path.is_relative() && (*path.begin() != ".."));
|
bool path_ok = path.empty() || (path.is_relative() && (*path.begin() != ".."));
|
||||||
if (!path_ok)
|
if (!path_ok)
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} to backup must be inside the specified disk {}",
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' to backup must be inside the specified disk '{}'",
|
||||||
quoteString(path.c_str()), quoteString(disk_name));
|
quoteString(path.c_str()), quoteString(disk_name));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -126,6 +126,8 @@
|
|||||||
M(DDLWorkerThreadsActive, "Number of threads in the DDLWORKER thread pool for ON CLUSTER queries running a task.") \
|
M(DDLWorkerThreadsActive, "Number of threads in the DDLWORKER thread pool for ON CLUSTER queries running a task.") \
|
||||||
M(StorageDistributedThreads, "Number of threads in the StorageDistributed thread pool.") \
|
M(StorageDistributedThreads, "Number of threads in the StorageDistributed thread pool.") \
|
||||||
M(StorageDistributedThreadsActive, "Number of threads in the StorageDistributed thread pool running a task.") \
|
M(StorageDistributedThreadsActive, "Number of threads in the StorageDistributed thread pool running a task.") \
|
||||||
|
M(DistributedInsertThreads, "Number of threads used for INSERT into Distributed.") \
|
||||||
|
M(DistributedInsertThreadsActive, "Number of threads used for INSERT into Distributed running a task.") \
|
||||||
M(StorageS3Threads, "Number of threads in the StorageS3 thread pool.") \
|
M(StorageS3Threads, "Number of threads in the StorageS3 thread pool.") \
|
||||||
M(StorageS3ThreadsActive, "Number of threads in the StorageS3 thread pool running a task.") \
|
M(StorageS3ThreadsActive, "Number of threads in the StorageS3 thread pool running a task.") \
|
||||||
M(MergeTreePartsLoaderThreads, "Number of threads in the MergeTree parts loader thread pool.") \
|
M(MergeTreePartsLoaderThreads, "Number of threads in the MergeTree parts loader thread pool.") \
|
||||||
@ -184,10 +186,10 @@
|
|||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
#define M(NAME, DOCUMENTATION) extern const Metric NAME = __COUNTER__;
|
#define M(NAME, DOCUMENTATION) extern const Metric NAME = Metric(__COUNTER__);
|
||||||
APPLY_FOR_METRICS(M)
|
APPLY_FOR_METRICS(M)
|
||||||
#undef M
|
#undef M
|
||||||
constexpr Metric END = __COUNTER__;
|
constexpr Metric END = Metric(__COUNTER__);
|
||||||
|
|
||||||
std::atomic<Value> values[END] {}; /// Global variable, initialized by zeros.
|
std::atomic<Value> values[END] {}; /// Global variable, initialized by zeros.
|
||||||
|
|
||||||
|
@ -6,6 +6,7 @@
|
|||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <base/types.h>
|
#include <base/types.h>
|
||||||
|
#include <base/strong_typedef.h>
|
||||||
|
|
||||||
/** Allows to count number of simultaneously happening processes or current value of some metric.
|
/** Allows to count number of simultaneously happening processes or current value of some metric.
|
||||||
* - for high-level profiling.
|
* - for high-level profiling.
|
||||||
@ -22,7 +23,7 @@
|
|||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
/// Metric identifier (index in array).
|
/// Metric identifier (index in array).
|
||||||
using Metric = size_t;
|
using Metric = StrongTypedef<size_t, struct MetricTag>;
|
||||||
using Value = DB::Int64;
|
using Value = DB::Int64;
|
||||||
|
|
||||||
/// Get name of metric by identifier. Returns statically allocated string.
|
/// Get name of metric by identifier. Returns statically allocated string.
|
||||||
|
@ -497,10 +497,10 @@ The server successfully detected this situation and will download merged part fr
|
|||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
{
|
{
|
||||||
|
|
||||||
#define M(NAME, DOCUMENTATION) extern const Event NAME = __COUNTER__;
|
#define M(NAME, DOCUMENTATION) extern const Event NAME = Event(__COUNTER__);
|
||||||
APPLY_FOR_EVENTS(M)
|
APPLY_FOR_EVENTS(M)
|
||||||
#undef M
|
#undef M
|
||||||
constexpr Event END = __COUNTER__;
|
constexpr Event END = Event(__COUNTER__);
|
||||||
|
|
||||||
/// Global variable, initialized by zeros.
|
/// Global variable, initialized by zeros.
|
||||||
Counter global_counters_array[END] {};
|
Counter global_counters_array[END] {};
|
||||||
@ -522,7 +522,7 @@ void Counters::resetCounters()
|
|||||||
{
|
{
|
||||||
if (counters)
|
if (counters)
|
||||||
{
|
{
|
||||||
for (Event i = 0; i < num_counters; ++i)
|
for (Event i = Event(0); i < num_counters; ++i)
|
||||||
counters[i].store(0, std::memory_order_relaxed);
|
counters[i].store(0, std::memory_order_relaxed);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -540,7 +540,7 @@ Counters::Snapshot::Snapshot()
|
|||||||
Counters::Snapshot Counters::getPartiallyAtomicSnapshot() const
|
Counters::Snapshot Counters::getPartiallyAtomicSnapshot() const
|
||||||
{
|
{
|
||||||
Snapshot res;
|
Snapshot res;
|
||||||
for (Event i = 0; i < num_counters; ++i)
|
for (Event i = Event(0); i < num_counters; ++i)
|
||||||
res.counters_holder[i] = counters[i].load(std::memory_order_relaxed);
|
res.counters_holder[i] = counters[i].load(std::memory_order_relaxed);
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
@ -616,7 +616,7 @@ CountersIncrement::CountersIncrement(Counters::Snapshot const & snapshot)
|
|||||||
CountersIncrement::CountersIncrement(Counters::Snapshot const & after, Counters::Snapshot const & before)
|
CountersIncrement::CountersIncrement(Counters::Snapshot const & after, Counters::Snapshot const & before)
|
||||||
{
|
{
|
||||||
init();
|
init();
|
||||||
for (Event i = 0; i < Counters::num_counters; ++i)
|
for (Event i = Event(0); i < Counters::num_counters; ++i)
|
||||||
increment_holder[i] = static_cast<Increment>(after[i]) - static_cast<Increment>(before[i]);
|
increment_holder[i] = static_cast<Increment>(after[i]) - static_cast<Increment>(before[i]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,7 +1,8 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Common/VariableContext.h>
|
#include <Common/VariableContext.h>
|
||||||
#include "base/types.h"
|
#include <base/types.h>
|
||||||
|
#include <base/strong_typedef.h>
|
||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
@ -14,7 +15,7 @@
|
|||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
{
|
{
|
||||||
/// Event identifier (index in array).
|
/// Event identifier (index in array).
|
||||||
using Event = size_t;
|
using Event = StrongTypedef<size_t, struct EventTag>;
|
||||||
using Count = size_t;
|
using Count = size_t;
|
||||||
using Increment = Int64;
|
using Increment = Int64;
|
||||||
using Counter = std::atomic<Count>;
|
using Counter = std::atomic<Count>;
|
||||||
|
@ -8,10 +8,10 @@
|
|||||||
|
|
||||||
namespace CurrentStatusInfo
|
namespace CurrentStatusInfo
|
||||||
{
|
{
|
||||||
#define M(NAME, DOCUMENTATION, ENUM) extern const Status NAME = __COUNTER__;
|
#define M(NAME, DOCUMENTATION, ENUM) extern const Status NAME = Status(__COUNTER__);
|
||||||
APPLY_FOR_STATUS(M)
|
APPLY_FOR_STATUS(M)
|
||||||
#undef M
|
#undef M
|
||||||
constexpr Status END = __COUNTER__;
|
constexpr Status END = Status(__COUNTER__);
|
||||||
|
|
||||||
std::mutex locks[END] {};
|
std::mutex locks[END] {};
|
||||||
std::unordered_map<String, Int8> values[END] {};
|
std::unordered_map<String, Int8> values[END] {};
|
||||||
|
@ -6,13 +6,14 @@
|
|||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <base/types.h>
|
#include <base/types.h>
|
||||||
|
#include <base/strong_typedef.h>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
#include <unordered_map>
|
#include <unordered_map>
|
||||||
|
|
||||||
|
|
||||||
namespace CurrentStatusInfo
|
namespace CurrentStatusInfo
|
||||||
{
|
{
|
||||||
using Status = size_t;
|
using Status = StrongTypedef<size_t, struct StatusTag>;
|
||||||
using Key = std::string;
|
using Key = std::string;
|
||||||
|
|
||||||
const char * getName(Status event);
|
const char * getName(Status event);
|
||||||
|
@ -1,16 +1,23 @@
|
|||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool.h>
|
||||||
|
#include <Common/CurrentMetrics.h>
|
||||||
|
|
||||||
#include <gtest/gtest.h>
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
namespace CurrentMetrics
|
||||||
|
{
|
||||||
|
extern const Metric LocalThread;
|
||||||
|
extern const Metric LocalThreadActive;
|
||||||
|
}
|
||||||
|
|
||||||
/// Test for thread self-removal when number of free threads in pool is too large.
|
/// Test for thread self-removal when number of free threads in pool is too large.
|
||||||
/// Just checks that nothing weird happens.
|
/// Just checks that nothing weird happens.
|
||||||
|
|
||||||
template <typename Pool>
|
template <typename Pool>
|
||||||
int test()
|
int test()
|
||||||
{
|
{
|
||||||
Pool pool(10, 2, 10);
|
Pool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, 10, 2, 10);
|
||||||
|
|
||||||
std::atomic<int> counter{0};
|
std::atomic<int> counter{0};
|
||||||
for (size_t i = 0; i < 10; ++i)
|
for (size_t i = 0; i < 10; ++i)
|
||||||
|
@ -71,9 +71,12 @@ public:
|
|||||||
scale(scale_)
|
scale(scale_)
|
||||||
{
|
{
|
||||||
if (unlikely(precision < 1 || precision > maxPrecision()))
|
if (unlikely(precision < 1 || precision > maxPrecision()))
|
||||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Precision {} is out of bounds", std::to_string(precision));
|
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND,
|
||||||
|
"Precision {} is out of bounds (precision range: [1, {}])",
|
||||||
|
std::to_string(precision), maxPrecision());
|
||||||
if (unlikely(scale > maxPrecision()))
|
if (unlikely(scale > maxPrecision()))
|
||||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", std::to_string(scale));
|
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds (max scale: {})",
|
||||||
|
std::to_string(scale), maxPrecision());
|
||||||
}
|
}
|
||||||
|
|
||||||
TypeIndex getTypeId() const override { return TypeToTypeIndex<T>; }
|
TypeIndex getTypeId() const override { return TypeToTypeIndex<T>; }
|
||||||
|
@ -116,7 +116,8 @@ inline ReturnType convertDecimalsImpl(const typename FromDataType::FieldType & v
|
|||||||
if (common::mulOverflow(static_cast<MaxNativeType>(value.value), converted_value, converted_value))
|
if (common::mulOverflow(static_cast<MaxNativeType>(value.value), converted_value, converted_value))
|
||||||
{
|
{
|
||||||
if constexpr (throw_exception)
|
if constexpr (throw_exception)
|
||||||
throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow", std::string(ToDataType::family_name));
|
throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow while multiplying {} by scale {}",
|
||||||
|
std::string(ToDataType::family_name), toString(value.value), toString(converted_value));
|
||||||
else
|
else
|
||||||
return ReturnType(false);
|
return ReturnType(false);
|
||||||
}
|
}
|
||||||
@ -136,7 +137,10 @@ inline ReturnType convertDecimalsImpl(const typename FromDataType::FieldType & v
|
|||||||
converted_value > std::numeric_limits<typename ToFieldType::NativeType>::max())
|
converted_value > std::numeric_limits<typename ToFieldType::NativeType>::max())
|
||||||
{
|
{
|
||||||
if constexpr (throw_exception)
|
if constexpr (throw_exception)
|
||||||
throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow", std::string(ToDataType::family_name));
|
throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "{} convert overflow: {} is not in range ({}, {})",
|
||||||
|
std::string(ToDataType::family_name), toString(converted_value),
|
||||||
|
toString(std::numeric_limits<typename ToFieldType::NativeType>::min()),
|
||||||
|
toString(std::numeric_limits<typename ToFieldType::NativeType>::max()));
|
||||||
else
|
else
|
||||||
return ReturnType(false);
|
return ReturnType(false);
|
||||||
}
|
}
|
||||||
@@ -661,7 +661,7 @@ BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, Contex
     String node_path = ddl_worker->tryEnqueueAndExecuteEntry(entry, query_context);

     Strings hosts_to_wait = getZooKeeper()->getChildren(zookeeper_path + "/replicas");
-    return getDistributedDDLStatus(node_path, entry, query_context, hosts_to_wait);
+    return getDistributedDDLStatus(node_path, entry, query_context, &hosts_to_wait);
 }

 static UUID getTableUUIDIfReplicated(const String & metadata, ContextPtr context)
@@ -7,10 +7,11 @@
 #include <Interpreters/ExpressionActions.h>
 #include <Processors/Transforms/ExpressionTransform.h>
 #include <QueryPipeline/QueryPipelineBuilder.h>
-#include <Storages/ExternalDataSourceConfiguration.h>
+#include <Storages/checkAndGetLiteralArgument.h>
 #include <IO/ConnectionTimeouts.h>
 #include <Interpreters/Session.h>
 #include <Interpreters/executeQuery.h>
+#include <Storages/NamedCollectionsHelpers.h>
 #include <Common/isLocalAddress.h>
 #include <Common/logger_useful.h>
 #include "DictionarySourceFactory.h"
@@ -28,10 +29,6 @@ namespace ErrorCodes
     extern const int BAD_ARGUMENTS;
 }

-static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
-    "host", "port", "user", "password", "quota_key", "db", "database", "table",
-    "update_field", "update_lag", "invalidate_query", "query", "where", "name", "secure"};
-
 namespace
 {
     constexpr size_t MAX_CONNECTIONS = 16;
@@ -213,47 +210,58 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
         const std::string & config_prefix,
         Block & sample_block,
         ContextPtr global_context,
-        const std::string & default_database [[maybe_unused]],
+        const std::string & default_database,
         bool created_from_ddl) -> DictionarySourcePtr
     {
-        bool secure = config.getBool(config_prefix + ".secure", false);
-        UInt16 default_port = getPortFromContext(global_context, secure);
+        using Configuration = ClickHouseDictionarySource::Configuration;
+        std::optional<Configuration> configuration;

         std::string settings_config_prefix = config_prefix + ".clickhouse";
-        std::string host = config.getString(settings_config_prefix + ".host", "localhost");
-        std::string user = config.getString(settings_config_prefix + ".user", "default");
-        std::string password = config.getString(settings_config_prefix + ".password", "");
-        std::string quota_key = config.getString(settings_config_prefix + ".quota_key", "");
-        std::string db = config.getString(settings_config_prefix + ".db", default_database);
-        std::string table = config.getString(settings_config_prefix + ".table", "");
-        UInt16 port = static_cast<UInt16>(config.getUInt(settings_config_prefix + ".port", default_port));
-        auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key); };
-
-        auto named_collection = created_from_ddl
-            ? getExternalDataSourceConfiguration(config, settings_config_prefix, global_context, has_config_key)
-            : std::nullopt;
+        auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix) : nullptr;

         if (named_collection)
         {
-            const auto & configuration = named_collection->configuration;
-            host = configuration.host;
-            user = configuration.username;
-            password = configuration.password;
-            quota_key = configuration.quota_key;
-            db = configuration.database;
-            table = configuration.table;
-            port = configuration.port;
-        }
-
-        ClickHouseDictionarySource::Configuration configuration{
-            .host = host,
-            .user = user,
-            .password = password,
-            .quota_key = quota_key,
-            .db = db,
-            .table = table,
+            validateNamedCollection(
+                *named_collection, {}, ValidateKeysMultiset<ExternalDatabaseEqualKeysSet>{
+                "secure", "host", "hostnmae", "port", "user", "username", "password", "quota_key", "name",
+                "db", "database", "table","query", "where", "invalidate_query", "update_field", "update_lag"});
+
+            const auto secure = named_collection->getOrDefault("secure", false);
+            const auto default_port = getPortFromContext(global_context, secure);
+            const auto host = named_collection->getAnyOrDefault<String>({"host", "hostname"}, "localhost");
+            const auto port = static_cast<UInt16>(named_collection->getOrDefault<UInt64>("port", default_port));
+
+            configuration.emplace(Configuration{
+                .host = host,
+                .user = named_collection->getAnyOrDefault<String>({"user", "username"}, "default"),
+                .password = named_collection->getOrDefault<String>("password", ""),
+                .quota_key = named_collection->getOrDefault<String>("quota_key", ""),
+                .db = named_collection->getAnyOrDefault<String>({"db", "database"}, default_database),
+                .table = named_collection->getOrDefault<String>("table", ""),
+                .query = named_collection->getOrDefault<String>("query", ""),
+                .where = named_collection->getOrDefault<String>("where", ""),
+                .invalidate_query = named_collection->getOrDefault<String>("invalidate_query", ""),
+                .update_field = named_collection->getOrDefault<String>("update_field", ""),
+                .update_lag = named_collection->getOrDefault<UInt64>("update_lag", 1),
+                .port = port,
+                .is_local = isLocalAddress({host, port}, default_port),
+                .secure = secure,
+            });
+        }
+        else
+        {
+            const auto secure = config.getBool(settings_config_prefix + ".secure", false);
+            const auto default_port = getPortFromContext(global_context, secure);
+            const auto host = config.getString(settings_config_prefix + ".host", "localhost");
+            const auto port = static_cast<UInt16>(config.getUInt(settings_config_prefix + ".port", default_port));
+
+            configuration.emplace(Configuration{
+                .host = host,
+                .user = config.getString(settings_config_prefix + ".user", "default"),
+                .password = config.getString(settings_config_prefix + ".password", ""),
+                .quota_key = config.getString(settings_config_prefix + ".quota_key", ""),
+                .db = config.getString(settings_config_prefix + ".db", default_database),
+                .table = config.getString(settings_config_prefix + ".table", ""),
                 .query = config.getString(settings_config_prefix + ".query", ""),
                 .where = config.getString(settings_config_prefix + ".where", ""),
                 .invalidate_query = config.getString(settings_config_prefix + ".invalidate_query", ""),
@@ -261,15 +269,16 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
                 .update_lag = config.getUInt64(settings_config_prefix + ".update_lag", 1),
                 .port = port,
                 .is_local = isLocalAddress({host, port}, default_port),
-            .secure = config.getBool(settings_config_prefix + ".secure", false)};
+                .secure = secure,
+            });
+        }

         ContextMutablePtr context;
-        if (configuration.is_local)
+        if (configuration->is_local)
         {
             /// We should set user info even for the case when the dictionary is loaded in-process (without TCP communication).
             Session session(global_context, ClientInfo::Interface::LOCAL);
-            session.authenticate(configuration.user, configuration.password, Poco::Net::SocketAddress{});
+            session.authenticate(configuration->user, configuration->password, Poco::Net::SocketAddress{});
             context = session.makeQueryContext();
         }
         else
@@ -277,7 +286,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
             context = Context::createCopy(global_context);

             if (created_from_ddl)
-                context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port));
+                context->getRemoteHostFilter().checkHostAndPort(configuration->host, toString(configuration->port));
         }

         context->applySettingsChanges(readSettingsFromDictionaryConfig(config, config_prefix));
@@ -285,10 +294,10 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
         String dictionary_name = config.getString(".dictionary.name", "");
         String dictionary_database = config.getString(".dictionary.database", "");

-        if (dictionary_name == configuration.table && dictionary_database == configuration.db)
+        if (dictionary_name == configuration->table && dictionary_database == configuration->db)
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "ClickHouseDictionarySource table cannot be dictionary table");

-        return std::make_unique<ClickHouseDictionarySource>(dict_struct, configuration, sample_block, context);
+        return std::make_unique<ClickHouseDictionarySource>(dict_struct, *configuration, sample_block, context);
     };

     factory.registerSource("clickhouse", create_table_source);
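The rewritten registration above resolves every setting along two paths: a named collection with key aliases ("db"/"database", "user"/"username", "host"/"hostname") and, failing that, raw config keys. getAnyOrDefault is the ClickHouse helper shown in the diff; a hedged standalone sketch of the same first-present-alias-wins lookup over a plain std::map:

// First alias that is present wins; otherwise fall back to the default.
#include <initializer_list>
#include <map>
#include <string>

using Collection = std::map<std::string, std::string>;

std::string getAnyOrDefault(const Collection & collection,
                            std::initializer_list<std::string> keys,
                            const std::string & def)
{
    for (const auto & key : keys)
        if (auto it = collection.find(key); it != collection.end())
            return it->second;
    return def;
}

// Usage mirroring the diff: db = getAnyOrDefault(c, {"db", "database"}, default_database);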
@@ -27,7 +27,7 @@ TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_)
     : TemporaryFileOnDisk(disk_, "")
 {}

-TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_, CurrentMetrics::Value metric_scope)
+TemporaryFileOnDisk::TemporaryFileOnDisk(const DiskPtr & disk_, CurrentMetrics::Metric metric_scope)
     : TemporaryFileOnDisk(disk_)
 {
     sub_metric_increment.emplace(metric_scope);
@@ -17,7 +17,7 @@ class TemporaryFileOnDisk
 {
 public:
     explicit TemporaryFileOnDisk(const DiskPtr & disk_);
-    explicit TemporaryFileOnDisk(const DiskPtr & disk_, CurrentMetrics::Value metric_scope);
+    explicit TemporaryFileOnDisk(const DiskPtr & disk_, CurrentMetrics::Metric metric_scope);
     explicit TemporaryFileOnDisk(const DiskPtr & disk_, const String & prefix);

     ~TemporaryFileOnDisk();
@@ -223,7 +223,8 @@ struct ArrayAggregateImpl

                 auto result_scale = column_typed->getScale() * array_size;
                 if (unlikely(result_scale > DecimalUtils::max_precision<AggregationType>))
-                    throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale);
+                    throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds (max scale: {})",
+                                    result_scale, DecimalUtils::max_precision<AggregationType>);

                 res[i] = DecimalUtils::convertTo<ResultType>(product, static_cast<UInt32>(result_scale));
             }
@@ -332,7 +333,8 @@ struct ArrayAggregateImpl
                 auto result_scale = column->getScale() * count;

                 if (unlikely(result_scale > DecimalUtils::max_precision<AggregationType>))
-                    throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale);
+                    throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds (max scale: {})",
+                                    result_scale, DecimalUtils::max_precision<AggregationType>);

                 res[i] = DecimalUtils::convertTo<ResultType>(aggregate_value, static_cast<UInt32>(result_scale));
             }
@@ -18,6 +18,7 @@
 #include "array/arrayIndex.h"
 #include "Functions/like.h"
 #include "Functions/FunctionsStringSearch.h"
+#include <Common/HashTable/HashSet.h>


 namespace DB
@@ -616,103 +617,116 @@ public:
                 "Number of arguments for function {} doesn't match: passed {}, should be 2",
                 getName(), arguments.size());

-        const DataTypeMap * left = checkAndGetDataType<DataTypeMap>(arguments[0].type.get());
-        const DataTypeMap * right = checkAndGetDataType<DataTypeMap>(arguments[1].type.get());
+        const auto * left = checkAndGetDataType<DataTypeMap>(arguments[0].type.get());
+        const auto * right = checkAndGetDataType<DataTypeMap>(arguments[1].type.get());

         if (!left || !right)
-            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The two arguments for function {} must be both Map type",
-                getName());
+            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+                "The two arguments for function {} must be both Map type", getName());

         if (!left->getKeyType()->equals(*right->getKeyType()) || !left->getValueType()->equals(*right->getValueType()))
-            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The Key And Value type of Map for function {} must be the same",
-                getName());
+            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "The Key And Value type of Map for function {} must be the same", getName());

         return std::make_shared<DataTypeMap>(left->getKeyType(), left->getValueType());
     }

     bool useDefaultImplementationForConstants() const override { return true; }

-    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
     {
-        const ColumnMap * col_map_left = typeid_cast<const ColumnMap *>(arguments[0].column.get());
-        const auto * col_const_map_left = checkAndGetColumnConst<ColumnMap>(arguments[0].column.get());
-        bool col_const_map_left_flag = false;
-        if (col_const_map_left)
-        {
-            col_const_map_left_flag = true;
-            col_map_left = typeid_cast<const ColumnMap *>(&col_const_map_left->getDataColumn());
-        }
-        if (!col_map_left)
-            return nullptr;
+        bool is_left_const = isColumnConst(*arguments[0].column);
+        bool is_right_const = isColumnConst(*arguments[1].column);

-        const ColumnMap * col_map_right = typeid_cast<const ColumnMap *>(arguments[1].column.get());
-        const auto * col_const_map_right = checkAndGetColumnConst<ColumnMap>(arguments[1].column.get());
-        bool col_const_map_right_flag = false;
-        if (col_const_map_right)
-        {
-            col_const_map_right_flag = true;
-            col_map_right = typeid_cast<const ColumnMap *>(&col_const_map_right->getDataColumn());
-        }
-        if (!col_map_right)
-            return nullptr;
+        const auto * map_column_left = is_left_const
+            ? checkAndGetColumnConstData<ColumnMap>(arguments[0].column.get())
+            : checkAndGetColumn<ColumnMap>(arguments[0].column.get());

-        const auto & nested_column_left = col_map_left->getNestedColumn();
-        const auto & keys_data_left = col_map_left->getNestedData().getColumn(0);
-        const auto & values_data_left = col_map_left->getNestedData().getColumn(1);
+        const auto * map_column_right = is_right_const
+            ? checkAndGetColumnConstData<ColumnMap>(arguments[1].column.get())
+            : checkAndGetColumn<ColumnMap>(arguments[1].column.get());
+
+        if (!map_column_left || !map_column_right)
+            throw Exception(ErrorCodes::ILLEGAL_COLUMN,
+                "Arguments for function {} must be maps, got {} and {} instead",
+                getName(), arguments[0].column->getName(), arguments[1].column->getName());
+
+        const auto & nested_column_left = map_column_left->getNestedColumn();
+        const auto & keys_data_left = map_column_left->getNestedData().getColumn(0);
+        const auto & values_data_left = map_column_left->getNestedData().getColumn(1);
         const auto & offsets_left = nested_column_left.getOffsets();

-        const auto & nested_column_right = col_map_right->getNestedColumn();
-        const auto & keys_data_right = col_map_right->getNestedData().getColumn(0);
-        const auto & values_data_right = col_map_right->getNestedData().getColumn(1);
+        const auto & nested_column_right = map_column_right->getNestedColumn();
+        const auto & keys_data_right = map_column_right->getNestedData().getColumn(0);
+        const auto & values_data_right = map_column_right->getNestedData().getColumn(1);
         const auto & offsets_right = nested_column_right.getOffsets();

-        const auto & result_type_map = static_cast<const DataTypeMap &>(*result_type);
-        const DataTypePtr & key_type = result_type_map.getKeyType();
-        const DataTypePtr & value_type = result_type_map.getValueType();
-        MutableColumnPtr keys_data = key_type->createColumn();
-        MutableColumnPtr values_data = value_type->createColumn();
-        MutableColumnPtr offsets = DataTypeNumber<IColumn::Offset>().createColumn();
+        auto result_keys = keys_data_left.cloneEmpty();
+        auto result_values = values_data_left.cloneEmpty();
+
+        size_t size_to_reserve = keys_data_right.size() + (keys_data_left.size() - keys_data_right.size());
+
+        result_keys->reserve(size_to_reserve);
+        result_values->reserve(size_to_reserve);
+
+        auto result_offsets = ColumnVector<IColumn::Offset>::create(input_rows_count);
+        auto & result_offsets_data = result_offsets->getData();
+
+        using Set = HashSetWithStackMemory<StringRef, StringRefHash, 4>;
+
+        Set right_keys_const;
+        if (is_right_const)
+        {
+            for (size_t i = 0; i < keys_data_right.size(); ++i)
+                right_keys_const.insert(keys_data_right.getDataAt(i));
+        }

         IColumn::Offset current_offset = 0;
         for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
         {
-            size_t left_it_begin = col_const_map_left_flag ? 0 : offsets_left[row_idx - 1];
-            size_t left_it_end = col_const_map_left_flag ? offsets_left.size() : offsets_left[row_idx];
-            size_t right_it_begin = col_const_map_right_flag ? 0 : offsets_right[row_idx - 1];
-            size_t right_it_end = col_const_map_right_flag ? offsets_right.size() : offsets_right[row_idx];
+            size_t left_from = is_left_const ? 0 : offsets_left[row_idx - 1];
+            size_t left_to = is_left_const ? offsets_left[0] : offsets_left[row_idx];

-            for (size_t i = left_it_begin; i < left_it_end; ++i)
+            size_t right_from = is_right_const ? 0 : offsets_right[row_idx - 1];
+            size_t right_to = is_right_const ? offsets_right[0] : offsets_right[row_idx];
+
+            auto execute_row = [&](const auto & set)
             {
-                bool matched = false;
-                auto key = keys_data_left.getDataAt(i);
-                for (size_t j = right_it_begin; j < right_it_end; ++j)
+                for (size_t i = left_from; i < left_to; ++i)
                 {
-                    if (keys_data_right.getDataAt(j).toString() == key.toString())
+                    if (!set.find(keys_data_left.getDataAt(i)))
                     {
-                        matched = true;
-                        break;
-                    }
-                }
-                if (!matched)
-                {
-                    keys_data->insertFrom(keys_data_left, i);
-                    values_data->insertFrom(values_data_left, i);
+                        result_keys->insertFrom(keys_data_left, i);
+                        result_values->insertFrom(values_data_left, i);
                         ++current_offset;
                     }
                 }
+            };

-            for (size_t j = right_it_begin; j < right_it_end; ++j)
+            if (is_right_const)
             {
-                keys_data->insertFrom(keys_data_right, j);
-                values_data->insertFrom(values_data_right, j);
-                ++current_offset;
+                execute_row(right_keys_const);
+            }
+            else
+            {
+                Set right_keys;
+                for (size_t i = right_from; i < right_to; ++i)
+                    right_keys.insert(keys_data_right.getDataAt(i));
+
+                execute_row(right_keys);
             }

-            offsets->insert(current_offset);
+            size_t right_map_size = right_to - right_from;
+            result_keys->insertRangeFrom(keys_data_right, right_from, right_map_size);
+            result_values->insertRangeFrom(values_data_right, right_from, right_map_size);
+
+            current_offset += right_map_size;
+            result_offsets_data[row_idx] = current_offset;
         }

         auto nested_column = ColumnArray::create(
-            ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}),
-            std::move(offsets));
+            ColumnTuple::create(Columns{std::move(result_keys), std::move(result_values)}),
+            std::move(result_offsets));

         return ColumnMap::create(nested_column);
     }
@@ -20,9 +20,7 @@ namespace DB
 {
 namespace ErrorCodes
 {
-    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
     extern const int ILLEGAL_COLUMN;
-    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int NOT_IMPLEMENTED;
     extern const int BAD_ARGUMENTS;
     extern const int VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE;
@@ -480,33 +478,15 @@ namespace

         DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
         {
-            if (arguments.size() != 1 && arguments.size() != 2 && arguments.size() != 3)
-                throw Exception(
-                    ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
-                    "Number of arguments for function {} doesn't match: passed {}, should be 1, 2 or 3",
-                    getName(),
-                    arguments.size());
+            FunctionArgumentDescriptors args{
+                {"time", &isString<IDataType>, nullptr, "String"},
+                {"format", &isString<IDataType>, nullptr, "String"},
+            };

-            if (!isString(arguments[0].type))
-                throw Exception(
-                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
-                    "Illegal type {} of first argument of function {}. Should be String",
-                    arguments[0].type->getName(),
-                    getName());
+            if (arguments.size() == 3)
+                args.emplace_back(FunctionArgumentDescriptor{"timezone", &isString<IDataType>, nullptr, "String"});

-            if (arguments.size() > 1 && !isString(arguments[1].type))
-                throw Exception(
-                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
-                    "Illegal type {} of second argument of function {}. Should be String",
-                    arguments[0].type->getName(),
-                    getName());
-
-            if (arguments.size() > 2 && !isString(arguments[2].type))
-                throw Exception(
-                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
-                    "Illegal type {} of third argument of function {}. Should be String",
-                    arguments[0].type->getName(),
-                    getName());
+            validateFunctionArgumentTypes(*this, arguments, args);

             String time_zone_name = getTimeZone(arguments).getTimeZone();
             DataTypePtr date_type = std::make_shared<DataTypeDateTime>(time_zone_name);
@@ -1776,14 +1756,6 @@ namespace

         String getFormat(const ColumnsWithTypeAndName & arguments) const
         {
-            if (arguments.size() < 2)
-            {
-                if constexpr (parse_syntax == ParseSyntax::Joda)
-                    return "yyyy-MM-dd HH:mm:ss";
-                else
-                    return "%Y-%m-%d %H:%M:%S";
-            }
-
             const auto * format_column = checkAndGetColumnConst<ColumnString>(arguments[1].column.get());
             if (!format_column)
                 throw Exception(
@@ -71,7 +71,8 @@ private:
         /// For array on stack, see below.
         if (arguments.size() > 10000)
         {
-            throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Number of arguments of function {} is too large.",
+            throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
+                            "Number of arguments of function {} is too large (maximum: 10000).",
                             getName());
         }

@@ -37,7 +37,7 @@ namespace DB
 {
 namespace ErrorCodes
 {
-    extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
     extern const int BAD_ARGUMENTS;
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int ILLEGAL_COLUMN;
@@ -87,7 +87,7 @@ public:
     {
         if (arguments.size() < 2)
         {
-            throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Too few arguments");
+            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least 2 arguments", getName());
         }

         /** We allow function invocation in one of the following forms:
@@ -13,8 +13,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
-    extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
-    extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
 }

 class FunctionSvg : public IFunction
@@ -48,13 +47,9 @@ public:

     DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
     {
-        if (arguments.size() > 2)
+        if (arguments.empty() || arguments.size() > 2)
         {
-            throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Too many arguments");
-        }
-        else if (arguments.empty())
-        {
-            throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Too few arguments");
+            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Incorrect number of arguments: expected 1 or 2 arguments");
         }
         else if (arguments.size() == 2 && checkAndGetDataType<DataTypeString>(arguments[1].get()) == nullptr)
         {
@@ -165,7 +165,8 @@ void readVectorBinary(std::vector<T> & v, ReadBuffer & buf)
     readVarUInt(size, buf);

     if (size > DEFAULT_MAX_STRING_SIZE)
-        throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size.");
+        throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE,
+                        "Too large array size (maximum: {})", DEFAULT_MAX_STRING_SIZE);

     v.resize(size);
     for (size_t i = 0; i < size; ++i)
@@ -1051,7 +1051,7 @@ ActionsDAGPtr ActionsDAG::clone() const
 void ActionsDAG::compileExpressions(size_t min_count_to_compile_expression, const std::unordered_set<const ActionsDAG::Node *> & lazy_executed_nodes)
 {
     compileFunctions(min_count_to_compile_expression, lazy_executed_nodes);
-    removeUnusedActions();
+    removeUnusedActions(/*allow_remove_inputs = */ false);
 }
 #endif

@@ -41,35 +41,35 @@ namespace
         return true;
     }

-    const std::unordered_map<std::string, ComparisonGraph::CompareResult> & getRelationMap()
+    const std::unordered_map<std::string, ComparisonGraphCompareResult> & getRelationMap()
     {
-        const static std::unordered_map<std::string, ComparisonGraph::CompareResult> relations =
+        const static std::unordered_map<std::string, ComparisonGraphCompareResult> relations =
         {
-            {"equals", ComparisonGraph::CompareResult::EQUAL},
-            {"less", ComparisonGraph::CompareResult::LESS},
-            {"lessOrEquals", ComparisonGraph::CompareResult::LESS_OR_EQUAL},
-            {"greaterOrEquals", ComparisonGraph::CompareResult::GREATER_OR_EQUAL},
-            {"greater", ComparisonGraph::CompareResult::GREATER},
+            {"equals", ComparisonGraphCompareResult::EQUAL},
+            {"less", ComparisonGraphCompareResult::LESS},
+            {"lessOrEquals", ComparisonGraphCompareResult::LESS_OR_EQUAL},
+            {"greaterOrEquals", ComparisonGraphCompareResult::GREATER_OR_EQUAL},
+            {"greater", ComparisonGraphCompareResult::GREATER},
         };
         return relations;
     }

-    const std::unordered_map<ComparisonGraph::CompareResult, std::string> & getReverseRelationMap()
+    const std::unordered_map<ComparisonGraphCompareResult, std::string> & getReverseRelationMap()
     {
-        const static std::unordered_map<ComparisonGraph::CompareResult, std::string> relations =
+        const static std::unordered_map<ComparisonGraphCompareResult, std::string> relations =
         {
-            {ComparisonGraph::CompareResult::EQUAL, "equals"},
-            {ComparisonGraph::CompareResult::LESS, "less"},
-            {ComparisonGraph::CompareResult::LESS_OR_EQUAL, "lessOrEquals"},
-            {ComparisonGraph::CompareResult::GREATER_OR_EQUAL, "greaterOrEquals"},
-            {ComparisonGraph::CompareResult::GREATER, "greater"},
+            {ComparisonGraphCompareResult::EQUAL, "equals"},
+            {ComparisonGraphCompareResult::LESS, "less"},
+            {ComparisonGraphCompareResult::LESS_OR_EQUAL, "lessOrEquals"},
+            {ComparisonGraphCompareResult::GREATER_OR_EQUAL, "greaterOrEquals"},
+            {ComparisonGraphCompareResult::GREATER, "greater"},
         };
         return relations;
     }

-    bool canBeSequence(const ComparisonGraph::CompareResult left, const ComparisonGraph::CompareResult right)
+    bool canBeSequence(const ComparisonGraphCompareResult left, const ComparisonGraphCompareResult right)
     {
-        using CR = ComparisonGraph::CompareResult;
+        using CR = ComparisonGraphCompareResult;
         if (left == CR::UNKNOWN || right == CR::UNKNOWN || left == CR::NOT_EQUAL || right == CR::NOT_EQUAL)
             return false;
         if ((left == CR::GREATER || left == CR::GREATER_OR_EQUAL) && (right == CR::LESS || right == CR::LESS_OR_EQUAL))
@@ -79,9 +79,9 @@ namespace
         return true;
     }

-    ComparisonGraph::CompareResult mostStrict(const ComparisonGraph::CompareResult left, const ComparisonGraph::CompareResult right)
+    ComparisonGraphCompareResult mostStrict(const ComparisonGraphCompareResult left, const ComparisonGraphCompareResult right)
     {
-        using CR = ComparisonGraph::CompareResult;
+        using CR = ComparisonGraphCompareResult;
         if (left == CR::LESS || left == CR::GREATER)
             return left;
         if (right == CR::LESS || right == CR::GREATER)
@@ -104,7 +104,7 @@ namespace
     /// we can add to expression 'indexHint(I < A)' condition.
     CNFQuery::OrGroup createIndexHintGroup(
         const CNFQuery::OrGroup & group,
-        const ComparisonGraph & graph,
+        const ComparisonGraph<ASTPtr> & graph,
         const ASTs & primary_key_only_asts)
     {
         CNFQuery::OrGroup result;
@@ -113,14 +113,14 @@ namespace
             const auto * func = atom.ast->as<ASTFunction>();
             if (func && func->arguments->children.size() == 2 && getRelationMap().contains(func->name))
             {
-                auto check_and_insert = [&](const size_t index, const ComparisonGraph::CompareResult need_result)
+                auto check_and_insert = [&](const size_t index, const ComparisonGraphCompareResult need_result)
                 {
                     if (!onlyConstants(func->arguments->children[1 - index]))
                         return false;

                     for (const auto & primary_key_ast : primary_key_only_asts)
                     {
-                        ComparisonGraph::CompareResult actual_result;
+                        ComparisonGraphCompareResult actual_result;
                         if (index == 0)
                             actual_result = graph.compare(primary_key_ast, func->arguments->children[index]);
                         else
@@ -1,10 +1,17 @@
 #include <Interpreters/ComparisonGraph.h>

 #include <Parsers/IAST.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTFunction.h>
 #include <Parsers/queryToString.h>

 #include <Common/FieldVisitorsAccurateComparison.h>

+#include <Analyzer/FunctionNode.h>
+#include <Analyzer/ConstantNode.h>
+
+#include <Functions/FunctionFactory.h>
+
 namespace DB
 {

@@ -17,7 +24,7 @@ namespace
 {

 /// Make function a > b or a >= b
-ASTPtr normalizeAtom(const ASTPtr & atom)
+ASTPtr normalizeAtom(const ASTPtr & atom, ContextPtr)
 {
     static const std::map<std::string, std::string> inverse_relations =
     {
@@ -29,26 +36,158 @@ ASTPtr normalizeAtom(const ASTPtr & atom)
     if (const auto * func = res->as<ASTFunction>())
     {
         if (const auto it = inverse_relations.find(func->name); it != std::end(inverse_relations))
-        {
             res = makeASTFunction(it->second, func->arguments->children[1]->clone(), func->arguments->children[0]->clone());
-        }
     }

     return res;
 }

+QueryTreeNodePtr normalizeAtom(const QueryTreeNodePtr & atom, const ContextPtr & context)
+{
+    static const std::map<std::string, std::string> inverse_relations =
+    {
+        {"lessOrEquals", "greaterOrEquals"},
+        {"less", "greater"},
+    };
+
+    if (const auto * function_node = atom->as<FunctionNode>())
+    {
+        if (const auto it = inverse_relations.find(function_node->getFunctionName()); it != inverse_relations.end())
+        {
+            auto inverted_node = function_node->clone();
+            auto * inverted_function_node = inverted_node->as<FunctionNode>();
+            auto function_resolver = FunctionFactory::instance().get(it->second, context);
+            auto & arguments = inverted_function_node->getArguments().getNodes();
+            assert(arguments.size() == 2);
+            std::swap(arguments[0], arguments[1]);
+            inverted_function_node->resolveAsFunction(function_resolver);
+            return inverted_node;
+        }
+    }
+
+    return atom;
+}
+
+const FunctionNode * tryGetFunctionNode(const QueryTreeNodePtr & node)
+{
+    return node->as<FunctionNode>();
+}
+
+const ASTFunction * tryGetFunctionNode(const ASTPtr & node)
+{
+    return node->as<ASTFunction>();
+}
+
+std::string functionName(const QueryTreeNodePtr & node)
+{
+    return node->as<FunctionNode &>().getFunctionName();
+}
+
+std::string functionName(const ASTPtr & node)
+{
+    return node->as<ASTFunction &>().name;
+}
+
+const Field * tryGetConstantValue(const QueryTreeNodePtr & node)
+{
+    if (const auto * constant = node->as<ConstantNode>())
+        return &constant->getValue();
+
+    return nullptr;
+}
+
+const Field * tryGetConstantValue(const ASTPtr & node)
+{
+    if (const auto * constant = node->as<ASTLiteral>())
+        return &constant->value;
+
+    return nullptr;
+}
+
+template <typename Node>
+const Field & getConstantValue(const Node & node)
+{
+    const auto * constant = tryGetConstantValue(node);
+    assert(constant);
+    return *constant;
+}
+
+const auto & getNode(const Analyzer::CNF::AtomicFormula & atom)
+{
+    return atom.node_with_hash.node;
+}
+
+const auto & getNode(const CNFQuery::AtomicFormula & atom)
+{
+    return atom.ast;
+}
+
+std::string nodeToString(const ASTPtr & ast)
+{
+    return queryToString(ast);
+}
+
+std::string nodeToString(const QueryTreeNodePtr & node)
+{
+    return queryToString(node->toAST());
+}
+
+const auto & getArguments(const ASTFunction * function)
+{
+    return function->arguments->children;
+}
+
+const auto & getArguments(const FunctionNode * function)
+{
+    return function->getArguments().getNodes();
+}
+
 bool less(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateLess{}, lhs, rhs); }
 bool greater(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateLess{}, rhs, lhs); }
 bool equals(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateEquals{}, lhs, rhs); }

+ComparisonGraphCompareResult functionNameToCompareResult(const std::string & name)
+{
+    using enum ComparisonGraphCompareResult;
+    static const std::unordered_map<std::string, ComparisonGraphCompareResult> relation_to_compare =
+    {
+        {"equals", EQUAL},
+        {"notEquals", NOT_EQUAL},
+        {"less", LESS},
+        {"lessOrEquals", LESS_OR_EQUAL},
+        {"greaterOrEquals", GREATER_OR_EQUAL},
+        {"greater", GREATER},
+    };
+
+    const auto it = relation_to_compare.find(name);
+    return it == std::end(relation_to_compare) ? UNKNOWN : it->second;
 }

-ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas)
+ComparisonGraphCompareResult inverseCompareResult(ComparisonGraphCompareResult result)
+{
+    using enum ComparisonGraphCompareResult;
+    static const std::unordered_map<ComparisonGraphCompareResult, ComparisonGraphCompareResult> inverse_relations =
+    {
+        {NOT_EQUAL, EQUAL},
+        {EQUAL, NOT_EQUAL},
+        {GREATER_OR_EQUAL, LESS},
+        {GREATER, LESS_OR_EQUAL},
+        {LESS, GREATER_OR_EQUAL},
+        {LESS_OR_EQUAL, GREATER},
+        {UNKNOWN, UNKNOWN},
+    };
+    return inverse_relations.at(result);
+}
+
+}
+
+template <ComparisonGraphNodeType Node>
+ComparisonGraph<Node>::ComparisonGraph(const NodeContainer & atomic_formulas, ContextPtr context)
 {
     if (atomic_formulas.empty())
         return;

-    static const std::unordered_map<std::string, Edge::Type> relation_to_enum =
+    static const std::unordered_map<std::string, typename Edge::Type> relation_to_enum =
     {
         {"equals", Edge::EQUAL},
         {"greater", Edge::GREATER},
@@ -63,20 +202,23 @@ ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas)
     Graph g;
     for (const auto & atom_raw : atomic_formulas)
     {
-        const auto atom = normalizeAtom(atom_raw);
+        const auto atom = normalizeAtom(atom_raw, context);

-        auto get_index = [](const ASTPtr & ast, Graph & asts_graph) -> std::optional<size_t>
+        auto get_index = [](const Node & node, Graph & nodes_graph) -> std::optional<size_t>
         {
-            const auto it = asts_graph.ast_hash_to_component.find(ast->getTreeHash());
-            if (it != std::end(asts_graph.ast_hash_to_component))
+            const auto it = nodes_graph.node_hash_to_component.find(Graph::getHash(node));
+            if (it != std::end(nodes_graph.node_hash_to_component))
             {
                 if (!std::any_of(
-                        std::cbegin(asts_graph.vertices[it->second].asts),
-                        std::cend(asts_graph.vertices[it->second].asts),
-                        [ast](const ASTPtr & constraint_ast)
+                        std::cbegin(nodes_graph.vertices[it->second].nodes),
+                        std::cend(nodes_graph.vertices[it->second].nodes),
+                        [node](const Node & constraint_node)
                         {
-                            return constraint_ast->getTreeHash() == ast->getTreeHash()
-                                && constraint_ast->getColumnName() == ast->getColumnName();
+                            if constexpr (with_ast)
+                                return constraint_node->getTreeHash() == node->getTreeHash()
+                                    && constraint_node->getColumnName() == node->getColumnName();
+                            else
+                                return constraint_node->isEqual(*node);
                         }))
                 {
                     return {};
@@ -86,22 +228,25 @@ ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas)
             }
             else
             {
-                asts_graph.ast_hash_to_component[ast->getTreeHash()] = asts_graph.vertices.size();
-                asts_graph.vertices.push_back(EqualComponent{{ast}, std::nullopt});
-                asts_graph.edges.emplace_back();
-                return asts_graph.vertices.size() - 1;
+                nodes_graph.node_hash_to_component[Graph::getHash(node)] = nodes_graph.vertices.size();
+                nodes_graph.vertices.push_back(EqualComponent{{node}, std::nullopt});
+                nodes_graph.edges.emplace_back();
+                return nodes_graph.vertices.size() - 1;
             }
         };

-        const auto * func = atom->as<ASTFunction>();
-        if (func && func->arguments->children.size() == 2)
+        const auto * function_node = tryGetFunctionNode(atom);
+        if (function_node)
         {
-            auto index_left = get_index(func->arguments->children[0], g);
-            auto index_right = get_index(func->arguments->children[1], g);
+            const auto & arguments = getArguments(function_node);
+            if (arguments.size() == 2)
+            {
+                auto index_left = get_index(arguments[0], g);
+                auto index_right = get_index(arguments[1], g);

                 if (index_left && index_right)
                 {
-                    if (const auto it = relation_to_enum.find(func->name); it != std::end(relation_to_enum))
+                    if (const auto it = relation_to_enum.find(functionName(atom)); it != std::end(relation_to_enum))
                     {
                         g.edges[*index_left].push_back(Edge{it->second, *index_right});
                         if (it->second == Edge::EQUAL)
@@ -110,6 +255,7 @@ ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas)
                 }
             }
         }
+        }
     }

     /// Now expressions A and B are equal, if and only if
     /// we have both paths from A to B and from B to A in graph.
@@ -119,9 +265,9 @@ ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas)
     /// All expressions from one equivalence class will be stored
     /// in the corresponding vertex of new graph.

-    graph = buildGraphFromAstsGraph(g);
+    graph = buildGraphFromNodesGraph(g);
     dists = buildDistsFromGraph(graph);
-    std::tie(ast_const_lower_bound, ast_const_upper_bound) = buildConstBounds();
+    std::tie(node_const_lower_bound, node_const_upper_bound) = buildConstBounds();

     /// Find expressions that are known to be unequal.
     static const std::unordered_set<String> not_equals_functions = {"notEquals", "greater"};
@@ -130,36 +276,44 @@ ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas)
     /// TODO: Build a graph for unequal components.
     for (const auto & atom_raw : atomic_formulas)
     {
-        const auto atom = normalizeAtom(atom_raw);
-        const auto * func = atom->as<ASTFunction>();
+        const auto atom = normalizeAtom(atom_raw, context);

-        if (func && not_equals_functions.contains(func->name))
+        const auto * function_node = tryGetFunctionNode(atom);
+        if (function_node && not_equals_functions.contains(functionName(atom)))
         {
-            auto index_left = graph.ast_hash_to_component.at(func->arguments->children[0]->getTreeHash());
-            auto index_right = graph.ast_hash_to_component.at(func->arguments->children[1]->getTreeHash());
+            const auto & arguments = getArguments(function_node);
+            if (arguments.size() == 2)
+            {
+                auto index_left = graph.node_hash_to_component.at(Graph::getHash(arguments[0]));
+                auto index_right = graph.node_hash_to_component.at(Graph::getHash(arguments[1]));

                 if (index_left == index_right)
+                {
                     throw Exception(ErrorCodes::VIOLATED_CONSTRAINT,
                         "Found expression '{}', but its arguments considered equal according to constraints",
-                        queryToString(atom));
+                        nodeToString(atom));
+                }

                 not_equal.emplace(index_left, index_right);
                 not_equal.emplace(index_right, index_left);
             }
         }
+        }
     }

-ComparisonGraph::CompareResult ComparisonGraph::pathToCompareResult(Path path, bool inverse)
+template <ComparisonGraphNodeType Node>
+ComparisonGraphCompareResult ComparisonGraph<Node>::pathToCompareResult(Path path, bool inverse)
 {
     switch (path)
     {
-        case Path::GREATER: return inverse ? CompareResult::LESS : CompareResult::GREATER;
-        case Path::GREATER_OR_EQUAL: return inverse ? CompareResult::LESS_OR_EQUAL : CompareResult::GREATER_OR_EQUAL;
+        case Path::GREATER: return inverse ? ComparisonGraphCompareResult::LESS : ComparisonGraphCompareResult::GREATER;
+        case Path::GREATER_OR_EQUAL: return inverse ? ComparisonGraphCompareResult::LESS_OR_EQUAL : ComparisonGraphCompareResult::GREATER_OR_EQUAL;
     }
     UNREACHABLE();
 }

-std::optional<ComparisonGraph::Path> ComparisonGraph::findPath(size_t start, size_t finish) const
+template <ComparisonGraphNodeType Node>
+std::optional<typename ComparisonGraph<Node>::Path> ComparisonGraph<Node>::findPath(size_t start, size_t finish) const
 {
     const auto it = dists.find(std::make_pair(start, finish));
     if (it == std::end(dists))
@@ -170,18 +324,19 @@ std::optional<ComparisonGraph::Path> ComparisonGraph::findPath(size_t start, siz
     return not_equal.contains({start, finish}) ? Path::GREATER : it->second;
 }

-ComparisonGraph::CompareResult ComparisonGraph::compare(const ASTPtr & left, const ASTPtr & right) const
+template <ComparisonGraphNodeType Node>
+ComparisonGraphCompareResult ComparisonGraph<Node>::compare(const Node & left, const Node & right) const
 {
     size_t start = 0;
     size_t finish = 0;

     /// TODO: check full ast
-    const auto it_left = graph.ast_hash_to_component.find(left->getTreeHash());
-    const auto it_right = graph.ast_hash_to_component.find(right->getTreeHash());
+    const auto it_left = graph.node_hash_to_component.find(Graph::getHash(left));
+    const auto it_right = graph.node_hash_to_component.find(Graph::getHash(right));

-    if (it_left == std::end(graph.ast_hash_to_component) || it_right == std::end(graph.ast_hash_to_component))
+    if (it_left == std::end(graph.node_hash_to_component) || it_right == std::end(graph.node_hash_to_component))
     {
-        CompareResult result = CompareResult::UNKNOWN;
+        auto result = ComparisonGraphCompareResult::UNKNOWN;
         {
             const auto left_bound = getConstLowerBound(left);
             const auto right_bound = getConstUpperBound(right);
@@ -189,10 +344,10 @@ ComparisonGraph::CompareResult ComparisonGraph::compare(const ASTPtr & left, con
             if (left_bound && right_bound)
             {
                 if (greater(left_bound->first, right_bound->first))
-                    result = CompareResult::GREATER;
+                    result = ComparisonGraphCompareResult::GREATER;
                 else if (equals(left_bound->first, right_bound->first))
                     result = left_bound->second || right_bound->second
-                        ? CompareResult::GREATER : CompareResult::GREATER_OR_EQUAL;
+                        ? ComparisonGraphCompareResult::GREATER : ComparisonGraphCompareResult::GREATER_OR_EQUAL;
             }
         }
         {
@@ -202,10 +357,10 @@ ComparisonGraph::CompareResult ComparisonGraph::compare(const ASTPtr & left, con
             if (left_bound && right_bound)
             {
                 if (less(left_bound->first, right_bound->first))
-                    result = CompareResult::LESS;
+                    result = ComparisonGraphCompareResult::LESS;
                 else if (equals(left_bound->first, right_bound->first))
                     result = left_bound->second || right_bound->second
-                        ? CompareResult::LESS : CompareResult::LESS_OR_EQUAL;
+                        ? ComparisonGraphCompareResult::LESS : ComparisonGraphCompareResult::LESS_OR_EQUAL;
             }
         }

@@ -218,7 +373,7 @@ ComparisonGraph::CompareResult ComparisonGraph::compare(const ASTPtr & left, con
     }

     if (start == finish)
-        return CompareResult::EQUAL;
+        return ComparisonGraphCompareResult::EQUAL;

     if (auto path = findPath(start, finish))
         return pathToCompareResult(*path, /*inverse=*/ false);
@@ -227,93 +382,102 @@ ComparisonGraph::CompareResult ComparisonGraph::compare(const ASTPtr & left, con
         return pathToCompareResult(*path, /*inverse=*/ true);

     if (not_equal.contains({start, finish}))
-        return CompareResult::NOT_EQUAL;
+        return ComparisonGraphCompareResult::NOT_EQUAL;

-    return CompareResult::UNKNOWN;
+    return ComparisonGraphCompareResult::UNKNOWN;
 }

-bool ComparisonGraph::isPossibleCompare(CompareResult expected, const ASTPtr & left, const ASTPtr & right) const
+template <ComparisonGraphNodeType Node>
+bool ComparisonGraph<Node>::isPossibleCompare(ComparisonGraphCompareResult expected, const Node & left, const Node & right) const
 {
     const auto result = compare(left, right);

-    if (expected == CompareResult::UNKNOWN || result == CompareResult::UNKNOWN)
+    using enum ComparisonGraphCompareResult;
+    if (expected == UNKNOWN || result == UNKNOWN)
         return true;

     if (expected == result)
         return true;

-    static const std::set<std::pair<CompareResult, CompareResult>> possible_pairs =
+    static const std::set<std::pair<ComparisonGraphCompareResult, ComparisonGraphCompareResult>> possible_pairs =
     {
-        {CompareResult::EQUAL, CompareResult::LESS_OR_EQUAL},
-        {CompareResult::EQUAL, CompareResult::GREATER_OR_EQUAL},
-        {CompareResult::LESS_OR_EQUAL, CompareResult::LESS},
-        {CompareResult::LESS_OR_EQUAL, CompareResult::EQUAL},
-        {CompareResult::LESS_OR_EQUAL, CompareResult::NOT_EQUAL},
-        {CompareResult::GREATER_OR_EQUAL, CompareResult::GREATER},
-        {CompareResult::GREATER_OR_EQUAL, CompareResult::EQUAL},
-        {CompareResult::GREATER_OR_EQUAL, CompareResult::NOT_EQUAL},
-        {CompareResult::LESS, CompareResult::LESS},
-        {CompareResult::LESS, CompareResult::LESS_OR_EQUAL},
-        {CompareResult::LESS, CompareResult::NOT_EQUAL},
-        {CompareResult::GREATER, CompareResult::GREATER},
-        {CompareResult::GREATER, CompareResult::GREATER_OR_EQUAL},
-        {CompareResult::GREATER, CompareResult::NOT_EQUAL},
-        {CompareResult::NOT_EQUAL, CompareResult::LESS},
-        {CompareResult::NOT_EQUAL, CompareResult::GREATER},
-        {CompareResult::NOT_EQUAL, CompareResult::LESS_OR_EQUAL},
-        {CompareResult::NOT_EQUAL, CompareResult::GREATER_OR_EQUAL},
+        {EQUAL, LESS_OR_EQUAL},
+        {EQUAL, GREATER_OR_EQUAL},
+        {LESS_OR_EQUAL, LESS},
+        {LESS_OR_EQUAL, EQUAL},
+        {LESS_OR_EQUAL, NOT_EQUAL},
+        {GREATER_OR_EQUAL, GREATER},
+        {GREATER_OR_EQUAL, EQUAL},
+        {GREATER_OR_EQUAL, NOT_EQUAL},
+        {LESS, LESS},
+        {LESS, LESS_OR_EQUAL},
+        {LESS, NOT_EQUAL},
+        {GREATER, GREATER},
+        {GREATER, GREATER_OR_EQUAL},
+        {GREATER, NOT_EQUAL},
+        {NOT_EQUAL, LESS},
+        {NOT_EQUAL, GREATER},
+        {NOT_EQUAL, LESS_OR_EQUAL},
+        {NOT_EQUAL, GREATER_OR_EQUAL},
     };

     return possible_pairs.contains({expected, result});
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ComparisonGraph::isAlwaysCompare(CompareResult expected, const ASTPtr & left, const ASTPtr & right) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
bool ComparisonGraph<Node>::isAlwaysCompare(ComparisonGraphCompareResult expected, const Node & left, const Node & right) const
|
||||||
{
|
{
|
||||||
const auto result = compare(left, right);
|
const auto result = compare(left, right);
|
||||||
|
|
||||||
if (expected == CompareResult::UNKNOWN || result == CompareResult::UNKNOWN)
|
using enum ComparisonGraphCompareResult;
|
||||||
|
if (expected == UNKNOWN || result == UNKNOWN)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (expected == result)
|
if (expected == result)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
static const std::set<std::pair<CompareResult, CompareResult>> possible_pairs =
|
static const std::set<std::pair<ComparisonGraphCompareResult, ComparisonGraphCompareResult>> possible_pairs =
|
||||||
{
|
{
|
||||||
{CompareResult::LESS_OR_EQUAL, CompareResult::LESS},
|
{LESS_OR_EQUAL, LESS},
|
||||||
{CompareResult::LESS_OR_EQUAL, CompareResult::EQUAL},
|
{LESS_OR_EQUAL, EQUAL},
|
||||||
{CompareResult::GREATER_OR_EQUAL, CompareResult::GREATER},
|
{GREATER_OR_EQUAL, GREATER},
|
||||||
{CompareResult::GREATER_OR_EQUAL, CompareResult::EQUAL},
|
{GREATER_OR_EQUAL, EQUAL},
|
||||||
{CompareResult::NOT_EQUAL, CompareResult::GREATER},
|
{NOT_EQUAL, GREATER},
|
||||||
{CompareResult::NOT_EQUAL, CompareResult::LESS},
|
{NOT_EQUAL, LESS},
|
||||||
};
|
};
|
||||||
|
|
||||||
return possible_pairs.contains({expected, result});
|
return possible_pairs.contains({expected, result});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
ASTs ComparisonGraph::getEqual(const ASTPtr & ast) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
typename ComparisonGraph<Node>::NodeContainer ComparisonGraph<Node>::getEqual(const Node & node) const
|
||||||
{
|
{
|
||||||
const auto res = getComponentId(ast);
|
const auto res = getComponentId(node);
|
||||||
if (!res)
|
if (!res)
|
||||||
return {};
|
return {};
|
||||||
else
|
else
|
||||||
return getComponent(res.value());
|
return getComponent(res.value());
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<size_t> ComparisonGraph::getComponentId(const ASTPtr & ast) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::optional<size_t> ComparisonGraph<Node>::getComponentId(const Node & node) const
|
||||||
{
|
{
|
||||||
const auto hash_it = graph.ast_hash_to_component.find(ast->getTreeHash());
|
const auto hash_it = graph.node_hash_to_component.find(Graph::getHash(node));
|
||||||
if (hash_it == std::end(graph.ast_hash_to_component))
|
if (hash_it == std::end(graph.node_hash_to_component))
|
||||||
return {};
|
return {};
|
||||||
|
|
||||||
const size_t index = hash_it->second;
|
const size_t index = hash_it->second;
|
||||||
if (std::any_of(
|
if (std::any_of(
|
||||||
std::cbegin(graph.vertices[index].asts),
|
std::cbegin(graph.vertices[index].nodes),
|
||||||
std::cend(graph.vertices[index].asts),
|
std::cend(graph.vertices[index].nodes),
|
||||||
[ast](const ASTPtr & constraint_ast)
|
[node](const Node & constraint_node)
|
||||||
{
|
{
|
||||||
return constraint_ast->getTreeHash() == ast->getTreeHash() &&
|
if constexpr (with_ast)
|
||||||
constraint_ast->getColumnName() == ast->getColumnName();
|
return constraint_node->getTreeHash() == node->getTreeHash()
|
||||||
|
&& constraint_node->getColumnName() == node->getColumnName();
|
||||||
|
else
|
||||||
|
return constraint_node->getTreeHash() == node->getTreeHash();
|
||||||
}))
|
}))
|
||||||
{
|
{
|
||||||
return index;
|
return index;
|
||||||
@ -324,33 +488,38 @@ std::optional<size_t> ComparisonGraph::getComponentId(const ASTPtr & ast) const
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ComparisonGraph::hasPath(size_t left, size_t right) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
bool ComparisonGraph<Node>::hasPath(size_t left, size_t right) const
|
||||||
{
|
{
|
||||||
return findPath(left, right) || findPath(right, left);
|
return findPath(left, right) || findPath(right, left);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASTs ComparisonGraph::getComponent(size_t id) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
typename ComparisonGraph<Node>::NodeContainer ComparisonGraph<Node>::getComponent(size_t id) const
|
||||||
{
|
{
|
||||||
return graph.vertices[id].asts;
|
return graph.vertices[id].nodes;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ComparisonGraph::EqualComponent::hasConstant() const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
bool ComparisonGraph<Node>::EqualComponent::hasConstant() const
|
||||||
{
|
{
|
||||||
return constant_index.has_value();
|
return constant_index.has_value();
|
||||||
}
|
}
|
||||||
|
|
||||||
ASTPtr ComparisonGraph::EqualComponent::getConstant() const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
Node ComparisonGraph<Node>::EqualComponent::getConstant() const
|
||||||
{
|
{
|
||||||
assert(constant_index);
|
assert(constant_index);
|
||||||
return asts[*constant_index];
|
return nodes[*constant_index];
|
||||||
}
|
}
|
||||||
|
|
||||||
void ComparisonGraph::EqualComponent::buildConstants()
|
template <ComparisonGraphNodeType Node>
|
||||||
|
void ComparisonGraph<Node>::EqualComponent::buildConstants()
|
||||||
{
|
{
|
||||||
constant_index.reset();
|
constant_index.reset();
|
||||||
for (size_t i = 0; i < asts.size(); ++i)
|
for (size_t i = 0; i < nodes.size(); ++i)
|
||||||
{
|
{
|
||||||
if (asts[i]->as<ASTLiteral>())
|
if (tryGetConstantValue(nodes[i]) != nullptr)
|
||||||
{
|
{
|
||||||
constant_index = i;
|
constant_index = i;
|
||||||
return;
|
return;
|
||||||
@ -358,133 +527,120 @@ void ComparisonGraph::EqualComponent::buildConstants()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ComparisonGraph::CompareResult ComparisonGraph::atomToCompareResult(const CNFQuery::AtomicFormula & atom)
|
template <ComparisonGraphNodeType Node>
|
||||||
|
ComparisonGraphCompareResult ComparisonGraph<Node>::atomToCompareResult(const typename CNF::AtomicFormula & atom)
|
||||||
{
|
{
|
||||||
if (const auto * func = atom.ast->as<ASTFunction>())
|
const auto & node = getNode(atom);
|
||||||
|
if (tryGetFunctionNode(node) != nullptr)
|
||||||
{
|
{
|
||||||
auto expected = functionNameToCompareResult(func->name);
|
auto expected = functionNameToCompareResult(functionName(node));
|
||||||
if (atom.negative)
|
if (atom.negative)
|
||||||
expected = inverseCompareResult(expected);
|
expected = inverseCompareResult(expected);
|
||||||
return expected;
|
return expected;
|
||||||
}
|
}
|
||||||
|
|
||||||
return ComparisonGraph::CompareResult::UNKNOWN;
|
return ComparisonGraphCompareResult::UNKNOWN;
|
||||||
}
|
}
|
||||||
|
|
||||||
ComparisonGraph::CompareResult ComparisonGraph::functionNameToCompareResult(const std::string & name)
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::optional<Node> ComparisonGraph<Node>::getEqualConst(const Node & node) const
|
||||||
{
|
{
|
||||||
static const std::unordered_map<std::string, CompareResult> relation_to_compare =
|
const auto hash_it = graph.node_hash_to_component.find(Graph::getHash(node));
|
||||||
{
|
if (hash_it == std::end(graph.node_hash_to_component))
|
||||||
{"equals", CompareResult::EQUAL},
|
|
||||||
{"notEquals", CompareResult::NOT_EQUAL},
|
|
||||||
{"less", CompareResult::LESS},
|
|
||||||
{"lessOrEquals", CompareResult::LESS_OR_EQUAL},
|
|
||||||
{"greaterOrEquals", CompareResult::GREATER_OR_EQUAL},
|
|
||||||
{"greater", CompareResult::GREATER},
|
|
||||||
};
|
|
||||||
|
|
||||||
const auto it = relation_to_compare.find(name);
|
|
||||||
return it == std::end(relation_to_compare) ? CompareResult::UNKNOWN : it->second;
|
|
||||||
}
|
|
||||||
|
|
||||||
ComparisonGraph::CompareResult ComparisonGraph::inverseCompareResult(CompareResult result)
|
|
||||||
{
|
|
||||||
static const std::unordered_map<CompareResult, CompareResult> inverse_relations =
|
|
||||||
{
|
|
||||||
{CompareResult::NOT_EQUAL, CompareResult::EQUAL},
|
|
||||||
{CompareResult::EQUAL, CompareResult::NOT_EQUAL},
|
|
||||||
{CompareResult::GREATER_OR_EQUAL, CompareResult::LESS},
|
|
||||||
{CompareResult::GREATER, CompareResult::LESS_OR_EQUAL},
|
|
||||||
{CompareResult::LESS, CompareResult::GREATER_OR_EQUAL},
|
|
||||||
{CompareResult::LESS_OR_EQUAL, CompareResult::GREATER},
|
|
||||||
{CompareResult::UNKNOWN, CompareResult::UNKNOWN},
|
|
||||||
};
|
|
||||||
return inverse_relations.at(result);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::optional<ASTPtr> ComparisonGraph::getEqualConst(const ASTPtr & ast) const
|
|
||||||
{
|
|
||||||
const auto hash_it = graph.ast_hash_to_component.find(ast->getTreeHash());
|
|
||||||
if (hash_it == std::end(graph.ast_hash_to_component))
|
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
const size_t index = hash_it->second;
|
const size_t index = hash_it->second;
|
||||||
return graph.vertices[index].hasConstant()
|
|
||||||
? std::optional<ASTPtr>{graph.vertices[index].getConstant()}
|
if (!graph.vertices[index].hasConstant())
|
||||||
: std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
|
if constexpr (with_ast)
|
||||||
|
return graph.vertices[index].getConstant();
|
||||||
|
else
|
||||||
|
{
|
||||||
|
const auto & constant = getConstantValue(graph.vertices[index].getConstant());
|
||||||
|
auto constant_node = std::make_shared<ConstantNode>(constant, node->getResultType());
|
||||||
|
return constant_node;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<std::pair<Field, bool>> ComparisonGraph::getConstUpperBound(const ASTPtr & ast) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::optional<std::pair<Field, bool>> ComparisonGraph<Node>::getConstUpperBound(const Node & node) const
|
||||||
{
|
{
|
||||||
if (const auto * literal = ast->as<ASTLiteral>())
|
if (const auto * constant = tryGetConstantValue(node))
|
||||||
return std::make_pair(literal->value, false);
|
return std::make_pair(*constant, false);
|
||||||
|
|
||||||
const auto it = graph.ast_hash_to_component.find(ast->getTreeHash());
|
const auto it = graph.node_hash_to_component.find(Graph::getHash(node));
|
||||||
if (it == std::end(graph.ast_hash_to_component))
|
if (it == std::end(graph.node_hash_to_component))
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
const size_t to = it->second;
|
const size_t to = it->second;
|
||||||
const ssize_t from = ast_const_upper_bound[to];
|
const ssize_t from = node_const_upper_bound[to];
|
||||||
if (from == -1)
|
if (from == -1)
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
return std::make_pair(graph.vertices[from].getConstant()->as<ASTLiteral>()->value, dists.at({from, to}) == Path::GREATER);
|
return std::make_pair(getConstantValue(graph.vertices[from].getConstant()), dists.at({from, to}) == Path::GREATER);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<std::pair<Field, bool>> ComparisonGraph::getConstLowerBound(const ASTPtr & ast) const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::optional<std::pair<Field, bool>> ComparisonGraph<Node>::getConstLowerBound(const Node & node) const
|
||||||
{
|
{
|
||||||
if (const auto * literal = ast->as<ASTLiteral>())
|
if (const auto * constant = tryGetConstantValue(node))
|
||||||
return std::make_pair(literal->value, false);
|
return std::make_pair(*constant, false);
|
||||||
|
|
||||||
const auto it = graph.ast_hash_to_component.find(ast->getTreeHash());
|
const auto it = graph.node_hash_to_component.find(Graph::getHash(node));
|
||||||
if (it == std::end(graph.ast_hash_to_component))
|
if (it == std::end(graph.node_hash_to_component))
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
const size_t from = it->second;
|
const size_t from = it->second;
|
||||||
const ssize_t to = ast_const_lower_bound[from];
|
const ssize_t to = node_const_lower_bound[from];
|
||||||
if (to == -1)
|
if (to == -1)
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
|
|
||||||
return std::make_pair(graph.vertices[to].getConstant()->as<ASTLiteral>()->value, dists.at({from, to}) == Path::GREATER);
|
return std::make_pair(getConstantValue(graph.vertices[to].getConstant()), dists.at({from, to}) == Path::GREATER);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ComparisonGraph::dfsOrder(const Graph & asts_graph, size_t v, std::vector<bool> & visited, std::vector<size_t> & order)
|
template <ComparisonGraphNodeType Node>
|
||||||
|
void ComparisonGraph<Node>::dfsOrder(const Graph & nodes_graph, size_t v, std::vector<bool> & visited, std::vector<size_t> & order)
|
||||||
{
|
{
|
||||||
visited[v] = true;
|
visited[v] = true;
|
||||||
for (const auto & edge : asts_graph.edges[v])
|
for (const auto & edge : nodes_graph.edges[v])
|
||||||
if (!visited[edge.to])
|
if (!visited[edge.to])
|
||||||
dfsOrder(asts_graph, edge.to, visited, order);
|
dfsOrder(nodes_graph, edge.to, visited, order);
|
||||||
|
|
||||||
order.push_back(v);
|
order.push_back(v);
|
||||||
}
|
}
|
||||||
|
|
||||||
ComparisonGraph::Graph ComparisonGraph::reverseGraph(const Graph & asts_graph)
|
template <ComparisonGraphNodeType Node>
|
||||||
|
typename ComparisonGraph<Node>::Graph ComparisonGraph<Node>::reverseGraph(const Graph & nodes_graph)
|
||||||
{
|
{
|
||||||
Graph g;
|
Graph g;
|
||||||
g.ast_hash_to_component = asts_graph.ast_hash_to_component;
|
g.node_hash_to_component = nodes_graph.node_hash_to_component;
|
||||||
g.vertices = asts_graph.vertices;
|
g.vertices = nodes_graph.vertices;
|
||||||
g.edges.resize(g.vertices.size());
|
g.edges.resize(g.vertices.size());
|
||||||
|
|
||||||
for (size_t v = 0; v < asts_graph.vertices.size(); ++v)
|
for (size_t v = 0; v < nodes_graph.vertices.size(); ++v)
|
||||||
for (const auto & edge : asts_graph.edges[v])
|
for (const auto & edge : nodes_graph.edges[v])
|
||||||
g.edges[edge.to].push_back(Edge{edge.type, v});
|
g.edges[edge.to].push_back(Edge{edge.type, v});
|
||||||
|
|
||||||
return g;
|
return g;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<ASTs> ComparisonGraph::getVertices() const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::vector<typename ComparisonGraph<Node>::NodeContainer> ComparisonGraph<Node>::getVertices() const
|
||||||
{
|
{
|
||||||
std::vector<ASTs> result;
|
std::vector<NodeContainer> result;
|
||||||
for (const auto & vertex : graph.vertices)
|
for (const auto & vertex : graph.vertices)
|
||||||
{
|
{
|
||||||
result.emplace_back();
|
result.emplace_back();
|
||||||
for (const auto & ast : vertex.asts)
|
for (const auto & node : vertex.nodes)
|
||||||
result.back().push_back(ast);
|
result.back().push_back(node);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ComparisonGraph::dfsComponents(
|
template <ComparisonGraphNodeType Node>
|
||||||
|
void ComparisonGraph<Node>::dfsComponents(
|
||||||
const Graph & reversed_graph, size_t v,
|
const Graph & reversed_graph, size_t v,
|
||||||
OptionalIndices & components, size_t component)
|
OptionalIndices & components, size_t component)
|
||||||
{
|
{
|
||||||
@ -494,11 +650,12 @@ void ComparisonGraph::dfsComponents(
|
|||||||
dfsComponents(reversed_graph, edge.to, components, component);
|
dfsComponents(reversed_graph, edge.to, components, component);
|
||||||
}
|
}
|
||||||
|
|
||||||
ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & asts_graph)
|
template <ComparisonGraphNodeType Node>
|
||||||
|
typename ComparisonGraph<Node>::Graph ComparisonGraph<Node>::buildGraphFromNodesGraph(const Graph & nodes_graph)
|
||||||
{
|
{
|
||||||
/// Find strongly connected component by using 2 dfs traversals.
|
/// Find strongly connected component by using 2 dfs traversals.
|
||||||
/// https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm
|
/// https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm
|
||||||
const auto n = asts_graph.vertices.size();
|
const auto n = nodes_graph.vertices.size();
|
||||||
|
|
||||||
std::vector<size_t> order;
|
std::vector<size_t> order;
|
||||||
{
|
{
|
||||||
@ -506,14 +663,14 @@ ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & as
|
|||||||
for (size_t v = 0; v < n; ++v)
|
for (size_t v = 0; v < n; ++v)
|
||||||
{
|
{
|
||||||
if (!visited[v])
|
if (!visited[v])
|
||||||
dfsOrder(asts_graph, v, visited, order);
|
dfsOrder(nodes_graph, v, visited, order);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
OptionalIndices components(n);
|
OptionalIndices components(n);
|
||||||
size_t component = 0;
|
size_t component = 0;
|
||||||
{
|
{
|
||||||
const Graph reversed_graph = reverseGraph(asts_graph);
|
const Graph reversed_graph = reverseGraph(nodes_graph);
|
||||||
for (auto it = order.rbegin(); it != order.rend(); ++it)
|
for (auto it = order.rbegin(); it != order.rend(); ++it)
|
||||||
{
|
{
|
||||||
if (!components[*it])
|
if (!components[*it])
|
||||||
@ -527,14 +684,14 @@ ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & as
|
|||||||
Graph result;
|
Graph result;
|
||||||
result.vertices.resize(component);
|
result.vertices.resize(component);
|
||||||
result.edges.resize(component);
|
result.edges.resize(component);
|
||||||
for (const auto & [hash, index] : asts_graph.ast_hash_to_component)
|
for (const auto & [hash, index] : nodes_graph.node_hash_to_component)
|
||||||
{
|
{
|
||||||
assert(components[index]);
|
assert(components[index]);
|
||||||
result.ast_hash_to_component[hash] = *components[index];
|
result.node_hash_to_component[hash] = *components[index];
|
||||||
result.vertices[*components[index]].asts.insert(
|
result.vertices[*components[index]].nodes.insert(
|
||||||
std::end(result.vertices[*components[index]].asts),
|
std::end(result.vertices[*components[index]].nodes),
|
||||||
std::begin(asts_graph.vertices[index].asts),
|
std::begin(nodes_graph.vertices[index].nodes),
|
||||||
std::end(asts_graph.vertices[index].asts)); // asts_graph has only one ast per vertex
|
std::end(nodes_graph.vertices[index].nodes)); // asts_graph has only one ast per vertex
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Calculate constants
|
/// Calculate constants
|
||||||
@ -544,7 +701,7 @@ ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & as
|
|||||||
/// For each edge in initial graph, we add an edge between components in condensation graph.
|
/// For each edge in initial graph, we add an edge between components in condensation graph.
|
||||||
for (size_t v = 0; v < n; ++v)
|
for (size_t v = 0; v < n; ++v)
|
||||||
{
|
{
|
||||||
for (const auto & edge : asts_graph.edges[v])
|
for (const auto & edge : nodes_graph.edges[v])
|
||||||
result.edges[*components[v]].push_back(Edge{edge.type, *components[edge.to]});
|
result.edges[*components[v]].push_back(Edge{edge.type, *components[edge.to]});
|
||||||
|
|
||||||
/// TODO: make edges unique (left most strict)
|
/// TODO: make edges unique (left most strict)
|
||||||
@ -557,11 +714,11 @@ ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & as
|
|||||||
{
|
{
|
||||||
if (v != u && result.vertices[v].hasConstant() && result.vertices[u].hasConstant())
|
if (v != u && result.vertices[v].hasConstant() && result.vertices[u].hasConstant())
|
||||||
{
|
{
|
||||||
const auto * left = result.vertices[v].getConstant()->as<ASTLiteral>();
|
const auto & left = getConstantValue(result.vertices[v].getConstant());
|
||||||
const auto * right = result.vertices[u].getConstant()->as<ASTLiteral>();
|
const auto & right = getConstantValue(result.vertices[u].getConstant());
|
||||||
|
|
||||||
/// Only GREATER. Equal constant fields = equal literals so it was already considered above.
|
/// Only GREATER. Equal constant fields = equal literals so it was already considered above.
|
||||||
if (greater(left->value, right->value))
|
if (greater(left, right))
|
||||||
result.edges[v].push_back(Edge{Edge::GREATER, u});
|
result.edges[v].push_back(Edge{Edge::GREATER, u});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -570,7 +727,8 @@ ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & as
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::map<std::pair<size_t, size_t>, ComparisonGraph::Path> ComparisonGraph::buildDistsFromGraph(const Graph & g)
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::map<std::pair<size_t, size_t>, typename ComparisonGraph<Node>::Path> ComparisonGraph<Node>::buildDistsFromGraph(const Graph & g)
|
||||||
{
|
{
|
||||||
/// Min path : -1 means GREATER, 0 means GREATER_OR_EQUALS.
|
/// Min path : -1 means GREATER, 0 means GREATER_OR_EQUALS.
|
||||||
/// We use Floyd–Warshall algorithm to find distances between all pairs of vertices.
|
/// We use Floyd–Warshall algorithm to find distances between all pairs of vertices.
|
||||||
@ -602,7 +760,8 @@ std::map<std::pair<size_t, size_t>, ComparisonGraph::Path> ComparisonGraph::buil
|
|||||||
return path;
|
return path;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<std::vector<ssize_t>, std::vector<ssize_t>> ComparisonGraph::buildConstBounds() const
|
template <ComparisonGraphNodeType Node>
|
||||||
|
std::pair<std::vector<ssize_t>, std::vector<ssize_t>> ComparisonGraph<Node>::buildConstBounds() const
|
||||||
{
|
{
|
||||||
const size_t n = graph.vertices.size();
|
const size_t n = graph.vertices.size();
|
||||||
std::vector<ssize_t> lower(n, -1);
|
std::vector<ssize_t> lower(n, -1);
|
||||||
@ -610,7 +769,7 @@ std::pair<std::vector<ssize_t>, std::vector<ssize_t>> ComparisonGraph::buildCons
|
|||||||
|
|
||||||
auto get_value = [this](const size_t vertex) -> Field
|
auto get_value = [this](const size_t vertex) -> Field
|
||||||
{
|
{
|
||||||
return graph.vertices[vertex].getConstant()->as<ASTLiteral>()->value;
|
return getConstantValue(graph.vertices[vertex].getConstant());
|
||||||
};
|
};
|
||||||
|
|
||||||
for (const auto & [edge, path] : dists)
|
for (const auto & [edge, path] : dists)
|
||||||
@ -634,7 +793,10 @@ std::pair<std::vector<ssize_t>, std::vector<ssize_t>> ComparisonGraph::buildCons
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return {lower, upper};
|
return {std::move(lower), std::move(upper)};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template class ComparisonGraph<ASTPtr>;
|
||||||
|
template class ComparisonGraph<QueryTreeNodePtr>;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
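The condensation step above follows Kosaraju's algorithm, as the in-code comment notes: one DFS records exit order, a second DFS over the reversed graph labels components. A minimal self-contained sketch of the same idea, with generic names rather than ClickHouse's types:

```cpp
#include <cstddef>
#include <vector>

// Minimal Kosaraju sketch: two DFS passes, one over the graph and one over its reverse.
// `adj` is an adjacency list; returns a component id for each vertex.
std::vector<size_t> stronglyConnectedComponents(const std::vector<std::vector<size_t>> & adj)
{
    const size_t n = adj.size();

    std::vector<std::vector<size_t>> radj(n); // reversed edges
    for (size_t v = 0; v < n; ++v)
        for (size_t to : adj[v])
            radj[to].push_back(v);

    std::vector<bool> visited(n, false);
    std::vector<size_t> order; // vertices in order of DFS exit time

    // Pass 1: record exit order on the original graph.
    auto dfs_order = [&](auto && self, size_t v) -> void
    {
        visited[v] = true;
        for (size_t to : adj[v])
            if (!visited[to])
                self(self, to);
        order.push_back(v);
    };
    for (size_t v = 0; v < n; ++v)
        if (!visited[v])
            dfs_order(dfs_order, v);

    // Pass 2: sweep the reversed graph in reverse exit order;
    // each sweep labels exactly one strongly connected component.
    std::vector<size_t> component(n, 0);
    std::vector<bool> assigned(n, false);
    size_t components = 0;
    auto dfs_assign = [&](auto && self, size_t v, size_t c) -> void
    {
        assigned[v] = true;
        component[v] = c;
        for (size_t to : radj[v])
            if (!assigned[to])
                self(self, to, c);
    };
    for (auto it = order.rbegin(); it != order.rend(); ++it)
        if (!assigned[*it])
            dfs_assign(dfs_assign, *it, components++);

    return component;
}
```

Sweeping the reversed graph in reverse exit order is what guarantees each sweep stays inside a single strongly connected component; the component ids then become the vertices of the condensation graph, exactly as `buildGraphFromNodesGraph` builds them.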
@@ -2,6 +2,12 @@
 
 #include <Parsers/IAST_fwd.h>
 #include <Interpreters/TreeCNFConverter.h>
 
+#include <Analyzer/Passes/CNF.h>
+#include <Analyzer/HashUtils.h>
+#include <Analyzer/IQueryTreeNode.h>
+
+#include <type_traits>
 #include <unordered_map>
 #include <map>
 #include <vector>
@@ -9,18 +15,8 @@
 namespace DB
 {
 
-/*
- * Graph of relations between terms in constraints.
- * Allows to compare terms and get equal terms.
- */
-class ComparisonGraph
+enum class ComparisonGraphCompareResult : uint8_t
 {
-public:
-    /// atomic_formulas are extracted from constraints.
-    explicit ComparisonGraph(const ASTs & atomic_formulas);
-
-    enum class CompareResult
-    {
     LESS,
     LESS_OR_EQUAL,
     EQUAL,
@@ -28,31 +24,47 @@ public:
     GREATER,
     NOT_EQUAL,
     UNKNOWN,
 };
 
-    static CompareResult atomToCompareResult(const CNFQuery::AtomicFormula & atom);
-    static CompareResult functionNameToCompareResult(const std::string & name);
-    static CompareResult inverseCompareResult(CompareResult result);
-
-    CompareResult compare(const ASTPtr & left, const ASTPtr & right) const;
+template <typename T>
+concept ComparisonGraphNodeType = std::same_as<T, ASTPtr> || std::same_as<T, QueryTreeNodePtr>;
+
+/*
+ * Graph of relations between terms in constraints.
+ * Allows to compare terms and get equal terms.
+ */
+template <ComparisonGraphNodeType Node>
+class ComparisonGraph
+{
+public:
+    static constexpr bool with_ast = std::same_as<Node, ASTPtr>;
+    using NodeContainer = std::conditional_t<with_ast, ASTs, QueryTreeNodes>;
+    using CNF = std::conditional_t<with_ast, CNFQuery, Analyzer::CNF>;
+
+    /// atomic_formulas are extracted from constraints.
+    explicit ComparisonGraph(const NodeContainer & atomic_formulas, ContextPtr context = nullptr);
+
+    static ComparisonGraphCompareResult atomToCompareResult(const typename CNF::AtomicFormula & atom);
+
+    ComparisonGraphCompareResult compare(const Node & left, const Node & right) const;
 
     /// It's possible that left <expected> right
-    bool isPossibleCompare(CompareResult expected, const ASTPtr & left, const ASTPtr & right) const;
+    bool isPossibleCompare(ComparisonGraphCompareResult expected, const Node & left, const Node & right) const;
 
     /// It's always true that left <expected> right
-    bool isAlwaysCompare(CompareResult expected, const ASTPtr & left, const ASTPtr & right) const;
+    bool isAlwaysCompare(ComparisonGraphCompareResult expected, const Node & left, const Node & right) const;
 
-    /// Returns all expressions from component to which @ast belongs if any.
-    ASTs getEqual(const ASTPtr & ast) const;
+    /// Returns all expressions from component to which @node belongs if any.
+    NodeContainer getEqual(const Node & node) const;
 
-    /// Returns constant expression from component to which @ast belongs if any.
-    std::optional<ASTPtr> getEqualConst(const ASTPtr & ast) const;
+    /// Returns constant expression from component to which @node belongs if any.
+    std::optional<Node> getEqualConst(const Node & node) const;
 
-    /// Finds component id to which @ast belongs if any.
-    std::optional<std::size_t> getComponentId(const ASTPtr & ast) const;
+    /// Finds component id to which @node belongs if any.
+    std::optional<std::size_t> getComponentId(const Node & node) const;
 
     /// Returns all expressions from component.
-    ASTs getComponent(size_t id) const;
+    NodeContainer getComponent(size_t id) const;
 
     size_t getNumOfComponents() const { return graph.vertices.size(); }
 
@@ -61,22 +73,22 @@ public:
     /// Find constants lessOrEqual and greaterOrEqual.
     /// For int and double linear programming can be applied here.
     /// Returns: {constant, is strict less/greater}
-    std::optional<std::pair<Field, bool>> getConstUpperBound(const ASTPtr & ast) const;
-    std::optional<std::pair<Field, bool>> getConstLowerBound(const ASTPtr & ast) const;
+    std::optional<std::pair<Field, bool>> getConstUpperBound(const Node & node) const;
+    std::optional<std::pair<Field, bool>> getConstLowerBound(const Node & node) const;
 
     /// Returns all expressions in graph.
-    std::vector<ASTs> getVertices() const;
+    std::vector<NodeContainer> getVertices() const;
 
 private:
     /// Strongly connected component
     struct EqualComponent
     {
         /// All these expressions are considered as equal.
-        ASTs asts;
+        NodeContainer nodes;
         std::optional<size_t> constant_index;
 
         bool hasConstant() const;
-        ASTPtr getConstant() const;
+        Node getConstant() const;
         void buildConstants();
     };
 
@@ -110,20 +122,29 @@ private:
         }
     };
 
-    std::unordered_map<IAST::Hash, size_t, ASTHash> ast_hash_to_component;
+    static auto getHash(const Node & node)
+    {
+        if constexpr (with_ast)
+            return node->getTreeHash();
+        else
+            return QueryTreeNodePtrWithHash{node};
+    }
+
+    using NodeHashToComponentContainer = std::conditional_t<with_ast, std::unordered_map<IAST::Hash, size_t, ASTHash>, QueryTreeNodePtrWithHashMap<size_t>>;
+    NodeHashToComponentContainer node_hash_to_component;
     std::vector<EqualComponent> vertices;
     std::vector<std::vector<Edge>> edges;
 };
 
 /// Receives graph, in which each vertex corresponds to one expression.
 /// Then finds strongly connected components and builds graph on them.
-static Graph buildGraphFromAstsGraph(const Graph & asts_graph);
+static Graph buildGraphFromNodesGraph(const Graph & nodes_graph);
 
-static Graph reverseGraph(const Graph & asts_graph);
+static Graph reverseGraph(const Graph & nodes_graph);
 
 /// The first part of finding strongly connected components.
 /// Finds order of exit from vertices of dfs traversal of graph.
-static void dfsOrder(const Graph & asts_graph, size_t v, std::vector<bool> & visited, std::vector<size_t> & order);
+static void dfsOrder(const Graph & nodes_graph, size_t v, std::vector<bool> & visited, std::vector<size_t> & order);
 
 using OptionalIndices = std::vector<std::optional<size_t>>;
 
@@ -139,13 +160,13 @@ private:
         GREATER_OR_EQUAL,
     };
 
-    static CompareResult pathToCompareResult(Path path, bool inverse);
+    static ComparisonGraphCompareResult pathToCompareResult(Path path, bool inverse);
     std::optional<Path> findPath(size_t start, size_t finish) const;
 
     /// Calculate @dists.
     static std::map<std::pair<size_t, size_t>, Path> buildDistsFromGraph(const Graph & g);
 
-    /// Calculate @ast_const_lower_bound and @ast_const_upper_bound.
+    /// Calculate @node_const_lower_bound and @node_const_upper_bound.
     std::pair<std::vector<ssize_t>, std::vector<ssize_t>> buildConstBounds() const;
 
     /// Directed acyclic graph in which each vertex corresponds
@@ -165,11 +186,11 @@ private:
 
     /// Maximal constant value for each component that
     /// is lower bound for all expressions in component.
-    std::vector<ssize_t> ast_const_lower_bound;
+    std::vector<ssize_t> node_const_lower_bound;
 
     /// Minimal constant value for each component that
     /// is upper bound for all expressions in component.
-    std::vector<ssize_t> ast_const_upper_bound;
+    std::vector<ssize_t> node_const_upper_bound;
 };
 
 }
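The header drives every AST- vs. query-tree-specific choice off the single `with_ast` flag, combining a concept with `std::conditional_t`. A reduced sketch of that pattern with illustrative stand-in types (not the real ClickHouse interfaces):

```cpp
#include <concepts>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>

// Stand-ins for the two node kinds (illustrative only).
struct Ast;
struct QueryTree;
using AstPtr = std::shared_ptr<Ast>;
using QueryTreePtr = std::shared_ptr<QueryTree>;

// The concept restricts instantiations to exactly the two supported node types.
template <typename T>
concept NodeType = std::same_as<T, AstPtr> || std::same_as<T, QueryTreePtr>;

template <NodeType Node>
class Graph
{
public:
    // One compile-time flag drives every per-instantiation type choice.
    static constexpr bool with_ast = std::same_as<Node, AstPtr>;
    using NodeContainer = std::conditional_t<with_ast, std::vector<AstPtr>, std::vector<QueryTreePtr>>;

    void add(Node node) { nodes.push_back(std::move(node)); }

private:
    NodeContainer nodes;
};

// Both instantiations compile from one definition, mirroring the explicit
// `template class ComparisonGraph<ASTPtr>; / <QueryTreeNodePtr>;` in the .cpp:
// Graph<AstPtr> g1;  Graph<QueryTreePtr> g2;
```

Keeping the explicit instantiations in the .cpp file lets the template's definitions stay out of the header while still serving both the old AST-based and the new analyzer-based constraint machinery.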
@@ -1022,10 +1022,10 @@ String DDLWorker::enqueueQuery(DDLLogEntry & entry)
     {
         String str_buf = node_path.substr(query_path_prefix.length());
         DB::ReadBufferFromString in(str_buf);
-        CurrentMetrics::Metric id;
-        readText(id, in);
-        id = std::max(*max_pushed_entry_metric, id);
-        CurrentMetrics::set(*max_pushed_entry_metric, id);
+        CurrentMetrics::Value pushed_entry;
+        readText(pushed_entry, in);
+        pushed_entry = std::max(CurrentMetrics::get(*max_pushed_entry_metric), pushed_entry);
+        CurrentMetrics::set(*max_pushed_entry_metric, pushed_entry);
     }
 
     /// We cannot create status dirs in a single transaction with previous request,
@@ -18,9 +18,12 @@ namespace DB
 QueryPipelineBuilder IInterpreterUnionOrSelectQuery::buildQueryPipeline()
 {
     QueryPlan query_plan;
+    return buildQueryPipeline(query_plan);
+}
+
+QueryPipelineBuilder IInterpreterUnionOrSelectQuery::buildQueryPipeline(QueryPlan & query_plan)
+{
     buildQueryPlan(query_plan);
 
     return std::move(*query_plan.buildQueryPipeline(
         QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)));
 }
@@ -35,6 +35,7 @@ public:
 
     virtual void buildQueryPlan(QueryPlan & query_plan) = 0;
     QueryPipelineBuilder buildQueryPipeline();
+    QueryPipelineBuilder buildQueryPipeline(QueryPlan & query_plan);
 
     virtual void ignoreWithTotals() = 0;
 
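The new overload lets a caller own the `QueryPlan` and inspect it after the pipeline is built, while the parameterless form keeps its old behaviour by delegating. A minimal sketch of that delegation shape, with stand-in types rather than the real interfaces:

```cpp
// Illustrative stand-ins, not the real ClickHouse types.
struct QueryPlan { /* ... */ };
struct QueryPipelineBuilder { /* ... */ };

struct Interpreter
{
    // Convenience overload: the plan is a throwaway local.
    QueryPipelineBuilder buildQueryPipeline()
    {
        QueryPlan query_plan;
        return buildQueryPipeline(query_plan);
    }

    // Worker overload: the caller owns the plan and can keep it
    // around (e.g. for EXPLAIN output) after the pipeline is built.
    QueryPipelineBuilder buildQueryPipeline(QueryPlan & query_plan)
    {
        buildQueryPlan(query_plan);
        return QueryPipelineBuilder{}; // build from the filled plan
    }

    void buildQueryPlan(QueryPlan &) { /* fill the plan */ }
};
```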
@@ -355,19 +355,22 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
         /// Flush should not be done if shouldBeEmptyOnDetach() == false,
         /// since in this case getTablesIterator() may do some additional work,
         /// see DatabaseMaterializedMySQL::getTablesIterator()
-        for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next())
-        {
-            iterator->table()->flush();
-        }
 
         auto table_context = Context::createCopy(getContext());
         table_context->setInternalQuery(true);
+        /// Do not hold extra shared pointers to tables
+        std::vector<std::pair<String, bool>> tables_to_drop;
         for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next())
         {
+            iterator->table()->flush();
+            tables_to_drop.push_back({iterator->name(), iterator->table()->isDictionary()});
+        }
+
+        for (const auto & table : tables_to_drop)
+        {
+            query_for_table.setTable(table.first);
+            query_for_table.is_dictionary = table.second;
             DatabasePtr db;
             UUID table_to_wait = UUIDHelpers::Nil;
-            query_for_table.setTable(iterator->name());
-            query_for_table.is_dictionary = iterator->table()->isDictionary();
             executeToTableImpl(table_context, query_for_table, db, table_to_wait);
             uuids_to_wait.push_back(table_to_wait);
         }
@@ -428,7 +431,8 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() const
     return required_access;
 }
 
-void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool sync)
+void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context,
+    const StorageID & target_table_id, bool sync, bool ignore_sync_setting)
 {
     if (DatabaseCatalog::instance().tryGetTable(target_table_id, current_context))
     {
@@ -445,6 +449,8 @@ void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr
         /// and not allowed to drop inner table explicitly. Allowing to drop inner table without explicit grant
         /// looks like expected behaviour and we have tests for it.
         auto drop_context = Context::createCopy(global_context);
+        if (ignore_sync_setting)
+            drop_context->setSetting("database_atomic_wait_for_drop_and_detach_synchronously", false);
         drop_context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY;
         if (auto txn = current_context->getZooKeeperMetadataTransaction())
         {
@@ -24,7 +24,8 @@ public:
     /// Drop table or database.
     BlockIO execute() override;
 
-    static void executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool sync);
+    static void executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context,
+        const StorageID & target_table_id, bool sync, bool ignore_sync_setting = false);
 
     bool supportsTransactions() const override;
 
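The rewritten drop loop first snapshots plain `(name, is_dictionary)` pairs and only then issues the drops, so no `StoragePtr` obtained from the iterator is still alive while a table is being dropped. A reduced sketch of this two-phase pattern, with illustrative types:

```cpp
#include <cstdio>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Table
{
    std::string name;
    bool is_dictionary = false;
    void flush() { /* persist in-memory buffers */ }
};

void dropAllTables(const std::vector<std::shared_ptr<Table>> & catalog)
{
    // Phase 1: walk the catalog once, flush, and record plain metadata only.
    std::vector<std::pair<std::string, bool>> tables_to_drop;
    for (const auto & table : catalog)
    {
        table->flush();
        tables_to_drop.emplace_back(table->name, table->is_dictionary);
    }

    // Phase 2: drop by name; the drop sees no extra shared references
    // held by an iterator from phase 1.
    for (const auto & [name, is_dictionary] : tables_to_drop)
        std::printf("DROP %s %s\n", is_dictionary ? "DICTIONARY" : "TABLE", name.c_str());
}
```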
@@ -21,6 +21,7 @@
 #include <Parsers/ASTShowEngineQuery.h>
 #include <Parsers/ASTShowProcesslistQuery.h>
 #include <Parsers/ASTShowTablesQuery.h>
+#include <Parsers/ASTShowColumnsQuery.h>
 #include <Parsers/ASTUseQuery.h>
 #include <Parsers/ASTWatchQuery.h>
 #include <Parsers/ASTCreateNamedCollectionQuery.h>
@@ -79,6 +80,7 @@
 #include <Interpreters/InterpreterShowEngineQuery.h>
 #include <Interpreters/InterpreterShowProcesslistQuery.h>
 #include <Interpreters/InterpreterShowTablesQuery.h>
+#include <Interpreters/InterpreterShowColumnsQuery.h>
 #include <Interpreters/InterpreterSystemQuery.h>
 #include <Interpreters/InterpreterUseQuery.h>
 #include <Interpreters/InterpreterWatchQuery.h>
@@ -175,6 +177,10 @@ std::unique_ptr<IInterpreter> InterpreterFactory::get(ASTPtr & query, ContextMut
     {
         return std::make_unique<InterpreterShowTablesQuery>(query, context);
     }
+    else if (query->as<ASTShowColumnsQuery>())
+    {
+        return std::make_unique<InterpreterShowColumnsQuery>(query, context);
+    }
     else if (query->as<ASTShowEnginesQuery>())
     {
         return std::make_unique<InterpreterShowEnginesQuery>(query, context);
@@ -518,15 +518,13 @@ InterpreterSelectQuery::InterpreterSelectQuery(
             settings.additional_table_filters, joined_tables.tablesWithColumns().front().table, *context);
 
     ASTPtr parallel_replicas_custom_filter_ast = nullptr;
-    if (context->getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY && !joined_tables.tablesWithColumns().empty())
+    if (storage && context->getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY && !joined_tables.tablesWithColumns().empty())
     {
         if (settings.parallel_replicas_count > 1)
         {
             if (auto custom_key_ast = parseCustomKeyForTable(settings.parallel_replicas_custom_key, *context))
             {
                 LOG_TRACE(log, "Processing query on a replica using custom_key '{}'", settings.parallel_replicas_custom_key.value);
-                if (!storage)
-                    throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Storage is unknown when trying to parse custom key for parallel replica");
 
                 parallel_replicas_custom_filter_ast = getCustomKeyFilterForParallelReplica(
                     settings.parallel_replicas_count,
@@ -72,6 +72,7 @@ public:
     void setProperClientInfo(size_t replica_number, size_t count_participating_replicas);
 
     const Planner & getPlanner() const { return planner; }
+    Planner & getPlanner() { return planner; }
 
 private:
     ASTPtr query;
103 src/Interpreters/InterpreterShowColumnsQuery.cpp Normal file
@@ -0,0 +1,103 @@
+#include <Interpreters/InterpreterShowColumnsQuery.h>
+
+#include <IO/WriteBufferFromString.h>
+#include <Parsers/ASTShowColumnsQuery.h>
+#include <Parsers/formatAST.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/executeQuery.h>
+#include <IO/Operators.h>
+#include <boost/algorithm/string.hpp>
+
+
+namespace DB
+{
+
+
+InterpreterShowColumnsQuery::InterpreterShowColumnsQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_)
+    : WithMutableContext(context_)
+    , query_ptr(query_ptr_)
+{
+}
+
+
+String InterpreterShowColumnsQuery::getRewrittenQuery()
+{
+    const auto & query = query_ptr->as<ASTShowColumnsQuery &>();
+
+    WriteBufferFromOwnString rewritten_query;
+
+    rewritten_query << "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra ";
+
+    // TODO Interpret query.extended. It is supposed to show internal/virtual columns. Need to fetch virtual column names, see
+    // IStorage::getVirtuals(). We can't easily do that via SQL.
+
+    if (query.full)
+    {
+        /// "Full" mode is mostly for MySQL compat
+        /// - collation: no such thing in ClickHouse
+        /// - comment
+        /// - privileges: <not implemented, TODO ask system.grants>
+        rewritten_query << ", NULL AS collation, comment, '' AS privileges ";
+    }
+
+    rewritten_query << "FROM system.columns WHERE ";
+
+    String database;
+    String table;
+    if (query.from_table.contains("."))
+    {
+        /// FROM <db>.<table> (abbreviated form)
+        chassert(query.from_database.empty());
+        std::vector<String> split;
+        boost::split(split, query.from_table, boost::is_any_of("."));
+        chassert(split.size() == 2);
+        database = split[0];
+        table = split[1];
+    }
+    else if (query.from_database.empty())
+    {
+        /// FROM <table>
+        chassert(!query.from_table.empty());
+        database = getContext()->getCurrentDatabase();
+        table = query.from_table;
+    }
+    else
+    {
+        /// FROM <database> FROM <table>
+        chassert(!query.from_database.empty());
+        chassert(!query.from_table.empty());
+        database = query.from_database;
+        table = query.from_table;
+    }
+    rewritten_query << "database = " << DB::quote << database;
+    rewritten_query << " AND table = " << DB::quote << table;
+
+    if (!query.like.empty())
+        rewritten_query
+            << " AND name "
+            << (query.not_like ? "NOT " : "")
+            << (query.case_insensitive_like ? "ILIKE " : "LIKE ")
+            << DB::quote << query.like;
+    else if (query.where_expression)
+        rewritten_query << " AND (" << query.where_expression << ")";
+
+    /// Sorting is strictly speaking not necessary but 1. it is convenient for users, 2. SQL currently does not allow to
+    /// sort the output of SHOW COLUMNS otherwise (SELECT * FROM (SHOW COLUMNS ...) ORDER BY ...) is rejected) and 3. some
+    /// SQL tests can take advantage of this.
+    rewritten_query << " ORDER BY field, type, null, key, default, extra";
+
+    if (query.limit_length)
+        rewritten_query << " LIMIT " << query.limit_length;
+
+    return rewritten_query.str();
+
+}
+
+
+BlockIO InterpreterShowColumnsQuery::execute()
+{
+    return executeQuery(getRewrittenQuery(), getContext(), true);
+}
+
+
+}
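For orientation, roughly what `getRewrittenQuery()` assembles for `SHOW COLUMNS FROM tab LIKE '%id%'`, assuming the current database is `default` (reconstructed from the code above, whitespace added for readability):

```cpp
// Hypothetical rewritten query for: SHOW COLUMNS FROM tab LIKE '%id%'
const char * rewritten =
    "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, "
    "trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, "
    "if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra "
    "FROM system.columns "
    "WHERE database = 'default' AND table = 'tab' AND name LIKE '%id%' "
    "ORDER BY field, type, null, key, default, extra";
```

Because the interpreter merely rewrites into a plain SELECT over `system.columns`, quota and limit checks can be deferred to that SELECT, which is exactly why `ignoreQuota()`/`ignoreLimits()` return true in the header below.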
32 src/Interpreters/InterpreterShowColumnsQuery.h Normal file
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <Interpreters/IInterpreter.h>
+#include <Parsers/IAST_fwd.h>
+
+
+namespace DB
+{
+
+class Context;
+
+
+/// Returns a list of columns which meet some conditions.
+class InterpreterShowColumnsQuery : public IInterpreter, WithMutableContext
+{
+public:
+    InterpreterShowColumnsQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_);
+
+    BlockIO execute() override;
+
+    /// Ignore quota and limits here because execute() produces a SELECT query which checks quotas/limits by itself.
+    bool ignoreQuota() const override { return true; }
+    bool ignoreLimits() const override { return true; }
+
+private:
+    ASTPtr query_ptr;
+
+    String getRewrittenQuery();
+};
+
+
+}
@@ -12,7 +12,7 @@ namespace DB
 
 BlockIO InterpreterShowProcesslistQuery::execute()
 {
-    return executeQuery("SELECT * FROM system.processes", getContext(), true);
+    return executeQuery("SELECT * FROM system.processes ORDER BY elapsed DESC", getContext(), true);
 }
 
 }
@@ -1,4 +1,4 @@
-#include <IO/ReadBufferFromString.h>
+#include <IO/WriteBufferFromString.h>
 #include <Parsers/ASTShowTablesQuery.h>
 #include <Parsers/formatAST.h>
 #include <Interpreters/Context.h>
@@ -24,7 +24,8 @@ namespace ErrorCodes
 
 
 InterpreterShowTablesQuery::InterpreterShowTablesQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_)
-    : WithMutableContext(context_), query_ptr(query_ptr_)
+    : WithMutableContext(context_)
+    , query_ptr(query_ptr_)
 {
 }
 
@@ -20,8 +20,7 @@ public:
 
     BlockIO execute() override;
 
-    /// We ignore the quota and limits here because execute() will rewrite a show query as a SELECT query and then
-    /// the SELECT query will checks the quota and limits.
+    /// Ignore quota and limits here because execute() produces a SELECT query which checks quotas/limits by itself.
     bool ignoreQuota() const override { return true; }
     bool ignoreLimits() const override { return true; }
 
@ -604,6 +604,7 @@ void InterpreterSystemQuery::restoreReplica()
|
|||||||
|
|
||||||
StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, ContextMutablePtr system_context, bool need_ddl_guard)
|
StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, ContextMutablePtr system_context, bool need_ddl_guard)
|
||||||
{
|
{
|
||||||
|
LOG_TRACE(log, "Restarting replica {}", replica);
|
||||||
auto table_ddl_guard = need_ddl_guard
|
auto table_ddl_guard = need_ddl_guard
|
||||||
? DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName())
|
? DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName())
|
||||||
: nullptr;
|
: nullptr;
|
||||||
@ -647,6 +648,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica,
|
|||||||
database->attachTable(system_context, replica.table_name, table, data_path);
|
database->attachTable(system_context, replica.table_name, table, data_path);
|
||||||
|
|
||||||
table->startup();
|
table->startup();
|
||||||
|
LOG_TRACE(log, "Restarted replica {}", replica);
|
||||||
return table;
|
return table;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -693,11 +695,11 @@ void InterpreterSystemQuery::restartReplicas(ContextMutablePtr system_context)
         guard.second = catalog.getDDLGuard(guard.first.database_name, guard.first.table_name);
 
     size_t threads = std::min(static_cast<size_t>(getNumberOfPhysicalCPUCores()), replica_names.size());
+    LOG_DEBUG(log, "Will restart {} replicas using {} threads", replica_names.size(), threads);
     ThreadPool pool(CurrentMetrics::RestartReplicaThreads, CurrentMetrics::RestartReplicaThreadsActive, threads);
 
     for (auto & replica : replica_names)
     {
-        LOG_TRACE(log, "Restarting replica on {}", replica.getNameForLogs());
         pool.scheduleOrThrowOnError([&]() { tryRestartReplica(replica, system_context, false); });
     }
     pool.wait();
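
The hunk above sizes the pool as min(physical cores, number of replicas) and schedules one tryRestartReplica() call per replica, waiting on the pool at the end. A minimal standalone sketch of that sizing-and-scheduling pattern, using std::thread in place of ClickHouse's ThreadPool (names and work items below are illustrative, not from the commit):

    #include <algorithm>
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main()
    {
        std::vector<int> replicas{1, 2, 3, 4, 5};

        // Never start more workers than there are cores or tasks.
        size_t cores = std::thread::hardware_concurrency();
        if (cores == 0)
            cores = 1; // hardware_concurrency() may be unknown
        size_t threads = std::min(cores, replicas.size());

        std::atomic<size_t> next{0};
        std::vector<std::thread> pool;
        for (size_t t = 0; t < threads; ++t)
            pool.emplace_back([&]
            {
                // Each worker claims the next unprocessed replica index.
                for (size_t i; (i = next.fetch_add(1)) < replicas.size();)
                    std::printf("restarting replica %d\n", replicas[i]);
            });

        for (auto & worker : pool)
            worker.join(); // counterpart of pool.wait() in the hunk
    }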
@@ -896,7 +898,7 @@ void InterpreterSystemQuery::syncReplica(ASTSystemQuery & query)
     {
         LOG_TRACE(log, "Synchronizing entries in replica's queue with table's log and waiting for current last entry to be processed");
         auto sync_timeout = getContext()->getSettingsRef().receive_timeout.totalMilliseconds();
-        if (!storage_replicated->waitForProcessingQueue(sync_timeout, query.strict_sync))
+        if (!storage_replicated->waitForProcessingQueue(sync_timeout, query.sync_replica_mode))
         {
             LOG_ERROR(log, "SYNC REPLICA {}: Timed out!", table_id.getNameForLogs());
             throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "SYNC REPLICA {}: command timed out. " \
|
@ -50,7 +50,7 @@ void MetricLogElement::appendToBlock(MutableColumns & columns) const
|
|||||||
columns[column_idx++]->insert(profile_events[i]);
|
columns[column_idx++]->insert(profile_events[i]);
|
||||||
|
|
||||||
for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i)
|
for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i)
|
||||||
columns[column_idx++]->insert(current_metrics[i]);
|
columns[column_idx++]->insert(current_metrics[i].toUnderType());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -97,7 +97,7 @@ void MetricLog::metricThreadFunction()
         elem.milliseconds = timeInMilliseconds(current_time) - timeInSeconds(current_time) * 1000;
 
         elem.profile_events.resize(ProfileEvents::end());
-        for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i)
+        for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i)
         {
             const ProfileEvents::Count new_value = ProfileEvents::global_counters[i].load(std::memory_order_relaxed);
             auto & old_value = prev_profile_events[i];
@@ -32,7 +32,7 @@ void dumpToMapColumn(const Counters::Snapshot & counters, DB::IColumn * column,
     auto & value_column = tuple_column.getColumn(1);
 
     size_t size = 0;
-    for (Event event = 0; event < Counters::num_counters; ++event)
+    for (Event event = Event(0); event < Counters::num_counters; ++event)
     {
         UInt64 value = counters[event];
 
@@ -54,7 +54,7 @@ static void dumpProfileEvents(ProfileEventsSnapshot const & snapshot, DB::Mutabl
     size_t rows = 0;
     auto & name_column = columns[NAME_COLUMN_INDEX];
     auto & value_column = columns[VALUE_COLUMN_INDEX];
-    for (Event event = 0; event < Counters::num_counters; ++event)
+    for (Event event = Event(0); event < Counters::num_counters; ++event)
    {
         Int64 value = snapshot.counters[event];
 
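
These two loops, like the MetricLog hunk above, stop seeding the counter index from a bare `0`: together with `current_metrics[i].toUnderType()` and the `CurrentMetrics::Value` → `CurrentMetrics::Metric` renames further down, this points at the event/metric indices becoming strong typedefs that no longer convert implicitly from integers. A standalone sketch of why call sites then need the explicit `Event(0)` (this `Event` is a simplified stand-in, not ClickHouse's implementation):

    #include <cstddef>
    #include <cstdio>

    // Simplified strong typedef: wraps an index, no implicit conversion from it.
    struct Event
    {
        size_t value = 0;
        explicit Event(size_t v) : value(v) {}
        Event & operator++() { ++value; return *this; }
        bool operator<(Event rhs) const { return value < rhs.value; }
        size_t toUnderType() const { return value; }
    };

    int main()
    {
        const Event end(4);
        // for (Event e = 0; ...)              // would not compile: the constructor is explicit
        for (Event e = Event(0); e < end; ++e) // hence Event(0) in the hunks above
            std::printf("event %zu\n", e.toUnderType());
    }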
@@ -32,13 +32,13 @@ public:
 
     struct Data
     {
-        const ComparisonGraph & graph;
+        const ComparisonGraph<ASTPtr> & graph;
         std::set<UInt64> & components;
         std::unordered_map<String, String> & old_name;
         std::unordered_map<String, UInt64> & component;
        UInt64 & current_id;
 
-        Data(const ComparisonGraph & graph_,
+        Data(const ComparisonGraph<ASTPtr> & graph_,
             std::set<UInt64> & components_,
             std::unordered_map<String, String> & old_name_,
             std::unordered_map<String, UInt64> & component_,
@@ -165,7 +165,7 @@ ColumnPrice calculatePrice(
 /// price of all columns on which ast depends.
 /// TODO: branch-and-bound
 void bruteforce(
-    const ComparisonGraph & graph,
+    const ComparisonGraph<ASTPtr> & graph,
     const std::vector<UInt64> & components,
     size_t current_component,
     const ColumnPriceByName & column_prices,
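
Call sites in these hunks switch from plain `ComparisonGraph` to `ComparisonGraph<ASTPtr>`, i.e. the graph class now takes the node type as a template parameter, presumably so the same constraint machinery can be instantiated over other node representations. A minimal sketch of that kind of retrofit (hypothetical skeleton, not the real class):

    #include <memory>
    #include <utility>
    #include <vector>

    struct IAST {};
    using ASTPtr = std::shared_ptr<IAST>;

    // Before: the node type ASTPtr was baked in; after, it is a template
    // parameter, and existing users spell out ComparisonGraph<ASTPtr>.
    template <typename Node>
    class ComparisonGraph
    {
    public:
        explicit ComparisonGraph(std::vector<Node> nodes_) : nodes(std::move(nodes_)) {}

    private:
        std::vector<Node> nodes;
    };

    int main()
    {
        ComparisonGraph<ASTPtr> graph({std::make_shared<IAST>()});
        (void)graph;
    }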
@@ -49,7 +49,7 @@ TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_)
     : TemporaryDataOnDiskScope(std::move(parent_), /* limit_ = */ 0)
 {}
 
-TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Value metric_scope)
+TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope)
     : TemporaryDataOnDiskScope(std::move(parent_), /* limit_ = */ 0)
     , current_metric_scope(metric_scope)
 {}
@@ -85,7 +85,7 @@ public:
 
     explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_);
 
-    explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Value metric_scope);
+    explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope);
 
     /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space
     TemporaryFileStream & createStream(const Block & header, size_t max_file_size = 0);
@@ -102,7 +102,7 @@ private:
     mutable std::mutex mutex;
     std::vector<TemporaryFileStreamPtr> streams TSA_GUARDED_BY(mutex);
 
-    typename CurrentMetrics::Value current_metric_scope = CurrentMetrics::TemporaryFilesUnknown;
+    typename CurrentMetrics::Metric current_metric_scope = CurrentMetrics::TemporaryFilesUnknown;
 };
 
 /*
@@ -360,80 +360,14 @@ CNFQuery & CNFQuery::pushNotInFunctions()
     return *this;
 }
 
-namespace
-{
-CNFQuery::AndGroup reduceOnce(const CNFQuery::AndGroup & groups)
-{
-    CNFQuery::AndGroup result;
-    for (const CNFQuery::OrGroup & group : groups)
-    {
-        CNFQuery::OrGroup copy(group);
-        bool inserted = false;
-        for (const CNFQuery::AtomicFormula & atom : group)
-        {
-            copy.erase(atom);
-            CNFQuery::AtomicFormula negative_atom(atom);
-            negative_atom.negative = !atom.negative;
-            copy.insert(negative_atom);
-
-            if (groups.contains(copy))
-            {
-                copy.erase(negative_atom);
-                result.insert(copy);
-                inserted = true;
-                break;
-            }
-
-            copy.erase(negative_atom);
-            copy.insert(atom);
-        }
-        if (!inserted)
-            result.insert(group);
-    }
-    return result;
-}
-
-bool isSubset(const CNFQuery::OrGroup & left, const CNFQuery::OrGroup & right)
-{
-    if (left.size() > right.size())
-        return false;
-    for (const auto & elem : left)
-        if (!right.contains(elem))
-            return false;
-    return true;
-}
-
-CNFQuery::AndGroup filterSubsets(const CNFQuery::AndGroup & groups)
-{
-    CNFQuery::AndGroup result;
-    for (const CNFQuery::OrGroup & group : groups)
-    {
-        bool insert = true;
-
-        for (const CNFQuery::OrGroup & other_group : groups)
-        {
-            if (isSubset(other_group, group) && group != other_group)
-            {
-                insert = false;
-                break;
-            }
-        }
-
-        if (insert)
-            result.insert(group);
-    }
-    return result;
-}
-}
-
 CNFQuery & CNFQuery::reduce()
 {
     while (true)
     {
-        AndGroup new_statements = reduceOnce(statements);
+        AndGroup new_statements = reduceOnceCNFStatements(statements);
         if (statements == new_statements)
         {
-            statements = filterSubsets(statements);
+            statements = filterCNFSubsets(statements);
             return *this;
         }
         else
@@ -164,4 +164,72 @@ public:
 
     void pushNotIn(CNFQuery::AtomicFormula & atom);
 
+template <typename TAndGroup>
+TAndGroup reduceOnceCNFStatements(const TAndGroup & groups)
+{
+    TAndGroup result;
+    for (const auto & group : groups)
+    {
+        using GroupType = std::decay_t<decltype(group)>;
+        GroupType copy(group);
+        bool inserted = false;
+        for (const auto & atom : group)
+        {
+            copy.erase(atom);
+            using AtomType = std::decay_t<decltype(atom)>;
+            AtomType negative_atom(atom);
+            negative_atom.negative = !atom.negative;
+            copy.insert(negative_atom);
+
+            if (groups.contains(copy))
+            {
+                copy.erase(negative_atom);
+                result.insert(copy);
+                inserted = true;
+                break;
+            }
+
+            copy.erase(negative_atom);
+            copy.insert(atom);
+        }
+        if (!inserted)
+            result.insert(group);
+    }
+    return result;
+}
+
+template <typename TOrGroup>
+bool isCNFGroupSubset(const TOrGroup & left, const TOrGroup & right)
+{
+    if (left.size() > right.size())
+        return false;
+    for (const auto & elem : left)
+        if (!right.contains(elem))
+            return false;
+    return true;
+}
+
+template <typename TAndGroup>
+TAndGroup filterCNFSubsets(const TAndGroup & groups)
+{
+    TAndGroup result;
+    for (const auto & group : groups)
+    {
+        bool insert = true;
+
+        for (const auto & other_group : groups)
+        {
+            if (isCNFGroupSubset(other_group, group) && group != other_group)
+            {
+                insert = false;
+                break;
+            }
+        }
+
+        if (insert)
+            result.insert(group);
+    }
+    return result;
+}
+
 }
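
`reduceOnceCNFStatements` performs one resolution pass over the CNF: whenever both (G ∨ a) and (G ∨ ¬a) are present, they collapse to the shorter clause G; `filterCNFSubsets` then drops clauses subsumed by a subset clause. A self-contained sketch of the resolution step, with plain std::set standing in for the atom/group types (illustrative types, same control flow as the template above):

    #include <compare>
    #include <cstdio>
    #include <set>

    // An atom is an identifier plus a negation flag; an OR-group is a set of
    // atoms; a CNF is a set of OR-groups.
    struct Atom
    {
        int id;
        bool negative;
        auto operator<=>(const Atom &) const = default;
    };
    using OrGroup = std::set<Atom>;
    using AndGroup = std::set<OrGroup>;

    AndGroup reduceOnce(const AndGroup & groups)
    {
        AndGroup result;
        for (const auto & group : groups)
        {
            OrGroup copy(group);
            bool inserted = false;
            for (const auto & atom : group)
            {
                // Flip the atom; if the flipped clause also exists, both are
                // implied by the clause without this atom at all.
                copy.erase(atom);
                Atom negative_atom{atom.id, !atom.negative};
                copy.insert(negative_atom);
                if (groups.contains(copy))
                {
                    copy.erase(negative_atom);
                    result.insert(copy);
                    inserted = true;
                    break;
                }
                copy.erase(negative_atom);
                copy.insert(atom);
            }
            if (!inserted)
                result.insert(group);
        }
        return result;
    }

    int main()
    {
        // (x OR y) AND (x OR NOT y)  reduces to  (x)
        AndGroup cnf{{{0, false}, {1, false}}, {{0, false}, {1, true}}};
        AndGroup reduced = reduceOnce(cnf);
        std::printf("%zu clause(s), first has %zu atom(s)\n",
                    reduced.size(), reduced.begin()->size());
    }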
@@ -74,7 +74,7 @@ bool checkIfGroupAlwaysTrueFullMatch(const CNFQuery::OrGroup & group, const Cons
     return false;
 }
 
-bool checkIfGroupAlwaysTrueGraph(const CNFQuery::OrGroup & group, const ComparisonGraph & graph)
+bool checkIfGroupAlwaysTrueGraph(const CNFQuery::OrGroup & group, const ComparisonGraph<ASTPtr> & graph)
 {
     /// We try to find at least one atom that is always true by using comparison graph.
     for (const auto & atom : group)
@@ -82,7 +82,7 @@ bool checkIfGroupAlwaysTrueGraph(const CNFQuery::OrGroup & group, const Comparis
         const auto * func = atom.ast->as<ASTFunction>();
         if (func && func->arguments->children.size() == 2)
         {
-            const auto expected = ComparisonGraph::atomToCompareResult(atom);
+            const auto expected = ComparisonGraph<ASTPtr>::atomToCompareResult(atom);
             if (graph.isAlwaysCompare(expected, func->arguments->children[0], func->arguments->children[1]))
                 return true;
         }
@@ -108,20 +108,20 @@ bool checkIfAtomAlwaysFalseFullMatch(const CNFQuery::AtomicFormula & atom, const
     return false;
 }
 
-bool checkIfAtomAlwaysFalseGraph(const CNFQuery::AtomicFormula & atom, const ComparisonGraph & graph)
+bool checkIfAtomAlwaysFalseGraph(const CNFQuery::AtomicFormula & atom, const ComparisonGraph<ASTPtr> & graph)
 {
     const auto * func = atom.ast->as<ASTFunction>();
     if (func && func->arguments->children.size() == 2)
     {
         /// TODO: special support for !=
-        const auto expected = ComparisonGraph::atomToCompareResult(atom);
+        const auto expected = ComparisonGraph<ASTPtr>::atomToCompareResult(atom);
         return !graph.isPossibleCompare(expected, func->arguments->children[0], func->arguments->children[1]);
     }
 
     return false;
 }
 
-void replaceToConstants(ASTPtr & term, const ComparisonGraph & graph)
+void replaceToConstants(ASTPtr & term, const ComparisonGraph<ASTPtr> & graph)
 {
     const auto equal_constant = graph.getEqualConst(term);
     if (equal_constant)
@@ -135,7 +135,7 @@ void replaceToConstants(ASTPtr & term, const ComparisonGraph & graph)
     }
 }
 
-CNFQuery::AtomicFormula replaceTermsToConstants(const CNFQuery::AtomicFormula & atom, const ComparisonGraph & graph)
+CNFQuery::AtomicFormula replaceTermsToConstants(const CNFQuery::AtomicFormula & atom, const ComparisonGraph<ASTPtr> & graph)
 {
     CNFQuery::AtomicFormula result;
     result.negative = atom.negative;
@@ -37,6 +37,18 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }
 
+static ZooKeeperRetriesInfo getRetriesInfo()
+{
+    const auto & config_ref = Context::getGlobalContextInstance()->getConfigRef();
+    return ZooKeeperRetriesInfo(
+        "DistributedDDL",
+        &Poco::Logger::get("DDLQueryStatusSource"),
+        config_ref.getInt("distributed_ddl_keeper_max_retries", 5),
+        config_ref.getInt("distributed_ddl_keeper_initial_backoff_ms", 100),
+        config_ref.getInt("distributed_ddl_keeper_max_backoff_ms", 5000)
+    );
+}
+
 bool isSupportedAlterType(int type)
 {
     assert(type != ASTAlterCommand::NO_TYPE);
@@ -174,7 +186,7 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context,
     entry.tracing_context = OpenTelemetry::CurrentContext();
     String node_path = ddl_worker.enqueueQuery(entry);
 
-    return getDistributedDDLStatus(node_path, entry, context);
+    return getDistributedDDLStatus(node_path, entry, context, /* hosts_to_wait */ nullptr);
 }
 
 
@@ -182,7 +194,7 @@ class DDLQueryStatusSource final : public ISource
 {
 public:
     DDLQueryStatusSource(
-        const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const std::optional<Strings> & hosts_to_wait = {});
+        const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const Strings * hosts_to_wait);
 
     String getName() const override { return "DDLQueryStatus"; }
     Chunk generate() override;
@@ -230,7 +242,7 @@ private:
 };
 
 
-BlockIO getDistributedDDLStatus(const String & node_path, const DDLLogEntry & entry, ContextPtr context, const std::optional<Strings> & hosts_to_wait)
+BlockIO getDistributedDDLStatus(const String & node_path, const DDLLogEntry & entry, ContextPtr context, const Strings * hosts_to_wait)
 {
     BlockIO io;
     if (context->getSettingsRef().distributed_ddl_task_timeout == 0)
@@ -291,8 +303,8 @@ Block DDLQueryStatusSource::getSampleBlock(ContextPtr context_, bool hosts_to_wa
 }
 
 DDLQueryStatusSource::DDLQueryStatusSource(
-    const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const std::optional<Strings> & hosts_to_wait)
-    : ISource(getSampleBlock(context_, hosts_to_wait.has_value()))
+    const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const Strings * hosts_to_wait)
+    : ISource(getSampleBlock(context_, static_cast<bool>(hosts_to_wait)))
     , node_path(zk_node_path)
     , context(context_)
     , watch(CLOCK_MONOTONIC_COARSE)
@@ -380,7 +392,6 @@ Chunk DDLQueryStatusSource::generate()
     if (is_replicated_database && context->getSettingsRef().database_replicated_enforce_synchronous_settings)
         node_to_wait = "synced";
 
-    auto zookeeper = context->getZooKeeper();
     size_t try_number = 0;
 
     while (true)
@@ -420,7 +431,23 @@ Chunk DDLQueryStatusSource::generate()
             sleepForMilliseconds(std::min<size_t>(1000, 50 * (try_number + 1)));
         }
 
-        if (!zookeeper->exists(node_path))
+        bool node_exists = false;
+        Strings tmp_hosts;
+        Strings tmp_active_hosts;
+
+        {
+            auto retries_info = getRetriesInfo();
+            auto retries_ctl = ZooKeeperRetriesControl("executeDDLQueryOnCluster", retries_info);
+            retries_ctl.retryLoop([&]()
+            {
+                auto zookeeper = context->getZooKeeper();
+                node_exists = zookeeper->exists(node_path);
+                tmp_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / node_to_wait);
+                tmp_active_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / "active");
+            });
+        }
+
+        if (!node_exists)
         {
             /// Paradoxically, this exception will be throw even in case of "never_throw" mode.
 
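
Note that a fresh session is taken inside the loop (`context->getZooKeeper()` is now called within `retryLoop`), which is why the earlier hunk deletes the long-lived `zookeeper` variable before `while (true)`. A standalone sketch of the retry-with-backoff pattern that `getRetriesInfo()` parameterizes (simplified: the real ZooKeeperRetriesControl also distinguishes retriable Keeper errors from fatal ones):

    #include <algorithm>
    #include <chrono>
    #include <cstdio>
    #include <functional>
    #include <stdexcept>
    #include <thread>

    // Mirrors the shape of ZooKeeperRetriesInfo: an attempt budget plus a
    // backoff that doubles up to a ceiling.
    struct RetriesInfo
    {
        int max_retries = 5;
        int initial_backoff_ms = 100;
        int max_backoff_ms = 5000;
    };

    void retryLoop(const RetriesInfo & info, const std::function<void()> & op)
    {
        int backoff = info.initial_backoff_ms;
        for (int attempt = 0;; ++attempt)
        {
            try
            {
                op(); // e.g. re-acquire a session and re-read the status nodes
                return;
            }
            catch (const std::runtime_error &)
            {
                if (attempt >= info.max_retries)
                    throw; // budget exhausted: surface the last error
                std::this_thread::sleep_for(std::chrono::milliseconds(backoff));
                backoff = std::min(backoff * 2, info.max_backoff_ms);
            }
        }
    }

    int main()
    {
        int failures_left = 2;
        retryLoop({}, [&]
        {
            if (failures_left-- > 0)
                throw std::runtime_error("session expired");
            std::puts("read node_path children");
        });
    }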
@@ -432,12 +459,12 @@ Chunk DDLQueryStatusSource::generate()
             return {};
         }
 
-        Strings new_hosts = getNewAndUpdate(getChildrenAllowNoNode(zookeeper, fs::path(node_path) / node_to_wait));
+        Strings new_hosts = getNewAndUpdate(tmp_hosts);
         ++try_number;
         if (new_hosts.empty())
             continue;
 
-        current_active_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / "active");
+        current_active_hosts = std::move(tmp_active_hosts);
 
         MutableColumns columns = output.getHeader().cloneEmptyColumns();
         for (const String & host_id : new_hosts)
@@ -447,7 +474,15 @@ Chunk DDLQueryStatusSource::generate()
             if (node_to_wait == "finished")
             {
                 String status_data;
-                if (zookeeper->tryGet(fs::path(node_path) / "finished" / host_id, status_data))
+                bool finished_exists = false;
+
+                auto retries_info = getRetriesInfo();
+                auto retries_ctl = ZooKeeperRetriesControl("executeDDLQueryOnCluster", retries_info);
+                retries_ctl.retryLoop([&]()
+                {
+                    finished_exists = context->getZooKeeper()->tryGet(fs::path(node_path) / "finished" / host_id, status_data);
+                });
+                if (finished_exists)
                     status.tryDeserializeText(status_data);
             }
             else
@@ -5,6 +5,7 @@
 #include <Processors/ISource.h>
 #include <Interpreters/Context_fwd.h>
 #include <Parsers/IAST_fwd.h>
+#include <Storages/MergeTree/ZooKeeperRetries.h>
 
 
 namespace zkutil
@@ -42,8 +43,7 @@ struct DDLQueryOnClusterParams
 /// Returns DDLQueryStatusSource, which reads results of query execution on each host in the cluster.
 BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, ContextPtr context, const DDLQueryOnClusterParams & params = {});
 
-BlockIO getDistributedDDLStatus(
-    const String & node_path, const DDLLogEntry & entry, ContextPtr context, const std::optional<Strings> & hosts_to_wait = {});
+BlockIO getDistributedDDLStatus(const String & node_path, const DDLLogEntry & entry, ContextPtr context, const Strings * hosts_to_wait);
 
 bool maybeRemoveOnCluster(const ASTPtr & query_ptr, ContextPtr context);
 
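
The declaration change swaps `const std::optional<Strings> &` (with a default argument) for a mandatory `const Strings *`: absence becomes an explicit nullptr at every call site, and a caller that already owns a vector no longer materializes an optional copy. A small sketch of the two styles (names are illustrative):

    #include <optional>
    #include <string>
    #include <vector>

    using Strings = std::vector<std::string>;

    // Before: an lvalue argument still builds a temporary optional (a copy).
    size_t countBefore(const std::optional<Strings> & hosts = {})
    {
        return hosts ? hosts->size() : 0;
    }

    // After: presence borrows the caller's vector, absence is an explicit nullptr.
    size_t countAfter(const Strings * hosts)
    {
        return hosts ? hosts->size() : 0;
    }

    int main()
    {
        Strings hosts{"host1", "host2"};
        size_t a = countBefore(hosts);  // copies into a temporary optional
        size_t b = countAfter(&hosts);  // no copy
        size_t c = countAfter(nullptr); // explicit "no hosts", cf. /* hosts_to_wait */ nullptr above
        return (a + b + c) == 4 ? 0 : 1;
    }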
@@ -9,11 +9,11 @@
 
 using namespace DB;
 
-static ComparisonGraph getGraph(const String & query)
+static ComparisonGraph<ASTPtr> getGraph(const String & query)
 {
     ParserExpressionList parser(false);
     ASTPtr ast = parseQuery(parser, query, 0, 0);
-    return ComparisonGraph(ast->children);
+    return ComparisonGraph<ASTPtr>(ast->children);
 }
 
 TEST(ComparisonGraph, Bounds)
@@ -47,8 +47,8 @@ TEST(ComparisonGraph, Bounds)
         auto x = std::make_shared<ASTIdentifier>("x");
         auto y = std::make_shared<ASTIdentifier>("y");
 
-        ASSERT_EQ(graph.compare(x, y), ComparisonGraph::CompareResult::LESS);
-        ASSERT_EQ(graph.compare(y, x), ComparisonGraph::CompareResult::GREATER);
+        ASSERT_EQ(graph.compare(x, y), ComparisonGraphCompareResult::LESS);
+        ASSERT_EQ(graph.compare(y, x), ComparisonGraphCompareResult::GREATER);
     }
 }
 
@@ -93,7 +93,7 @@ TEST(ComparisonGraph, Components)
 
 TEST(ComparisonGraph, Compare)
 {
-    using CompareResult = ComparisonGraph::CompareResult;
+    using enum ComparisonGraphCompareResult;
 
     {
         String query = "a >= b, c >= b";
@@ -102,7 +102,7 @@ TEST(ComparisonGraph, Compare)
         auto a = std::make_shared<ASTIdentifier>("a");
         auto c = std::make_shared<ASTIdentifier>("c");
 
-        ASSERT_EQ(graph.compare(a, c), CompareResult::UNKNOWN);
+        ASSERT_EQ(graph.compare(a, c), UNKNOWN);
     }
 
     {
@@ -113,9 +113,9 @@ TEST(ComparisonGraph, Compare)
         auto b = std::make_shared<ASTIdentifier>("b");
         auto c = std::make_shared<ASTIdentifier>("c");
 
-        ASSERT_EQ(graph.compare(a, c), CompareResult::GREATER);
-        ASSERT_EQ(graph.compare(a, b), CompareResult::GREATER_OR_EQUAL);
-        ASSERT_EQ(graph.compare(b, c), CompareResult::GREATER);
+        ASSERT_EQ(graph.compare(a, c), GREATER);
+        ASSERT_EQ(graph.compare(a, b), GREATER_OR_EQUAL);
+        ASSERT_EQ(graph.compare(b, c), GREATER);
     }
 
     {
@@ -126,9 +126,9 @@ TEST(ComparisonGraph, Compare)
         auto b = std::make_shared<ASTIdentifier>("b");
         auto c = std::make_shared<ASTIdentifier>("c");
 
-        ASSERT_EQ(graph.compare(a, b), CompareResult::NOT_EQUAL);
-        ASSERT_EQ(graph.compare(a, c), CompareResult::GREATER);
-        ASSERT_EQ(graph.compare(b, c), CompareResult::UNKNOWN);
+        ASSERT_EQ(graph.compare(a, b), NOT_EQUAL);
+        ASSERT_EQ(graph.compare(a, c), GREATER);
+        ASSERT_EQ(graph.compare(b, c), UNKNOWN);
     }
 
     {
@@ -154,17 +154,17 @@ TEST(ComparisonGraph, Compare)
         auto lit_3 = std::make_shared<ASTLiteral>(3u);
         auto lit_4 = std::make_shared<ASTLiteral>(4u);
 
-        ASSERT_EQ(graph.compare(lit_3, a), CompareResult::LESS_OR_EQUAL);
-        ASSERT_FALSE(graph.isAlwaysCompare(CompareResult::LESS, lit_3, a));
-        ASSERT_TRUE(graph.isAlwaysCompare(CompareResult::LESS, lit_2, a));
+        ASSERT_EQ(graph.compare(lit_3, a), LESS_OR_EQUAL);
+        ASSERT_FALSE(graph.isAlwaysCompare(LESS, lit_3, a));
+        ASSERT_TRUE(graph.isAlwaysCompare(LESS, lit_2, a));
 
-        ASSERT_EQ(graph.compare(b, lit_2), CompareResult::GREATER);
-        ASSERT_EQ(graph.compare(b, lit_3), CompareResult::GREATER);
-        ASSERT_EQ(graph.compare(b, lit_4), CompareResult::UNKNOWN);
+        ASSERT_EQ(graph.compare(b, lit_2), GREATER);
+        ASSERT_EQ(graph.compare(b, lit_3), GREATER);
+        ASSERT_EQ(graph.compare(b, lit_4), UNKNOWN);
 
-        ASSERT_EQ(graph.compare(d, lit_2), CompareResult::GREATER);
-        ASSERT_EQ(graph.compare(d, lit_3), CompareResult::GREATER_OR_EQUAL);
-        ASSERT_EQ(graph.compare(d, lit_4), CompareResult::UNKNOWN);
+        ASSERT_EQ(graph.compare(d, lit_2), GREATER);
+        ASSERT_EQ(graph.compare(d, lit_3), GREATER_OR_EQUAL);
+        ASSERT_EQ(graph.compare(d, lit_4), UNKNOWN);
     }
 
     {
@@ -176,8 +176,8 @@ TEST(ComparisonGraph, Compare)
         auto lit_3 = std::make_shared<ASTLiteral>(3);
         auto lit_15 = std::make_shared<ASTLiteral>(15);
 
-        ASSERT_EQ(graph.compare(a, lit_8), CompareResult::UNKNOWN);
-        ASSERT_EQ(graph.compare(a, lit_3), CompareResult::GREATER);
-        ASSERT_EQ(graph.compare(a, lit_15), CompareResult::LESS);
+        ASSERT_EQ(graph.compare(a, lit_8), UNKNOWN);
+        ASSERT_EQ(graph.compare(a, lit_3), GREATER);
+        ASSERT_EQ(graph.compare(a, lit_15), LESS);
     }
 }
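
Two things drive these test edits: `CompareResult` is no longer nested in the class (natural once `ComparisonGraph` is a template, since the result type does not depend on the node type), and the tests adopt C++20 `using enum` to use the enumerators unqualified. A minimal illustration (enumerator list limited to the values the tests use):

    #include <cassert>

    enum class ComparisonGraphCompareResult
    {
        LESS, LESS_OR_EQUAL, GREATER, GREATER_OR_EQUAL, NOT_EQUAL, UNKNOWN
    };

    int main()
    {
        using enum ComparisonGraphCompareResult; // C++20: bring enumerators into scope
        auto r = GREATER;                        // instead of ComparisonGraphCompareResult::GREATER
        assert(r == ComparisonGraphCompareResult::GREATER);
    }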
@@ -17,17 +17,21 @@ class ASTTableIdentifier;
 void setIdentifierSpecial(ASTPtr & ast);
 
 String getIdentifierName(const IAST * ast);
 
 std::optional<String> tryGetIdentifierName(const IAST * ast);
 
 bool tryGetIdentifierNameInto(const IAST * ast, String & name);
 
 inline String getIdentifierName(const ASTPtr & ast)
 {
     return getIdentifierName(ast.get());
 }
 
 inline std::optional<String> tryGetIdentifierName(const ASTPtr & ast)
 {
     return tryGetIdentifierName(ast.get());
 }
 
 inline bool tryGetIdentifierNameInto(const ASTPtr & ast, String & name)
 {
     return tryGetIdentifierNameInto(ast.get(), name);
|
52
src/Parsers/ASTShowColumnsQuery.cpp
Normal file
52
src/Parsers/ASTShowColumnsQuery.cpp
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
#include <Parsers/ASTShowColumnsQuery.h>
|
||||||
|
|
||||||
|
#include <iomanip>
|
||||||
|
#include <Common/quoteString.h>
|
||||||
|
#include <IO/Operators.h>
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
ASTPtr ASTShowColumnsQuery::clone() const
|
||||||
|
{
|
||||||
|
auto res = std::make_shared<ASTShowColumnsQuery>(*this);
|
||||||
|
res->children.clear();
|
||||||
|
cloneOutputOptions(*res);
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ASTShowColumnsQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
|
||||||
|
{
|
||||||
|
settings.ostr << (settings.hilite ? hilite_keyword : "")
|
||||||
|
<< "SHOW "
|
||||||
|
<< (extended ? "EXTENDED " : "")
|
||||||
|
<< (full ? "FULL " : "")
|
||||||
|
<< "COLUMNS"
|
||||||
|
<< (settings.hilite ? hilite_none : "");
|
||||||
|
|
||||||
|
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(from_table);
|
||||||
|
if (!from_database.empty())
|
||||||
|
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(from_database);
|
||||||
|
|
||||||
|
|
||||||
|
if (!like.empty())
|
||||||
|
settings.ostr << (settings.hilite ? hilite_keyword : "")
|
||||||
|
<< (not_like ? " NOT" : "")
|
||||||
|
<< (case_insensitive_like ? " ILIKE " : " LIKE")
|
||||||
|
<< (settings.hilite ? hilite_none : "")
|
||||||
|
<< DB::quote << like;
|
||||||
|
|
||||||
|
if (where_expression)
|
||||||
|
{
|
||||||
|
settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : "");
|
||||||
|
where_expression->formatImpl(settings, state, frame);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (limit_length)
|
||||||
|
{
|
||||||
|
settings.ostr << (settings.hilite ? hilite_keyword : "") << " LIMIT " << (settings.hilite ? hilite_none : "");
|
||||||
|
limit_length->formatImpl(settings, state, frame);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
src/Parsers/ASTShowColumnsQuery.h (new file, 34 lines)
@@ -0,0 +1,34 @@
+#pragma once
+
+#include <Parsers/IAST_fwd.h>
+#include <Parsers/ASTQueryWithOutput.h>
+
+namespace DB
+{
+
+/// Query SHOW COLUMNS
+class ASTShowColumnsQuery : public ASTQueryWithOutput
+{
+public:
+    bool extended = false;
+    bool full = false;
+    bool not_like = false;
+    bool case_insensitive_like = false;
+
+    ASTPtr where_expression;
+    ASTPtr limit_length;
+
+    String from_database;
+    String from_table;
+
+    String like;
+
+    String getID(char) const override { return "ShowColumns"; }
+    ASTPtr clone() const override;
+    QueryKind getQueryKind() const override { return QueryKind::Show; }
+
+protected:
+    void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
+};
+
+}
@@ -14,31 +14,28 @@ namespace DB
 class ASTShowTablesQuery : public ASTQueryWithOutput
 {
 public:
-    bool databases{false};
-    bool clusters{false};
-    bool cluster{false};
-    bool dictionaries{false};
-    bool m_settings{false};
-    bool changed{false};
-    bool temporary{false};
-    bool caches{false};
-    bool full{false};
+    bool databases = false;
+    bool clusters = false;
+    bool cluster = false;
+    bool dictionaries = false;
+    bool m_settings = false;
+    bool changed = false;
+    bool temporary = false;
+    bool caches = false;
+    bool full = false;
 
     String cluster_str;
     String from;
     String like;
 
-    bool not_like{false};
-    bool case_insensitive_like{false};
+    bool not_like = false;
+    bool case_insensitive_like = false;
 
     ASTPtr where_expression;
     ASTPtr limit_length;
 
-    /** Get the text that identifies this element. */
     String getID(char) const override { return "ShowTables"; }
 
     ASTPtr clone() const override;
 
     QueryKind getQueryKind() const override { return QueryKind::Show; }
 
 protected:
@@ -182,8 +182,9 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
         else if (!disk.empty())
             print_identifier(disk);
 
-        if (strict_sync)
-            settings.ostr << (settings.hilite ? hilite_keyword : "") << " STRICT" << (settings.hilite ? hilite_none : "");
+        if (sync_replica_mode != SyncReplicaMode::DEFAULT)
+            settings.ostr << ' ' << (settings.hilite ? hilite_keyword : "") << magic_enum::enum_name(sync_replica_mode)
+                          << (settings.hilite ? hilite_none : "");
     }
     else if (type == Type::SYNC_DATABASE_REPLICA)
     {
@@ -2,6 +2,7 @@
 
 #include <Parsers/ASTQueryWithOnCluster.h>
 #include <Parsers/IAST.h>
+#include <Parsers/SyncReplicaMode.h>
 
 #include "config.h"
 
@@ -108,7 +109,7 @@ public:
 
     String schema_cache_storage;
 
-    bool strict_sync = false;
+    SyncReplicaMode sync_replica_mode = SyncReplicaMode::DEFAULT;
 
     String getID(char) const override { return "SYSTEM query"; }
 
@@ -16,6 +16,7 @@
 #include <Parsers/ParserSetQuery.h>
 #include <Parsers/ParserShowProcesslistQuery.h>
 #include <Parsers/ParserShowTablesQuery.h>
+#include <Parsers/ParserShowColumnsQuery.h>
 #include <Parsers/ParserShowEngineQuery.h>
 #include <Parsers/ParserTablePropertiesQuery.h>
 #include <Parsers/ParserWatchQuery.h>
@@ -35,6 +36,7 @@ namespace DB
 bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
     ParserShowTablesQuery show_tables_p;
+    ParserShowColumnsQuery show_columns_p;
     ParserShowEnginesQuery show_engine_p;
     ParserSelectWithUnionQuery select_p;
     ParserTablePropertiesQuery table_p;
@@ -64,6 +66,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
         || select_p.parse(pos, query, expected)
         || show_create_access_entity_p.parse(pos, query, expected) /// should be before `show_tables_p`
         || show_tables_p.parse(pos, query, expected)
+        || show_columns_p.parse(pos, query, expected)
         || show_engine_p.parse(pos, query, expected)
         || table_p.parse(pos, query, expected)
         || describe_cache_p.parse(pos, query, expected)
src/Parsers/ParserShowColumnsQuery.cpp (new file, 80 lines)
@@ -0,0 +1,80 @@
+#include <Parsers/ParserShowColumnsQuery.h>
+
+#include <Parsers/ASTIdentifier_fwd.h>
+#include <Parsers/ASTLiteral.h>
+#include <Parsers/ASTShowColumnsQuery.h>
+#include <Parsers/CommonParsers.h>
+#include <Parsers/ExpressionElementParsers.h>
+#include <Parsers/ExpressionListParsers.h>
+
+namespace DB
+{
+
+bool ParserShowColumnsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+{
+    ASTPtr like;
+    ASTPtr from_database;
+    ASTPtr from_table;
+
+    auto query = std::make_shared<ASTShowColumnsQuery>();
+
+    if (!ParserKeyword("SHOW").ignore(pos, expected))
+        return false;
+
+    if (ParserKeyword("EXTENDED").ignore(pos, expected))
+        query->extended = true;
+
+    if (ParserKeyword("FULL").ignore(pos, expected))
+        query->full = true;
+
+    if (!ParserKeyword("COLUMNS").ignore(pos, expected) || ParserKeyword("FIELDS").ignore(pos, expected))
+        return false;
+
+    if (ParserKeyword("FROM").ignore(pos, expected) || ParserKeyword("IN").ignore(pos, expected))
+    {
+        if (!ParserCompoundIdentifier().parse(pos, from_table, expected))
+            return false;
+    }
+    else
+        return false;
+
+    tryGetIdentifierNameInto(from_table, query->from_table);
+    bool abbreviated_form = query->from_table.contains("."); /// FROM <db>.<table>
+
+    if (!abbreviated_form)
+        if (ParserKeyword("FROM").ignore(pos, expected) || ParserKeyword("IN").ignore(pos, expected))
+            if (!ParserIdentifier().parse(pos, from_database, expected))
+                return false;
+
+    tryGetIdentifierNameInto(from_database, query->from_database);
+
+    if (ParserKeyword("NOT").ignore(pos, expected))
+        query->not_like = true;
+
+    if (bool insensitive = ParserKeyword("ILIKE").ignore(pos, expected); insensitive || ParserKeyword("LIKE").ignore(pos, expected))
+    {
+        if (insensitive)
+            query->case_insensitive_like = true;
+
+        if (!ParserStringLiteral().parse(pos, like, expected))
+            return false;
+    }
+    else if (query->not_like)
+        return false;
+    else if (ParserKeyword("WHERE").ignore(pos, expected))
+        if (!ParserExpressionWithOptionalAlias(false).parse(pos, query->where_expression, expected))
+            return false;
+
+    if (ParserKeyword("LIMIT").ignore(pos, expected))
+        if (!ParserExpressionWithOptionalAlias(false).parse(pos, query->limit_length, expected))
+            return false;
+
+    if (like)
+        query->like = like->as<ASTLiteral &>().value.safeGet<const String &>();
+
+    node = query;
+
+    return true;
+}
+
+}
src/Parsers/ParserShowColumnsQuery.h (new file, 19 lines)
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <Parsers/IParserBase.h>
+
+namespace DB
+{
+
+/** Parses queries of the form
+ * SHOW [EXTENDED] [FULL] COLUMNS (FROM|IN) tbl [(FROM|IN) db] [(([NOT] (LIKE|ILIKE) expr) | (WHERE expr))] [LIMIT n]
+ */
+class ParserShowColumnsQuery : public IParserBase
+{
+protected:
+    const char * getName() const override { return "SHOW COLUMNS query"; }
+
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
+};
+
+}
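
A hedged sketch of how the new parser would be exercised, modeled on the parseQuery pattern the ComparisonGraph tests above already use (this helper is not part of the commit; serializeAST from Parsers/formatAST.h is assumed available):

    #include <Parsers/ParserShowColumnsQuery.h>
    #include <Parsers/formatAST.h>
    #include <Parsers/parseQuery.h>

    using namespace DB;

    /// Parses one SHOW COLUMNS statement and returns it re-serialized,
    /// passing 0 for max_query_size/max_parser_depth as in the tests above.
    static String roundTrip(const String & query)
    {
        ParserShowColumnsQuery parser;
        ASTPtr ast = parseQuery(parser, query, 0, 0);
        return serializeAST(*ast);
    }

    /// roundTrip("SHOW EXTENDED FULL COLUMNS FROM tbl IN db LIKE '%a%' LIMIT 10")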
@@ -149,10 +149,8 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
     }
 
     if (s_from.ignore(pos, expected) || s_in.ignore(pos, expected))
-    {
         if (!name_p.parse(pos, database, expected))
             return false;
-    }
 
     if (s_not.ignore(pos, expected))
         query->not_like = true;
@@ -168,17 +166,13 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
         else if (query->not_like)
             return false;
         else if (s_where.ignore(pos, expected))
-        {
             if (!exp_elem.parse(pos, query->where_expression, expected))
                 return false;
-        }
 
         if (s_limit.ignore(pos, expected))
-        {
             if (!exp_elem.parse(pos, query->limit_length, expected))
                 return false;
-        }
     }
 
     tryGetIdentifierNameInto(database, query->from);
 
|
@ -259,8 +259,15 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
|
|||||||
return false;
|
return false;
|
||||||
if (!parseDatabaseAndTableAsAST(pos, expected, res->database, res->table))
|
if (!parseDatabaseAndTableAsAST(pos, expected, res->database, res->table))
|
||||||
return false;
|
return false;
|
||||||
if (res->type == Type::SYNC_REPLICA && ParserKeyword{"STRICT"}.ignore(pos, expected))
|
if (res->type == Type::SYNC_REPLICA)
|
||||||
res->strict_sync = true;
|
{
|
||||||
|
if (ParserKeyword{"STRICT"}.ignore(pos, expected))
|
||||||
|
res->sync_replica_mode = SyncReplicaMode::STRICT;
|
||||||
|
else if (ParserKeyword{"LIGHTWEIGHT"}.ignore(pos, expected))
|
||||||
|
res->sync_replica_mode = SyncReplicaMode::LIGHTWEIGHT;
|
||||||
|
else if (ParserKeyword{"PULL"}.ignore(pos, expected))
|
||||||
|
res->sync_replica_mode = SyncReplicaMode::PULL;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
13
src/Parsers/SyncReplicaMode.h
Normal file
13
src/Parsers/SyncReplicaMode.h
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
#pragma once
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
enum class SyncReplicaMode : uint8_t
|
||||||
|
{
|
||||||
|
DEFAULT,
|
||||||
|
STRICT,
|
||||||
|
LIGHTWEIGHT,
|
||||||
|
PULL,
|
||||||
|
};
|
||||||
|
}
|
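
With the mode stored as an enum, serialization falls out of magic_enum (the same third-party library the ASTSystemQuery::formatImpl hunk uses): DEFAULT prints nothing, every other mode prints its enumerator name as the keyword, which is exactly what ParserSystemQuery accepts back. A standalone sketch (the enum is copied from the new header):

    #include <cstdint>
    #include <iostream>
    #include <magic_enum.hpp>

    enum class SyncReplicaMode : uint8_t { DEFAULT, STRICT, LIGHTWEIGHT, PULL };

    int main()
    {
        for (auto mode : {SyncReplicaMode::STRICT, SyncReplicaMode::LIGHTWEIGHT, SyncReplicaMode::PULL})
        {
            // enum_name yields the identifier, e.g. "LIGHTWEIGHT".
            std::cout << "SYSTEM SYNC REPLICA db.tbl " << magic_enum::enum_name(mode) << '\n';
        }
    }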
Some files were not shown because too many files have changed in this diff.