Merge branch 'master' into contrib_sparse_checkout

This commit is contained in:
Alexander Tokmakov 2023-03-31 22:55:22 +03:00 committed by GitHub
commit c4c3602f69
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
56 changed files with 2226 additions and 347 deletions

View File

@ -1131,7 +1131,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2 RUN_BY_HASH_TOTAL=4
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1167,6 +1167,114 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated2:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_database_replicated
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, DatabaseReplicated)
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated3:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_database_replicated
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, DatabaseReplicated)
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3_0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_s3_storage
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, s3 storage)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2 RUN_BY_HASH_TOTAL=2
EOF EOF
- name: Download json reports - name: Download json reports
@ -1190,7 +1298,7 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3: FunctionalStatelessTestReleaseS3_1:
needs: [BuilderDebRelease] needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester] runs-on: [self-hosted, func-tester]
steps: steps:
@ -1202,6 +1310,8 @@ jobs:
CHECK_NAME=Stateless tests (release, s3 storage) CHECK_NAME=Stateless tests (release, s3 storage)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1271,7 +1381,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2 RUN_BY_HASH_TOTAL=4
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1307,7 +1417,79 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2 RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan2:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (asan)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan3:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (asan)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1343,7 +1525,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=5
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1379,7 +1561,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=5
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1415,7 +1597,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2 RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=5
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1438,7 +1620,79 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan: FunctionalStatelessTestTsan3:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (tsan)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan4:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (tsan)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan0:
needs: [BuilderDebUBsan] needs: [BuilderDebUBsan]
runs-on: [self-hosted, func-tester] runs-on: [self-hosted, func-tester]
steps: steps:
@ -1450,6 +1704,44 @@ jobs:
CHECK_NAME=Stateless tests (ubsan) CHECK_NAME=Stateless tests (ubsan)
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan1:
needs: [BuilderDebUBsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_ubsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (ubsan)
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1485,7 +1777,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1521,7 +1813,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1557,7 +1849,115 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2 RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan3:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (msan)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan4:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (msan)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan5:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (msan)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1593,7 +1993,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=5
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1629,7 +2029,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=5
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -1665,7 +2065,79 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800 KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2 RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug3:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (debug)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug4:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (debug)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=5
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2116,7 +2588,7 @@ jobs:
CHECK_NAME=Integration tests (asan) CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2151,7 +2623,7 @@ jobs:
CHECK_NAME=Integration tests (asan) CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2186,7 +2658,112 @@ jobs:
CHECK_NAME=Integration tests (asan) CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=2 RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3 RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan3:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan4:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan5:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2221,7 +2798,7 @@ jobs:
CHECK_NAME=Integration tests (tsan) CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=4 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2256,7 +2833,7 @@ jobs:
CHECK_NAME=Integration tests (tsan) CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=4 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2291,7 +2868,7 @@ jobs:
CHECK_NAME=Integration tests (tsan) CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=2 RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4 RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2326,7 +2903,77 @@ jobs:
CHECK_NAME=Integration tests (tsan) CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=3 RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4 RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan4:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan5:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2361,7 +3008,7 @@ jobs:
CHECK_NAME=Integration tests (release) CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=0 RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2 RUN_BY_HASH_TOTAL=4
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -2396,7 +3043,77 @@ jobs:
CHECK_NAME=Integration tests (release) CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=1 RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2 RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease2:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease3:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF EOF
- name: Download json reports - name: Download json reports
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@ -3116,23 +3833,36 @@ jobs:
- FunctionalStatelessTestDebug0 - FunctionalStatelessTestDebug0
- FunctionalStatelessTestDebug1 - FunctionalStatelessTestDebug1
- FunctionalStatelessTestDebug2 - FunctionalStatelessTestDebug2
- FunctionalStatelessTestDebug3
- FunctionalStatelessTestDebug4
- FunctionalStatelessTestRelease - FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseOrdinary - FunctionalStatelessTestReleaseDatabaseOrdinary
- FunctionalStatelessTestReleaseDatabaseReplicated0 - FunctionalStatelessTestReleaseDatabaseReplicated0
- FunctionalStatelessTestReleaseDatabaseReplicated1 - FunctionalStatelessTestReleaseDatabaseReplicated1
- FunctionalStatelessTestReleaseDatabaseReplicated2
- FunctionalStatelessTestReleaseDatabaseReplicated3
- FunctionalStatelessTestAarch64 - FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan0 - FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1 - FunctionalStatelessTestAsan1
- FunctionalStatelessTestAsan2
- FunctionalStatelessTestAsan3
- FunctionalStatelessTestTsan0 - FunctionalStatelessTestTsan0
- FunctionalStatelessTestTsan1 - FunctionalStatelessTestTsan1
- FunctionalStatelessTestTsan2 - FunctionalStatelessTestTsan2
- FunctionalStatelessTestTsan3
- FunctionalStatelessTestTsan4
- FunctionalStatelessTestMsan0 - FunctionalStatelessTestMsan0
- FunctionalStatelessTestMsan1 - FunctionalStatelessTestMsan1
- FunctionalStatelessTestMsan2 - FunctionalStatelessTestMsan2
- FunctionalStatelessTestUBsan - FunctionalStatelessTestMsan3
- FunctionalStatelessTestMsan4
- FunctionalStatelessTestMsan5
- FunctionalStatelessTestUBsan0
- FunctionalStatelessTestUBsan1
- FunctionalStatefulTestDebug - FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease - FunctionalStatefulTestRelease
- FunctionalStatelessTestReleaseS3 - FunctionalStatelessTestReleaseS3_0
- FunctionalStatelessTestReleaseS3_1
- FunctionalStatefulTestAarch64 - FunctionalStatefulTestAarch64
- FunctionalStatefulTestAsan - FunctionalStatefulTestAsan
- FunctionalStatefulTestTsan - FunctionalStatefulTestTsan
@ -3146,12 +3876,19 @@ jobs:
- IntegrationTestsAsan0 - IntegrationTestsAsan0
- IntegrationTestsAsan1 - IntegrationTestsAsan1
- IntegrationTestsAsan2 - IntegrationTestsAsan2
- IntegrationTestsAsan3
- IntegrationTestsAsan4
- IntegrationTestsAsan5
- IntegrationTestsRelease0 - IntegrationTestsRelease0
- IntegrationTestsRelease1 - IntegrationTestsRelease1
- IntegrationTestsRelease2
- IntegrationTestsRelease3
- IntegrationTestsTsan0 - IntegrationTestsTsan0
- IntegrationTestsTsan1 - IntegrationTestsTsan1
- IntegrationTestsTsan2 - IntegrationTestsTsan2
- IntegrationTestsTsan3 - IntegrationTestsTsan3
- IntegrationTestsTsan4
- IntegrationTestsTsan5
- PerformanceComparisonX86-0 - PerformanceComparisonX86-0
- PerformanceComparisonX86-1 - PerformanceComparisonX86-1
- PerformanceComparisonX86-2 - PerformanceComparisonX86-2

View File

@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported | | Version | Supported |
|:-|:-| |:-|:-|
| 23.3 | ✔️ |
| 23.2 | ✔️ | | 23.2 | ✔️ |
| 23.1 | ✔️ | | 23.1 | ✔️ |
| 22.12 | ✔️ | | 22.12 | |
| 22.11 | ❌ | | 22.11 | ❌ |
| 22.10 | ❌ | | 22.10 | ❌ |
| 22.9 | ❌ | | 22.9 | ❌ |
@ -24,7 +25,7 @@ The following versions of ClickHouse server are currently being supported with s
| 22.6 | ❌ | | 22.6 | ❌ |
| 22.5 | ❌ | | 22.5 | ❌ |
| 22.4 | ❌ | | 22.4 | ❌ |
| 22.3 | ✔️ | | 22.3 | |
| 22.2 | ❌ | | 22.2 | ❌ |
| 22.1 | ❌ | | 22.1 | ❌ |
| 21.* | ❌ | | 21.* | ❌ |

View File

@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54472) SET(VERSION_REVISION 54473)
SET(VERSION_MAJOR 23) SET(VERSION_MAJOR 23)
SET(VERSION_MINOR 3) SET(VERSION_MINOR 4)
SET(VERSION_PATCH 1) SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 52bf836e03a6ba7cf2d654eaaf73231701abc3a2) SET(VERSION_GITHASH 46e85357ce2da2a99f56ee83a079e892d7ec3726)
SET(VERSION_DESCRIBE v23.3.1.2537-testing) SET(VERSION_DESCRIBE v23.4.1.1-testing)
SET(VERSION_STRING 23.3.1.2537) SET(VERSION_STRING 23.4.1.1)
# end of autochange # end of autochange

View File

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac esac
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release" ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.2.4.12" ARG VERSION="23.3.1.2823"
ARG PACKAGES="clickhouse-keeper" ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose. # user/group precreated explicitly with fixed uid/gid on purpose.

View File

@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc # lts / testing / prestable / etc
ARG REPO_CHANNEL="stable" ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.2.4.12" ARG VERSION="23.3.1.2823"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose. # user/group precreated explicitly with fixed uid/gid on purpose.

View File

@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable" ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.2.4.12" ARG VERSION="23.3.1.2823"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image # set non-empty deb_location_url url to create a docker image

View File

@ -0,0 +1,545 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.3.1.2823-lts (46e85357ce2) FIXME as compared to v23.2.1.2537-stable (52bf836e03a)
#### Backward Incompatible Change
* Relax symbols that are allowed in URL authority in *domain*RFC()/netloc(). [#46841](https://github.com/ClickHouse/ClickHouse/pull/46841) ([Azat Khuzhin](https://github.com/azat)).
* Prohibit create tables based on KafkaEngine with DEFAULT/EPHEMERAL/ALIAS/MATERIALIZED statements for columns. [#47138](https://github.com/ClickHouse/ClickHouse/pull/47138) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* An "asynchronous connection drain" feature is removed. Related settings and metrics are removed as well. It was an internal feature, so the removal should not affect users who had never heard about that feature. [#47486](https://github.com/ClickHouse/ClickHouse/pull/47486) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support 256-bit Decimal data type (more than 38 digits) in `arraySum`/`Min`/`Max`/`Avg`/`Product`, `arrayCumSum`/`CumSumNonNegative`, `arrayDifference`, array construction, IN operator, query parameters, `groupArrayMovingSum`, statistical functions, `min`/`max`/`any`/`argMin`/`argMax`, PostgreSQL wire protocol, MySQL table engine and function, `sumMap`, `mapAdd`, `mapSubtract`, `arrayIntersect`. Add support for big integers in `arrayIntersect`. Statistical aggregate functions involving moments (such as `corr` or various `TTest`s) will use `Float64` as their internal representation (they were using `Decimal128` before this change, but it was pointless), and these functions can return `nan` instead of `inf` in case of infinite variance. Some functions were allowed on `Decimal256` data types but returned `Decimal128` in previous versions - now it is fixed. This closes [#47569](https://github.com/ClickHouse/ClickHouse/issues/47569). This closes [#44864](https://github.com/ClickHouse/ClickHouse/issues/44864). This closes [#28335](https://github.com/ClickHouse/ClickHouse/issues/28335). [#47594](https://github.com/ClickHouse/ClickHouse/pull/47594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make backup_threads/restore_threads server settings. [#47881](https://github.com/ClickHouse/ClickHouse/pull/47881) ([Azat Khuzhin](https://github.com/azat)).
* Fix the isIPv6String function which could have outputted a false positive result in the case of an incorrect IPv6 address. For example `1234::1234:` was considered a valid IPv6 address. [#47895](https://github.com/ClickHouse/ClickHouse/pull/47895) ([Nikolay Degterinsky](https://github.com/evillique)).
#### New Feature
* Add new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
* Added query setting `partial_result_on_first_cancel` allowing the canceled query (e.g. due to Ctrl-C) to return a partial result. [#45689](https://github.com/ClickHouse/ClickHouse/pull/45689) ([Alexey Perevyshin](https://github.com/alexX512)).
* Added support of arbitrary tables engines for temporary tables except for Replicated and KeeperMap engines. Partially close [#31497](https://github.com/ClickHouse/ClickHouse/issues/31497). [#46071](https://github.com/ClickHouse/ClickHouse/pull/46071) ([Roman Vasin](https://github.com/rvasin)).
* Add replication of user-defined SQL functions using ZooKeeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Implement `system.server_settings` (similar to `system.settings`), which will contain server configurations. [#46550](https://github.com/ClickHouse/ClickHouse/pull/46550) ([pufit](https://github.com/pufit)).
* Introduce a function `WIDTH_BUCKET`. [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
* Add new function parseDateTime/parseDateTimeInJodaSyntax according to specified format string. parseDateTime parses string to datetime in MySQL syntax, parseDateTimeInJodaSyntax parses in Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
* Use `dummy UInt8` for default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
* Support dates in formats like `Dec 15, 2021` in the parseDateTimeBestEffort function. Closes [#46816](https://github.com/ClickHouse/ClickHouse/issues/46816). [#47071](https://github.com/ClickHouse/ClickHouse/pull/47071) ([chen](https://github.com/xiedeyantu)).
* Add function ULIDStringToDateTime(). Closes [#46945](https://github.com/ClickHouse/ClickHouse/issues/46945). [#47087](https://github.com/ClickHouse/ClickHouse/pull/47087) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that corresponds to URL params `wait_end_of_query` and `buffer_size` for HTTP interface. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
* Support for `UNDROP TABLE` query. Closes [#46811](https://github.com/ClickHouse/ClickHouse/issues/46811). [#47241](https://github.com/ClickHouse/ClickHouse/pull/47241) ([chen](https://github.com/xiedeyantu)).
* Add `system.marked_dropped_tables` table that shows tables that were dropped from `Atomic` databases but were not completely removed yet. [#47364](https://github.com/ClickHouse/ClickHouse/pull/47364) ([chen](https://github.com/xiedeyantu)).
* Add `INSTR` as alias of `positionCaseInsensitive` for MySQL compatibility. Closes [#47529](https://github.com/ClickHouse/ClickHouse/issues/47529). [#47535](https://github.com/ClickHouse/ClickHouse/pull/47535) ([flynn](https://github.com/ucasfl)).
* Added `toDecimalString` function allowing to convert numbers to string with fixed precision. [#47838](https://github.com/ClickHouse/ClickHouse/pull/47838) ([Andrey Zvonov](https://github.com/zvonand)).
* Added operator "REGEXP" (similar to operators "LIKE", "IN", "MOD" etc.) for better compatibility with MySQL. [#47869](https://github.com/ClickHouse/ClickHouse/pull/47869) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow executing reading pipeline for DIRECT dictionary with CLICKHOUSE source in multiple threads. To enable set `dictionary_use_async_executor=1` in `SETTINGS` section for source in `CREATE DICTIONARY` statement. [#47986](https://github.com/ClickHouse/ClickHouse/pull/47986) ([Vladimir C](https://github.com/vdimir)).
* Add merge tree setting `max_number_of_mutatuins_for_replica`. It limit the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings). [#48047](https://github.com/ClickHouse/ClickHouse/pull/48047) ([Vladimir C](https://github.com/vdimir)).
#### Performance Improvement
* Optimize one nullable key aggregate performance. [#45772](https://github.com/ClickHouse/ClickHouse/pull/45772) ([LiuNeng](https://github.com/liuneng1994)).
* Implemented lowercase tokenbf_v1 index utilization for hasTokenOrNull, hasTokenCaseInsensitive and hasTokenCaseInsensitiveOrNull. [#46252](https://github.com/ClickHouse/ClickHouse/pull/46252) ([ltrk2](https://github.com/ltrk2)).
* Optimize the generic SIMD StringSearcher by searching first two chars. [#46289](https://github.com/ClickHouse/ClickHouse/pull/46289) ([Jiebin Sun](https://github.com/jiebinn)).
* System.detached_parts could be significant large. - added several sources with respects block size limitation - in each block iothread pool is used to calculate part size, ie to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
* Increase the default value of `max_replicated_merges_in_queue` for ReplicatedMergeTree tables from 16 to 1000. It allows faster background merge operation on clusters with a very large number of replicas, such as clusters with shared storage in ClickHouse Cloud. [#47050](https://github.com/ClickHouse/ClickHouse/pull/47050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backups for large numbers of files were unbelievably slow in previous versions. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support filter push down to left table for JOIN with StorageJoin, StorageDictionary, StorageEmbeddedRocksDB. [#47280](https://github.com/ClickHouse/ClickHouse/pull/47280) ([Maksim Kita](https://github.com/kitaisreal)).
* Marks in memory are now compressed, using 3-6x less memory. [#47290](https://github.com/ClickHouse/ClickHouse/pull/47290) ([Michael Kolupaev](https://github.com/al13n321)).
* Updated copier to use group by instead of distinct to get list of partitions. For large tables this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
* Address https://github.com/clickhouse/clickhouse/issues/46453. bisect marked https://github.com/clickhouse/clickhouse/pull/35525 as the bad changed. this pr looks to reverse the changes in that pr. [#47544](https://github.com/ClickHouse/ClickHouse/pull/47544) ([Ongkong](https://github.com/ongkong)).
* Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
* Setting `max_final_threads` would be set to number of cores at server startup (by the same algorithm as we use for `max_threads`). This improves concurrency of `final` execution on servers with high number of CPUs. [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
* Avoid breaking batches on read requests to improve performance. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).
#### Improvement
* Add map related functions: mapFromArrays, which allows us to create map from a pair of arrays. [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
* Rewrite distributed sends to avoid using filesystem as a queue, use in-memory queue instead. [#45491](https://github.com/ClickHouse/ClickHouse/pull/45491) ([Azat Khuzhin](https://github.com/azat)).
* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add new access type `NAMED_COLLECTION_CONTROL` which is not given to default user unless explicitly added to user config (is required to be able to do `GRANT ALL`), also `show_named_collections` is no longer obligatory to be manually specified for default user to be able to have full access rights as was in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Now `X-ClickHouse-Query-Id` and `X-ClickHouse-Timezone` headers are added to response in all queries via http protocol. Previously it was done only for `SELECT` queries. [#46364](https://github.com/ClickHouse/ClickHouse/pull/46364) ([Anton Popov](https://github.com/CurtizJ)).
* Support for connection to a replica set via a URI with a host:port enum and support for the readPreference option in MongoDB dictionaries. Example URI: mongodb://db0.example.com:27017,db1.example.com:27017,db2.example.com:27017/?replicaSet=myRepl&readPreference=primary. [#46524](https://github.com/ClickHouse/ClickHouse/pull/46524) ([artem-yadr](https://github.com/artem-yadr)).
* Re-implement projection analysis on top of query plan. Added setting `query_plan_optimize_projection=1` to switch between old and new version. Fixes [#44963](https://github.com/ClickHouse/ClickHouse/issues/44963). [#46537](https://github.com/ClickHouse/ClickHouse/pull/46537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use parquet format v2 instead of v1 in output format by default. Add setting `output_format_parquet_version` to control parquet version, possible values `v1_0`, `v2_4`, `v2_6`, `v2_latest` (default). [#46617](https://github.com/ClickHouse/ClickHouse/pull/46617) ([Kruglov Pavel](https://github.com/Avogar)).
* Not for changelog - part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#46632](https://github.com/ClickHouse/ClickHouse/pull/46632) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Allow to ignore errors while pushing to MATERIALIZED VIEW (add new setting `materialized_views_ignore_errors`, by default to `false`, but it is set to `true` for flushing logs to `system.*_log` tables unconditionally). [#46658](https://github.com/ClickHouse/ClickHouse/pull/46658) ([Azat Khuzhin](https://github.com/azat)).
* Enable input_format_json_ignore_unknown_keys_in_named_tuple by default. [#46742](https://github.com/ClickHouse/ClickHouse/pull/46742) ([Kruglov Pavel](https://github.com/Avogar)).
* It is now possible using new configuration syntax to configure Kafka topics with periods in their name. [#46752](https://github.com/ClickHouse/ClickHouse/pull/46752) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix heuristics that check hyperscan patterns for problematic repeats. [#46819](https://github.com/ClickHouse/ClickHouse/pull/46819) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't report ZK node exists to system.errors when a block was created concurrently by a different replica. [#46820](https://github.com/ClickHouse/ClickHouse/pull/46820) ([Raúl Marín](https://github.com/Algunenano)).
* Allow PREWHERE for Merge with different DEFAULT expression for column. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
* Increase the limit for opened files in `clickhouse-local`. It will be able to read from `web` tables on servers with a huge number of CPU cores. Do not back off reading from the URL table engine in case of too many opened files. This closes [#46852](https://github.com/ClickHouse/ClickHouse/issues/46852). [#46853](https://github.com/ClickHouse/ClickHouse/pull/46853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exceptions thrown when numbers cannot be parsed now have an easier-to-read exception message. [#46917](https://github.com/ClickHouse/ClickHouse/pull/46917) ([Robert Schulze](https://github.com/rschu1ze)).
* Added update `system.backups` after every processed task. [#46989](https://github.com/ClickHouse/ClickHouse/pull/46989) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Allow types conversion in Native input format. Add settings `input_format_native_allow_types_conversion` that controls it (enabled by default). [#46990](https://github.com/ClickHouse/ClickHouse/pull/46990) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow IPv4 in the `range` function to generate IP ranges. [#46995](https://github.com/ClickHouse/ClickHouse/pull/46995) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Role change was not promoted sometimes before https://github.com/ClickHouse/ClickHouse/pull/46772 This PR just adds tests. [#47002](https://github.com/ClickHouse/ClickHouse/pull/47002) ([Ilya Golshtein](https://github.com/ilejn)).
* Improve exception message when it's impossible to make part move from one volume/disk to another. [#47032](https://github.com/ClickHouse/ClickHouse/pull/47032) ([alesapin](https://github.com/alesapin)).
* Support `Bool` type in `JSONType` function. Previously `Null` type was mistakenly returned for bool values. [#47046](https://github.com/ClickHouse/ClickHouse/pull/47046) ([Anton Popov](https://github.com/CurtizJ)).
* Use _request_body parameter to configure predefined http queries. [#47086](https://github.com/ClickHouse/ClickHouse/pull/47086) ([Constantine Peresypkin](https://github.com/pkit)).
* Removing logging of custom disk structure. [#47103](https://github.com/ClickHouse/ClickHouse/pull/47103) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow nested custom disks. Previously custom disks supported only flat disk structure. [#47106](https://github.com/ClickHouse/ClickHouse/pull/47106) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Automatic indentation in the built-in UI SQL editor when Enter is pressed. [#47113](https://github.com/ClickHouse/ClickHouse/pull/47113) ([Alexey Korepanov](https://github.com/alexkorep)).
* Allow control compression in Parquet/ORC/Arrow output formats, support more compression for input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
* Self-extraction with 'sudo' will attempt to set uid and gid of extracted files to running user. [#47116](https://github.com/ClickHouse/ClickHouse/pull/47116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Currently the second argument of the function `repeat` must be an unsigned integer type, so it cannot accept an integer value like -1. This differs from the Spark function, so it is fixed here to match Spark's behavior. [#47134](https://github.com/ClickHouse/ClickHouse/pull/47134) ([KevinyhZou](https://github.com/KevinyhZou)).
* Remove `::__1` part from stacktraces. Display `std::basic_string<char, ...` as `String` in stacktraces. [#47171](https://github.com/ClickHouse/ClickHouse/pull/47171) ([Mike Kot](https://github.com/myrrc)).
* Introduced a separate thread pool for backup IO operations. This will allow to scale it independently from other pool and increase performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Reimplement interserver mode to avoid replay attacks (note, that change is backward compatible with older servers). [#47213](https://github.com/ClickHouse/ClickHouse/pull/47213) ([Azat Khuzhin](https://github.com/azat)).
* Make function `optimizeregularexpression` recognize re groups and refine regexp tree dictionary. [#47218](https://github.com/ClickHouse/ClickHouse/pull/47218) ([Han Fei](https://github.com/hanfei1991)).
* Use MultiRead request and retries for collecting metadata at final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Keeper improvement: Add new 4LW `clrs` to clean resources used by Keeper (e.g. release unused memory). [#47256](https://github.com/ClickHouse/ClickHouse/pull/47256) ([Antonio Andelic](https://github.com/antonio2368)).
* Add optional arguments to codecs `DoubleDelta(bytes_size)`, `Gorilla(bytes_size)`, `FPC(level, float_size)`, it will allow using this codecs without column type in `clickhouse-compressor`. Fix possible aborts and arithmetic errors in `clickhouse-compressor` with these codecs. Fixes: https://github.com/ClickHouse/ClickHouse/discussions/47262. [#47271](https://github.com/ClickHouse/ClickHouse/pull/47271) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for big int types to runningDifference() function. Closes [#47194](https://github.com/ClickHouse/ClickHouse/issues/47194). [#47322](https://github.com/ClickHouse/ClickHouse/pull/47322) ([Nikolay Degterinsky](https://github.com/evillique)).
* PostgreSQL replication has been adjusted to use "FROM ONLY" clause while performing initial synchronization. This prevents double-fetching the same data in case the target PostgreSQL database uses table inheritance. [#47387](https://github.com/ClickHouse/ClickHouse/pull/47387) ([Maksym Sobolyev](https://github.com/sobomax)).
* Add an expiration window for S3 credentials that have an expiration time to avoid `ExpiredToken` errors in some edge cases. It can be controlled with `expiration_window_seconds` config, the default is 120 seconds. [#47423](https://github.com/ClickHouse/ClickHouse/pull/47423) ([Antonio Andelic](https://github.com/antonio2368)).
* Support Decimals and Date32 in Avro format. [#47434](https://github.com/ClickHouse/ClickHouse/pull/47434) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not start the server if an interrupted conversion from `Ordinary` to `Atomic` was detected, print a better error message with troubleshooting instructions. [#47487](https://github.com/ClickHouse/ClickHouse/pull/47487) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a new column `kind` to system.opentelemetry_span_log. This column holds the value of [SpanKind](https://opentelemetry.io/docs/reference/specification/trace/api/#spankind) defined in OpenTelemetry. [#47499](https://github.com/ClickHouse/ClickHouse/pull/47499) ([Frank Chen](https://github.com/FrankChen021)).
* If a backup and restoring data are both in S3 then server-side copy should be used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add SSL User Certificate authentication to the native protocol. Closes [#47077](https://github.com/ClickHouse/ClickHouse/issues/47077). [#47596](https://github.com/ClickHouse/ClickHouse/pull/47596) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow reading/writing nested arrays in Protobuf with only root field name as column name. Previously column name should've contain all nested field names (like `a.b.c Array(Array(Array(UInt32)))`, now you can use just `a Array(Array(Array(UInt32)))`. [#47650](https://github.com/ClickHouse/ClickHouse/pull/47650) ([Kruglov Pavel](https://github.com/Avogar)).
* Added an optional `STRICT` modifier for `SYSTEM SYNC REPLICA` which makes the query wait for replication queue to become empty (just like it worked before https://github.com/ClickHouse/ClickHouse/pull/45648). [#47659](https://github.com/ClickHouse/ClickHouse/pull/47659) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improvement name of some span logs. [#47667](https://github.com/ClickHouse/ClickHouse/pull/47667) ([Frank Chen](https://github.com/FrankChen021)).
* Now ReplicatedMergeTree with zero copy replication has less load to ZooKeeper. [#47676](https://github.com/ClickHouse/ClickHouse/pull/47676) ([alesapin](https://github.com/alesapin)).
* Prevent using too long chains of aggregate function combinators (they can lead to slow queries in the analysis stage). This closes [#47715](https://github.com/ClickHouse/ClickHouse/issues/47715). [#47716](https://github.com/ClickHouse/ClickHouse/pull/47716) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support for subquery in parameterized views resolves [#46741](https://github.com/ClickHouse/ClickHouse/issues/46741) Implementation: * Updated to pass the parameter is_create_parameterized_view to subquery processing. Testing: * Added test case with subquery for parameterized view. [#47725](https://github.com/ClickHouse/ClickHouse/pull/47725) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix memory leak in MySQL integration (reproduces with `connection_auto_close=1`). [#47732](https://github.com/ClickHouse/ClickHouse/pull/47732) ([Kseniia Sumarokova](https://github.com/kssenii)).
* AST Fuzzer support fuzz `EXPLAIN` query. [#47803](https://github.com/ClickHouse/ClickHouse/pull/47803) ([flynn](https://github.com/ucasfl)).
* Fixed error print message while Decimal parameters is incorrect. [#47812](https://github.com/ClickHouse/ClickHouse/pull/47812) ([Yu Feng](https://github.com/Vigor-jpg)).
* Add `X-ClickHouse-Query-Id` to HTTP response when queries fails to execute. [#47813](https://github.com/ClickHouse/ClickHouse/pull/47813) ([Frank Chen](https://github.com/FrankChen021)).
* AST fuzzer support fuzzing `SELECT` query to `EXPLAIN` query randomly. [#47852](https://github.com/ClickHouse/ClickHouse/pull/47852) ([flynn](https://github.com/ucasfl)).
* Improved the overall performance by better utilizing local replica. And forbid reading with parallel replicas from non-replicated MergeTree by default. [#47858](https://github.com/ClickHouse/ClickHouse/pull/47858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* More accurate CPU usage indication for client: account for usage in some long-living server threads (Segmentator) and do regular CPU accounting for every thread. [#47870](https://github.com/ClickHouse/ClickHouse/pull/47870) ([Sergei Trifonov](https://github.com/serxa)).
* The parameter `exact_rows_before_limit` is used to make `rows_before_limit_at_least` is designed to accurately reflect the number of rows returned before the limit is reached. This pull request addresses issues encountered when the query involves distributed processing across multiple shards or sorting operations. Prior to this update, these scenarios were not functioning as intended. [#47874](https://github.com/ClickHouse/ClickHouse/pull/47874) ([Amos Bird](https://github.com/amosbird)).
* ThreadPool metrics introspection. [#47880](https://github.com/ClickHouse/ClickHouse/pull/47880) ([Azat Khuzhin](https://github.com/azat)).
* Add `WriteBufferFromS3Microseconds` and `WriteBufferFromS3RequestsErrors` profile events. [#47885](https://github.com/ClickHouse/ClickHouse/pull/47885) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `--link` and `--noninteractive` (`-y`) options to clickhouse install. Closes [#47750](https://github.com/ClickHouse/ClickHouse/issues/47750). [#47887](https://github.com/ClickHouse/ClickHouse/pull/47887) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix decimal-256 text output issue on s390x. [#47932](https://github.com/ClickHouse/ClickHouse/pull/47932) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Fixed `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix case when (optional) path is not added to encrypted disk configuration. [#47981](https://github.com/ClickHouse/ClickHouse/pull/47981) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add *OrNull() and *OrZero() variants for parseDateTime(), add alias "str_to_date" for MySQL parity. [#48000](https://github.com/ClickHouse/ClickHouse/pull/48000) ([Robert Schulze](https://github.com/rschu1ze)).
* Improve the code around `background_..._pool_size` settings reading. It should be configured via the main server configuration file. [#48055](https://github.com/ClickHouse/ClickHouse/pull/48055) ([filimonov](https://github.com/filimonov)).
* Support for cte in parameterized views Implementation: * Updated to allow query parameters while evaluating scalar subqueries. Testing: * Added test case with cte for parameterized view. [#48065](https://github.com/ClickHouse/ClickHouse/pull/48065) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add `NOSIGN` keyword for S3 table function and storage engine to avoid signing requests with provided credentials. Add `no_sign_request` config for all functionalities using S3. [#48092](https://github.com/ClickHouse/ClickHouse/pull/48092) ([Antonio Andelic](https://github.com/antonio2368)).
* Support big integers `(U)Int128/(U)Int256`, `Map` with any key type and `DateTime64` with any precision (not only 3 and 6). [#48119](https://github.com/ClickHouse/ClickHouse/pull/48119) ([Kruglov Pavel](https://github.com/Avogar)).
* Support more ClickHouse types in MsgPack format: (U)Int128/(U)Int256, Enum8(16), Date32, Decimal(32|64|128|256), Tuples. [#48124](https://github.com/ClickHouse/ClickHouse/pull/48124) ([Kruglov Pavel](https://github.com/Avogar)).
* The output of some SHOW ... statements is now sorted. [#48127](https://github.com/ClickHouse/ClickHouse/pull/48127) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow skipping errors related to unknown enum values in row input formats. [#48133](https://github.com/ClickHouse/ClickHouse/pull/48133) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `allow_distributed_ddl_queries` option to disallow distributed DDL queries for the cluster in the config. [#48171](https://github.com/ClickHouse/ClickHouse/pull/48171) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Determine the hosts' order in `SHOW CLUSTER` query, a followup for [#48127](https://github.com/ClickHouse/ClickHouse/issues/48127) and [#46240](https://github.com/ClickHouse/ClickHouse/issues/46240). [#48235](https://github.com/ClickHouse/ClickHouse/pull/48235) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Build/Testing/Packaging Improvement
* Split stress test and backward compatibility check (now Upgrade check). [#44879](https://github.com/ClickHouse/ClickHouse/pull/44879) ([Kruglov Pavel](https://github.com/Avogar)).
* Use sccache as a replacement for ccache and using S3 as cache backend. [#46240](https://github.com/ClickHouse/ClickHouse/pull/46240) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Updated Ubuntu Image. [#46784](https://github.com/ClickHouse/ClickHouse/pull/46784) ([Julio Jimenez](https://github.com/juliojimenez)).
* Adds a prompt to allow the removal of an existing `clickhouse` download when using "curl | sh" download of ClickHouse. Prompt is "ClickHouse binary clickhouse already exists. Overwrite? [y/N] ". [#46859](https://github.com/ClickHouse/ClickHouse/pull/46859) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Fix error during server startup on old distros (e.g. Amazon Linux 2) and on ARM that glibc 2.28 symbols are not found. [#47008](https://github.com/ClickHouse/ClickHouse/pull/47008) ([Robert Schulze](https://github.com/rschu1ze)).
* Clang 16 is set to release in the next few days, making it an opportune time to update. [#47027](https://github.com/ClickHouse/ClickHouse/pull/47027) ([Amos Bird](https://github.com/amosbird)).
* Added a CI check which ensures ClickHouse can run with an old glibc on ARM. [#47063](https://github.com/ClickHouse/ClickHouse/pull/47063) ([Robert Schulze](https://github.com/rschu1ze)).
* ClickHouse now builds with C++23. [#47424](https://github.com/ClickHouse/ClickHouse/pull/47424) ([Robert Schulze](https://github.com/rschu1ze)).
* Fixed issue with starting `clickhouse-test` against custom clickhouse binary with `-b`. ... [#47578](https://github.com/ClickHouse/ClickHouse/pull/47578) ([Vasily Nemkov](https://github.com/Enmk)).
* Add a style check to prevent incorrect usage of the `NDEBUG` macro. [#47699](https://github.com/ClickHouse/ClickHouse/pull/47699) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build a little. [#47714](https://github.com/ClickHouse/ClickHouse/pull/47714) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump vectorscan to 5.4.9. [#47955](https://github.com/ClickHouse/ClickHouse/pull/47955) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a unit test to assert arrow fatal logging does not abort. It covers the changes in https://github.com/ClickHouse/arrow/pull/16. [#47958](https://github.com/ClickHouse/ClickHouse/pull/47958) ([Arthur Passos](https://github.com/arthurpassos)).
* Restore ability of native macos debug server build to start (this time for real). [#48050](https://github.com/ClickHouse/ClickHouse/pull/48050) ([Robert Schulze](https://github.com/rschu1ze)).
* Functional tests will trigger JIT compilation more frequently, in a randomized fashion. See [#48120](https://github.com/ClickHouse/ClickHouse/issues/48120). [#48196](https://github.com/ClickHouse/ClickHouse/pull/48196) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `clickhouse/clickhouse-keeper` image used to be pushed only with tags `-alpine`, e.g. `latest-alpine`. As it was suggested in https://github.com/ClickHouse/examples/pull/2, now it will be pushed as suffixless too. [#48236](https://github.com/ClickHouse/ClickHouse/pull/48236) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix create materialized view with MaterializedPostgreSQL [#40807](https://github.com/ClickHouse/ClickHouse/pull/40807) ([Maksim Buren](https://github.com/maks-buren630501)).
* Fix formats parser resetting, test processing bad messages in kafka [#45693](https://github.com/ClickHouse/ClickHouse/pull/45693) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix several `RENAME COLUMN` bugs. [#45911](https://github.com/ClickHouse/ClickHouse/pull/45911) ([alesapin](https://github.com/alesapin)).
* Fix data size calculation in Keeper [#46086](https://github.com/ClickHouse/ClickHouse/pull/46086) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixes for 993 [#46384](https://github.com/ClickHouse/ClickHouse/pull/46384) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect alias recursion in QueryNormalizer [#46609](https://github.com/ClickHouse/ClickHouse/pull/46609) ([Raúl Marín](https://github.com/Algunenano)).
* Fix IPv4/IPv6 serialization/deserialization in binary formats [#46616](https://github.com/ClickHouse/ClickHouse/pull/46616) ([Kruglov Pavel](https://github.com/Avogar)).
* ActionsDAG: do not change result of and() during optimization [#46653](https://github.com/ClickHouse/ClickHouse/pull/46653) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix queries cancellation when a client dies [#46681](https://github.com/ClickHouse/ClickHouse/pull/46681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix arithmetic operations in aggregate optimization [#46705](https://github.com/ClickHouse/ClickHouse/pull/46705) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible clickhouse-local abort on JSONEachRow schema inference [#46731](https://github.com/ClickHouse/ClickHouse/pull/46731) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix changing an expired role [#46772](https://github.com/ClickHouse/ClickHouse/pull/46772) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix combined PREWHERE column accumulated from multiple steps [#46785](https://github.com/ClickHouse/ClickHouse/pull/46785) ([Alexander Gololobov](https://github.com/davenger)).
* Use initial range for fetching file size in HTTP read buffer [#46824](https://github.com/ClickHouse/ClickHouse/pull/46824) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix progress bar with URL [#46830](https://github.com/ClickHouse/ClickHouse/pull/46830) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not allow const and non-deterministic secondary indexes [#46839](https://github.com/ClickHouse/ClickHouse/pull/46839) ([Anton Popov](https://github.com/CurtizJ)).
* Fix MSan report in `maxIntersections` function [#46847](https://github.com/ClickHouse/ClickHouse/pull/46847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a bug in `Map` data type [#46856](https://github.com/ClickHouse/ClickHouse/pull/46856) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong results of some LIKE searches when the LIKE pattern contains quoted non-quotable characters [#46875](https://github.com/ClickHouse/ClickHouse/pull/46875) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix - WITH FILL would produce abort when FillingTransform processing empty block [#46897](https://github.com/ClickHouse/ClickHouse/pull/46897) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix date and int inference from string in JSON [#46972](https://github.com/ClickHouse/ClickHouse/pull/46972) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix bug in zero-copy replication disk choice during fetch [#47010](https://github.com/ClickHouse/ClickHouse/pull/47010) ([alesapin](https://github.com/alesapin)).
* Fix typo in systemd service definition [#47051](https://github.com/ClickHouse/ClickHouse/pull/47051) ([Palash Goel](https://github.com/palash-goel)).
* Fix NOT_IMPLEMENTED error with CROSS JOIN and algorithm = auto [#47068](https://github.com/ClickHouse/ClickHouse/pull/47068) ([Vladimir C](https://github.com/vdimir)).
* Fix the problem that a `ReplicatedMergeTree` table failed to insert two similar pieces of data when the `part_type` is configured as `InMemory` mode. [#47121](https://github.com/ClickHouse/ClickHouse/pull/47121) ([liding1992](https://github.com/liding1992)).
* External dictionaries / library-bridge: Fix error "unknown library method 'extDict_libClone'" [#47136](https://github.com/ClickHouse/ClickHouse/pull/47136) ([alex filatov](https://github.com/phil-88)).
* Fix race in grace hash join with limit [#47153](https://github.com/ClickHouse/ClickHouse/pull/47153) ([Vladimir C](https://github.com/vdimir)).
* Fix concrete columns PREWHERE support [#47154](https://github.com/ClickHouse/ClickHouse/pull/47154) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible deadlock in QueryStatus [#47161](https://github.com/ClickHouse/ClickHouse/pull/47161) ([Kruglov Pavel](https://github.com/Avogar)).
* Backup_Restore_concurrency_check_node [#47216](https://github.com/ClickHouse/ClickHouse/pull/47216) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Forbid insert select for the same StorageJoin [#47260](https://github.com/ClickHouse/ClickHouse/pull/47260) ([Vladimir C](https://github.com/vdimir)).
* Skip merged partitions for `min_age_to_force_merge_seconds` merges [#47303](https://github.com/ClickHouse/ClickHouse/pull/47303) ([Antonio Andelic](https://github.com/antonio2368)).
* Modify find_first_symbols so it works as expected for find_first_not_symbols [#47304](https://github.com/ClickHouse/ClickHouse/pull/47304) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix big numbers inference in CSV [#47410](https://github.com/ClickHouse/ClickHouse/pull/47410) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable logical expression optimizer for expression with aliases. [#47451](https://github.com/ClickHouse/ClickHouse/pull/47451) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove a feature [#47456](https://github.com/ClickHouse/ClickHouse/pull/47456) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error in `decodeURLComponent` [#47457](https://github.com/ClickHouse/ClickHouse/pull/47457) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Fix query parameters [#47488](https://github.com/ClickHouse/ClickHouse/pull/47488) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Parameterized view bug fix 47287 47247 [#47495](https://github.com/ClickHouse/ClickHouse/pull/47495) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fuzzer of data formats [#47519](https://github.com/ClickHouse/ClickHouse/pull/47519) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix monotonicity check for DateTime64 [#47526](https://github.com/ClickHouse/ClickHouse/pull/47526) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix block structure mismatch for nullable LowCardinality column [#47537](https://github.com/ClickHouse/ClickHouse/pull/47537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Proper fix for bug in parquet, revert reverted [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) [#47538](https://github.com/ClickHouse/ClickHouse/pull/47538) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix BSONEachRow parallel parsing when document size is invalid [#47540](https://github.com/ClickHouse/ClickHouse/pull/47540) ([Kruglov Pavel](https://github.com/Avogar)).
* Preserve error in system.distribution_queue on SYSTEM FLUSH DISTRIBUTED [#47541](https://github.com/ClickHouse/ClickHouse/pull/47541) ([Azat Khuzhin](https://github.com/azat)).
* Revert "Revert "Backup_Restore_concurrency_check_node"" [#47586](https://github.com/ClickHouse/ClickHouse/pull/47586) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Check for duplicate column in BSONEachRow format [#47609](https://github.com/ClickHouse/ClickHouse/pull/47609) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wait for zero copy lock during move [#47631](https://github.com/ClickHouse/ClickHouse/pull/47631) ([alesapin](https://github.com/alesapin)).
* Fix aggregation by partitions [#47634](https://github.com/ClickHouse/ClickHouse/pull/47634) ([Nikita Taranov](https://github.com/nickitat)).
* Fix bug in tuple as array serialization in BSONEachRow format [#47690](https://github.com/ClickHouse/ClickHouse/pull/47690) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in polygonsSymDifferenceCartesian [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Fix reading from storage `File` compressed files with `zlib` and `gzip` compression [#47796](https://github.com/ClickHouse/ClickHouse/pull/47796) ([Anton Popov](https://github.com/CurtizJ)).
* Improve empty query detection for PostgreSQL (for pgx golang driver) [#47854](https://github.com/ClickHouse/ClickHouse/pull/47854) ([Azat Khuzhin](https://github.com/azat)).
* Fix DateTime monotonicity check for LowCardinality [#47860](https://github.com/ClickHouse/ClickHouse/pull/47860) ([Antonio Andelic](https://github.com/antonio2368)).
* Use restore_threads (not backup_threads) for RESTORE ASYNC [#47861](https://github.com/ClickHouse/ClickHouse/pull/47861) ([Azat Khuzhin](https://github.com/azat)).
* Fix DROP COLUMN with ReplicatedMergeTree containing projections [#47883](https://github.com/ClickHouse/ClickHouse/pull/47883) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix for Replicated database recovery [#47901](https://github.com/ClickHouse/ClickHouse/pull/47901) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Hotfix for too verbose warnings in HTTP [#47903](https://github.com/ClickHouse/ClickHouse/pull/47903) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix "Field value too long" in catboostEvaluate() [#47970](https://github.com/ClickHouse/ClickHouse/pull/47970) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix [#36971](https://github.com/ClickHouse/ClickHouse/issues/36971): Watchdog: exit with non-zero code if child process exits [#47973](https://github.com/ClickHouse/ClickHouse/pull/47973) ([Коренберг Марк](https://github.com/socketpair)).
* Fix for index file cidx is unexpectedly long [#48010](https://github.com/ClickHouse/ClickHouse/pull/48010) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* fix MaterializedPostgreSQL query to get attributes (replica-identity) [#48015](https://github.com/ClickHouse/ClickHouse/pull/48015) ([Solomatov Sergei](https://github.com/solomatovs)).
* parseDateTime(): Fix UB (signed integer overflow) [#48019](https://github.com/ClickHouse/ClickHouse/pull/48019) ([Robert Schulze](https://github.com/rschu1ze)).
* Use uniq names for Records in Avro to avoid reusing its schema [#48057](https://github.com/ClickHouse/ClickHouse/pull/48057) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in explain graph with StorageMerge [#48102](https://github.com/ClickHouse/ClickHouse/pull/48102) ([Vladimir C](https://github.com/vdimir)).
* Correctly set TCP/HTTP socket timeouts in Keeper [#48108](https://github.com/ClickHouse/ClickHouse/pull/48108) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible member call on null pointer in Avro format [#48184](https://github.com/ClickHouse/ClickHouse/pull/48184) ([Kruglov Pavel](https://github.com/Avogar)).
#### Build Improvement
* Update krb5 to 1.20.1-final to mitigate CVE-2022-42898. [#46485](https://github.com/ClickHouse/ClickHouse/pull/46485) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Fixed random crash issues caused by bad pointers in libunwind for s390x. [#46755](https://github.com/ClickHouse/ClickHouse/pull/46755) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed http xz compression issue for s390x. [#46832](https://github.com/ClickHouse/ClickHouse/pull/46832) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed murmurhash function for s390x. [#47036](https://github.com/ClickHouse/ClickHouse/pull/47036) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed halfMD5 and broken cityHash function for s390x. [#47115](https://github.com/ClickHouse/ClickHouse/pull/47115) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed farmhash functions for s390x. [#47223](https://github.com/ClickHouse/ClickHouse/pull/47223) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed endian issue in hashing tuples for s390x. [#47371](https://github.com/ClickHouse/ClickHouse/pull/47371) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fixed SipHash integer hashing issue and byte order issue in random integer data from GenerateRandom storage engine for s390x. [#47576](https://github.com/ClickHouse/ClickHouse/pull/47576) ([Harry Lee](https://github.com/HarryLeeIBM)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Fix several `RENAME COLUMN` bugs."'. [#46909](https://github.com/ClickHouse/ClickHouse/pull/46909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add join_algorithm='grace_hash' to stress tests"'. [#46988](https://github.com/ClickHouse/ClickHouse/pull/46988) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* NO CL ENTRY: 'Revert "Give users option of overwriting"'. [#47169](https://github.com/ClickHouse/ClickHouse/pull/47169) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "standardize admonitions"'. [#47413](https://github.com/ClickHouse/ClickHouse/pull/47413) ([Rich Raposa](https://github.com/rfraposa)).
* NO CL ENTRY: 'Revert "Backup_Restore_concurrency_check_node"'. [#47581](https://github.com/ClickHouse/ClickHouse/pull/47581) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Update storing-data.md'. [#47598](https://github.com/ClickHouse/ClickHouse/pull/47598) ([San](https://github.com/santrancisco)).
* NO CL ENTRY: 'Revert "Fix BSONEachRow parallel parsing when document size is invalid"'. [#47672](https://github.com/ClickHouse/ClickHouse/pull/47672) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "New navigation"'. [#47694](https://github.com/ClickHouse/ClickHouse/pull/47694) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Analyzer planner fixes before enable by default"'. [#47721](https://github.com/ClickHouse/ClickHouse/pull/47721) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Analyzer planner fixes before enable by default""'. [#47748](https://github.com/ClickHouse/ClickHouse/pull/47748) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Add sanity checks for writing number in variable length format"'. [#47850](https://github.com/ClickHouse/ClickHouse/pull/47850) ([Robert Schulze](https://github.com/rschu1ze)).
* NO CL ENTRY: 'Revert "Revert "Revert "Backup_Restore_concurrency_check_node"""'. [#47963](https://github.com/ClickHouse/ClickHouse/pull/47963) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Test differences between using materialize_ttl_recalculate_only=1/0 [#45304](https://github.com/ClickHouse/ClickHouse/pull/45304) ([Jordi Villar](https://github.com/jrdi)).
* Fix query in stress script [#45480](https://github.com/ClickHouse/ClickHouse/pull/45480) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Add join_algorithm='grace_hash' to stress tests [#45607](https://github.com/ClickHouse/ClickHouse/pull/45607) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Support `group_by_use_nulls` setting in new analyzer [#45910](https://github.com/ClickHouse/ClickHouse/pull/45910) ([Dmitry Novik](https://github.com/novikd)).
* Randomize setting `ratio_of_defaults_for_sparse_serialization` [#46118](https://github.com/ClickHouse/ClickHouse/pull/46118) ([Anton Popov](https://github.com/CurtizJ)).
* Add CrossToInnerJoinPass [#46408](https://github.com/ClickHouse/ClickHouse/pull/46408) ([Vladimir C](https://github.com/vdimir)).
* Fix flakiness of test_backup_restore_on_cluster/test_disallow_concurrency [#46517](https://github.com/ClickHouse/ClickHouse/pull/46517) ([Azat Khuzhin](https://github.com/azat)).
* Map field to string fix [#46618](https://github.com/ClickHouse/ClickHouse/pull/46618) ([Maksim Kita](https://github.com/kitaisreal)).
* Enable perf tests added in [#45364](https://github.com/ClickHouse/ClickHouse/issues/45364) [#46623](https://github.com/ClickHouse/ClickHouse/pull/46623) ([Nikita Taranov](https://github.com/nickitat)).
* Logical expression optimizer in new analyzer [#46644](https://github.com/ClickHouse/ClickHouse/pull/46644) ([Antonio Andelic](https://github.com/antonio2368)).
* Named collections: finish replacing old code for storages [#46647](https://github.com/ClickHouse/ClickHouse/pull/46647) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make tiny improvements [#46659](https://github.com/ClickHouse/ClickHouse/pull/46659) ([ltrk2](https://github.com/ltrk2)).
* Fix openssl/s390x build (setenv + link order) [#46684](https://github.com/ClickHouse/ClickHouse/pull/46684) ([Boris Kuschel](https://github.com/bkuschel)).
* Analyzer AutoFinalOnQueryPass fix [#46729](https://github.com/ClickHouse/ClickHouse/pull/46729) ([Maksim Kita](https://github.com/kitaisreal)).
* Mark failed build reports as pending on reruns [#46736](https://github.com/ClickHouse/ClickHouse/pull/46736) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Do not reanalyze expressions from aggregation in projection [#46738](https://github.com/ClickHouse/ClickHouse/pull/46738) ([Vladimir C](https://github.com/vdimir)).
* Update CHANGELOG.md [#46766](https://github.com/ClickHouse/ClickHouse/pull/46766) ([Ilya Yatsishin](https://github.com/qoega)).
* Poco: Remove some dead code [#46768](https://github.com/ClickHouse/ClickHouse/pull/46768) ([Robert Schulze](https://github.com/rschu1ze)).
* More concise logging at trace level for PREWHERE steps [#46771](https://github.com/ClickHouse/ClickHouse/pull/46771) ([Alexander Gololobov](https://github.com/davenger)).
* Follow-up to [#41534](https://github.com/ClickHouse/ClickHouse/issues/41534) [#46775](https://github.com/ClickHouse/ClickHouse/pull/46775) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix timeout for all expect tests (wrong usage of expect_after timeout) [#46779](https://github.com/ClickHouse/ClickHouse/pull/46779) ([Azat Khuzhin](https://github.com/azat)).
* Reduce updates of Mergeable Check [#46781](https://github.com/ClickHouse/ClickHouse/pull/46781) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Updated Slack invite link [#46783](https://github.com/ClickHouse/ClickHouse/pull/46783) ([clickhouse-adrianfraguela](https://github.com/clickhouse-adrianfraguela)).
* Print all stacktraces in hung check [#46787](https://github.com/ClickHouse/ClickHouse/pull/46787) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Quick temporary fix for stress tests [#46789](https://github.com/ClickHouse/ClickHouse/pull/46789) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update version after release [#46792](https://github.com/ClickHouse/ClickHouse/pull/46792) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v23.2.1.2537-stable [#46794](https://github.com/ClickHouse/ClickHouse/pull/46794) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Remove ZSTD version from CMake output [#46796](https://github.com/ClickHouse/ClickHouse/pull/46796) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v22.11.6.44-stable [#46801](https://github.com/ClickHouse/ClickHouse/pull/46801) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* CMake: Add best effort checks that the build machine isn't too old [#46803](https://github.com/ClickHouse/ClickHouse/pull/46803) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix async reading pipeline when small limit is present [#46804](https://github.com/ClickHouse/ClickHouse/pull/46804) ([Nikita Taranov](https://github.com/nickitat)).
* Cleanup string search code [#46814](https://github.com/ClickHouse/ClickHouse/pull/46814) ([Robert Schulze](https://github.com/rschu1ze)).
* Stateless cmake version [#46821](https://github.com/ClickHouse/ClickHouse/pull/46821) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* refine regexp tree dictionary [#46822](https://github.com/ClickHouse/ClickHouse/pull/46822) ([Han Fei](https://github.com/hanfei1991)).
* Non-significant change [#46844](https://github.com/ClickHouse/ClickHouse/pull/46844) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a trap [#46845](https://github.com/ClickHouse/ClickHouse/pull/46845) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better handling of fatal errors [#46846](https://github.com/ClickHouse/ClickHouse/pull/46846) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#43184](https://github.com/ClickHouse/ClickHouse/issues/43184) [#46848](https://github.com/ClickHouse/ClickHouse/pull/46848) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong function name [#46849](https://github.com/ClickHouse/ClickHouse/pull/46849) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#45214](https://github.com/ClickHouse/ClickHouse/issues/45214) [#46850](https://github.com/ClickHouse/ClickHouse/pull/46850) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Final fixes for expect tests [#46857](https://github.com/ClickHouse/ClickHouse/pull/46857) ([Azat Khuzhin](https://github.com/azat)).
* Small optimization of LIKE patterns with > 1 trailing % [#46869](https://github.com/ClickHouse/ClickHouse/pull/46869) ([Robert Schulze](https://github.com/rschu1ze)).
* Add new metrics to system.asynchronous_metrics [#46886](https://github.com/ClickHouse/ClickHouse/pull/46886) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky `test_concurrent_queries_restriction_by_query_kind` [#46887](https://github.com/ClickHouse/ClickHouse/pull/46887) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test test_async_backups_to_same_destination. [#46888](https://github.com/ClickHouse/ClickHouse/pull/46888) ([Vitaly Baranov](https://github.com/vitlibar)).
* Make ASTSelectQuery::formatImpl() more robust [#46889](https://github.com/ClickHouse/ClickHouse/pull/46889) ([Robert Schulze](https://github.com/rschu1ze)).
* tests: fix 02116_interactive_hello for "official build" [#46911](https://github.com/ClickHouse/ClickHouse/pull/46911) ([Azat Khuzhin](https://github.com/azat)).
* Fix some expect tests leftovers and enable them in fasttest [#46915](https://github.com/ClickHouse/ClickHouse/pull/46915) ([Azat Khuzhin](https://github.com/azat)).
* Increase ddl timeout for DROP statement in backup restore tests [#46920](https://github.com/ClickHouse/ClickHouse/pull/46920) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* A better alternative to [#46344](https://github.com/ClickHouse/ClickHouse/issues/46344) [#46921](https://github.com/ClickHouse/ClickHouse/pull/46921) ([Robert Schulze](https://github.com/rschu1ze)).
* Code review from @tavplubix [#46922](https://github.com/ClickHouse/ClickHouse/pull/46922) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Planner: trivial count optimization [#46923](https://github.com/ClickHouse/ClickHouse/pull/46923) ([Igor Nikonov](https://github.com/devcrafter)).
* Typo: SIZES_OF_ARRAYS_DOESNT_MATCH --> SIZES_OF_ARRAYS_DONT_MATCH [#46940](https://github.com/ClickHouse/ClickHouse/pull/46940) ([Robert Schulze](https://github.com/rschu1ze)).
* Another fix for clone() for ASTColumnMatchers [#46947](https://github.com/ClickHouse/ClickHouse/pull/46947) ([Nikolay Degterinsky](https://github.com/evillique)).
* Un-inline likePatternToRegexp() [#46950](https://github.com/ClickHouse/ClickHouse/pull/46950) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix missing format_description [#46959](https://github.com/ClickHouse/ClickHouse/pull/46959) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* ARM: Activate LDAPR with -march flag instead via -XClang [#46960](https://github.com/ClickHouse/ClickHouse/pull/46960) ([Robert Schulze](https://github.com/rschu1ze)).
* Preset description on the tweak reset [#46963](https://github.com/ClickHouse/ClickHouse/pull/46963) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version_date.tsv and changelogs after v22.3.19.6-lts [#46964](https://github.com/ClickHouse/ClickHouse/pull/46964) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.8.14.53-lts [#46969](https://github.com/ClickHouse/ClickHouse/pull/46969) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Better exception messages when schema_inference_hints is ill-formatted [#46971](https://github.com/ClickHouse/ClickHouse/pull/46971) ([Kruglov Pavel](https://github.com/Avogar)).
* Decrease log level in "disks" [#46976](https://github.com/ClickHouse/ClickHouse/pull/46976) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Change the cherry-pick PR body [#46977](https://github.com/ClickHouse/ClickHouse/pull/46977) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Rename recent stateless tests to fix order [#46991](https://github.com/ClickHouse/ClickHouse/pull/46991) ([Kruglov Pavel](https://github.com/Avogar)).
* Pass headers from StorageURL to WriteBufferFromHTTP [#46996](https://github.com/ClickHouse/ClickHouse/pull/46996) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Change level log in executeQuery [#46997](https://github.com/ClickHouse/ClickHouse/pull/46997) ([Andrey Bystrov](https://github.com/AndyBys)).
* Add thevar1able to trusted contributors [#46998](https://github.com/ClickHouse/ClickHouse/pull/46998) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Use /etc/default/clickhouse in systemd too [#47003](https://github.com/ClickHouse/ClickHouse/pull/47003) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix tmp_path_template in HTTPHandler::processQuery [#47007](https://github.com/ClickHouse/ClickHouse/pull/47007) ([Vladimir C](https://github.com/vdimir)).
* Fix flaky azure test [#47011](https://github.com/ClickHouse/ClickHouse/pull/47011) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Temporary enable force_sync for keeper in CI [#47024](https://github.com/ClickHouse/ClickHouse/pull/47024) ([alesapin](https://github.com/alesapin)).
* ActionsDAG: do not change result of and() during optimization - part 2 [#47028](https://github.com/ClickHouse/ClickHouse/pull/47028) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Add upgrade check to stateful dependent field [#47031](https://github.com/ClickHouse/ClickHouse/pull/47031) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable path check in SQLite storage for clickhouse-local [#47052](https://github.com/ClickHouse/ClickHouse/pull/47052) ([Nikolay Degterinsky](https://github.com/evillique)).
* Terminate long-running offline non-busy runners in EC2 [#47064](https://github.com/ClickHouse/ClickHouse/pull/47064) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix Keeper with `force_sync = false` [#47065](https://github.com/ClickHouse/ClickHouse/pull/47065) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v23.2.2.20-stable [#47069](https://github.com/ClickHouse/ClickHouse/pull/47069) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.1.4.58-stable [#47070](https://github.com/ClickHouse/ClickHouse/pull/47070) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.12.4.76-stable [#47074](https://github.com/ClickHouse/ClickHouse/pull/47074) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix empty result when selection from only one side of join in analyzer [#47093](https://github.com/ClickHouse/ClickHouse/pull/47093) ([Vladimir C](https://github.com/vdimir)).
* Suppress "Cannot flush" for Distributed tables in upgrade check [#47095](https://github.com/ClickHouse/ClickHouse/pull/47095) ([Azat Khuzhin](https://github.com/azat)).
* Make stacktraces in hung check more readable [#47096](https://github.com/ClickHouse/ClickHouse/pull/47096) ([Alexander Tokmakov](https://github.com/tavplubix)).
* release lambda resources before detaching thread group [#47098](https://github.com/ClickHouse/ClickHouse/pull/47098) ([Sema Checherinda](https://github.com/CheSema)).
* Analyzer Planner fixes before enable by default [#47101](https://github.com/ClickHouse/ClickHouse/pull/47101) ([Maksim Kita](https://github.com/kitaisreal)).
* do flushUntrackedMemory when context switches [#47102](https://github.com/ClickHouse/ClickHouse/pull/47102) ([Sema Checherinda](https://github.com/CheSema)).
* fix: keeper systemd service file include invalid inline comment [#47105](https://github.com/ClickHouse/ClickHouse/pull/47105) ([SuperDJY](https://github.com/cmsxbc)).
* Add code for autoscaling lambda [#47107](https://github.com/ClickHouse/ClickHouse/pull/47107) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Enable lightweight delete support by default [#47109](https://github.com/ClickHouse/ClickHouse/pull/47109) ([Alexander Gololobov](https://github.com/davenger)).
* Update typing for a new PyGithub version [#47123](https://github.com/ClickHouse/ClickHouse/pull/47123) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Shorten some code with CTAD [#47139](https://github.com/ClickHouse/ClickHouse/pull/47139) ([Robert Schulze](https://github.com/rschu1ze)).
* Make 01710_projections more stable. [#47145](https://github.com/ClickHouse/ClickHouse/pull/47145) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* fix_JSON_searchField [#47147](https://github.com/ClickHouse/ClickHouse/pull/47147) ([Aleksei Tikhomirov](https://github.com/aletik256)).
* Mark 01771_bloom_filter_not_has as no-parallel and long [#47148](https://github.com/ClickHouse/ClickHouse/pull/47148) ([Azat Khuzhin](https://github.com/azat)).
* Use unique names and paths in `test_replicated_database` [#47152](https://github.com/ClickHouse/ClickHouse/pull/47152) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add stupid retries in clickhouse-test health check. [#47158](https://github.com/ClickHouse/ClickHouse/pull/47158) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* 02346_full_text_search.sql: Add result separators to simplify analysis [#47166](https://github.com/ClickHouse/ClickHouse/pull/47166) ([Robert Schulze](https://github.com/rschu1ze)).
* More correct handling of fatal errors [#47175](https://github.com/ClickHouse/ClickHouse/pull/47175) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update read in StorageMemory [#47180](https://github.com/ClickHouse/ClickHouse/pull/47180) ([Konstantin Morozov](https://github.com/k-morozov)).
* Doc update for mapFromArrays() [#47183](https://github.com/ClickHouse/ClickHouse/pull/47183) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix failure context for Upgrade check [#47191](https://github.com/ClickHouse/ClickHouse/pull/47191) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add support for different expected errors [#47196](https://github.com/ClickHouse/ClickHouse/pull/47196) ([Raúl Marín](https://github.com/Algunenano)).
* Fix ip coding on s390x [#47208](https://github.com/ClickHouse/ClickHouse/pull/47208) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Add real client (initiator server) address into the logs for interserver mode [#47214](https://github.com/ClickHouse/ClickHouse/pull/47214) ([Azat Khuzhin](https://github.com/azat)).
* Fix 01019_alter_materialized_view_consistent [#47215](https://github.com/ClickHouse/ClickHouse/pull/47215) ([Vladimir C](https://github.com/vdimir)).
* Fix RewriteArrayExistsToHasPass [#47225](https://github.com/ClickHouse/ClickHouse/pull/47225) ([Maksim Kita](https://github.com/kitaisreal)).
* Release shared ptrs after finishing a transaction [#47245](https://github.com/ClickHouse/ClickHouse/pull/47245) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add default constructor for `MultiReadResponse` [#47254](https://github.com/ClickHouse/ClickHouse/pull/47254) ([Antonio Andelic](https://github.com/antonio2368)).
* Join threads if exception happened in `ZooKeeperImpl` constructor [#47261](https://github.com/ClickHouse/ClickHouse/pull/47261) ([Antonio Andelic](https://github.com/antonio2368)).
* use std::lerp, constexpr hex.h [#47268](https://github.com/ClickHouse/ClickHouse/pull/47268) ([Mike Kot](https://github.com/myrrc)).
* Update version_date.tsv and changelogs after v23.2.3.17-stable [#47269](https://github.com/ClickHouse/ClickHouse/pull/47269) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix bug in zero copy replica which can lead to data loss [#47274](https://github.com/ClickHouse/ClickHouse/pull/47274) ([alesapin](https://github.com/alesapin)).
* Fix typo [#47282](https://github.com/ClickHouse/ClickHouse/pull/47282) ([Nikolay Degterinsky](https://github.com/evillique)).
* Follow-up to [#46681](https://github.com/ClickHouse/ClickHouse/issues/46681) [#47284](https://github.com/ClickHouse/ClickHouse/pull/47284) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test 02566_ipv4_ipv6_binary_formats [#47295](https://github.com/ClickHouse/ClickHouse/pull/47295) ([Kruglov Pavel](https://github.com/Avogar)).
* Set fixed index_granularity for test 00636 [#47298](https://github.com/ClickHouse/ClickHouse/pull/47298) ([Sema Checherinda](https://github.com/CheSema)).
* Add a manual trigger for release workflow [#47302](https://github.com/ClickHouse/ClickHouse/pull/47302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix 02570_fallback_from_async_insert [#47308](https://github.com/ClickHouse/ClickHouse/pull/47308) ([Vladimir C](https://github.com/vdimir)).
* Catch exceptions in LiveViewPeriodicRefreshTask [#47309](https://github.com/ClickHouse/ClickHouse/pull/47309) ([Vladimir C](https://github.com/vdimir)).
* Fix MergeTreeTransaction::isReadOnly [#47310](https://github.com/ClickHouse/ClickHouse/pull/47310) ([Vladimir C](https://github.com/vdimir)).
* Fix an assertion with implicit transactions in interserver mode [#47312](https://github.com/ClickHouse/ClickHouse/pull/47312) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `File exists` error in Upgrade check [#47314](https://github.com/ClickHouse/ClickHouse/pull/47314) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support transformQueryForExternalDatabase for analyzer [#47316](https://github.com/ClickHouse/ClickHouse/pull/47316) ([Vladimir C](https://github.com/vdimir)).
* Disable parallel format in health check [#47318](https://github.com/ClickHouse/ClickHouse/pull/47318) ([Ilya Yatsishin](https://github.com/qoega)).
* Analyzer - fix combine logic for limit expression and limit setting [#47324](https://github.com/ClickHouse/ClickHouse/pull/47324) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Suppress expected errors from test 01111 in Upgrade check [#47365](https://github.com/ClickHouse/ClickHouse/pull/47365) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix GROUPING function initialization for grouping sets [#47370](https://github.com/ClickHouse/ClickHouse/pull/47370) ([Dmitry Novik](https://github.com/novikd)).
* Add join_algorithm='grace_hash' to stress tests [#47372](https://github.com/ClickHouse/ClickHouse/pull/47372) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Fix 02343_group_by_use_nulls test in new analyzer [#47373](https://github.com/ClickHouse/ClickHouse/pull/47373) ([Dmitry Novik](https://github.com/novikd)).
* Disable 02368_cancel_write_into_hdfs in stress tests [#47382](https://github.com/ClickHouse/ClickHouse/pull/47382) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer planner fixes before enable by default [#47383](https://github.com/ClickHouse/ClickHouse/pull/47383) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix `ALTER CLEAR COLUMN` with sparse columns [#47384](https://github.com/ClickHouse/ClickHouse/pull/47384) ([Anton Popov](https://github.com/CurtizJ)).
* Fix: apply reading in order for distinct [#47385](https://github.com/ClickHouse/ClickHouse/pull/47385) ([Igor Nikonov](https://github.com/devcrafter)).
* add checks for ptr [#47398](https://github.com/ClickHouse/ClickHouse/pull/47398) ([Sema Checherinda](https://github.com/CheSema)).
* Remove distinct on top of MergingAggregatedStep [#47399](https://github.com/ClickHouse/ClickHouse/pull/47399) ([Igor Nikonov](https://github.com/devcrafter)).
* Update LRUFileCachePriority.cpp [#47411](https://github.com/ClickHouse/ClickHouse/pull/47411) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make test 02473_optimize_old_parts less flaky [#47416](https://github.com/ClickHouse/ClickHouse/pull/47416) ([Michael Kolupaev](https://github.com/al13n321)).
* Add test to prevent regressions when using bitmapHasAny [#47419](https://github.com/ClickHouse/ClickHouse/pull/47419) ([Jordi Villar](https://github.com/jrdi)).
* Update README.md [#47421](https://github.com/ClickHouse/ClickHouse/pull/47421) ([Tyler Hannan](https://github.com/tylerhannan)).
* Refactor query cache (make use of CacheBase) [#47428](https://github.com/ClickHouse/ClickHouse/pull/47428) ([Robert Schulze](https://github.com/rschu1ze)).
* Suppress Hung Check with UBsan [#47429](https://github.com/ClickHouse/ClickHouse/pull/47429) ([Alexander Tokmakov](https://github.com/tavplubix)).
* [docs] Document add async_insert_max_query_number [#47431](https://github.com/ClickHouse/ClickHouse/pull/47431) ([Antonio Bonuccelli](https://github.com/nellicus)).
* Apply settings for EXPLAIN earlier (in the same way we do for SELECT). [#47433](https://github.com/ClickHouse/ClickHouse/pull/47433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update version_date.tsv and changelogs after v23.2.4.12-stable [#47448](https://github.com/ClickHouse/ClickHouse/pull/47448) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix aggregation-in-order with aliases. [#47449](https://github.com/ClickHouse/ClickHouse/pull/47449) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix 01429_join_on_error_messages [#47450](https://github.com/ClickHouse/ClickHouse/pull/47450) ([Vladimir C](https://github.com/vdimir)).
* Update version_date.tsv and changelogs after v23.1.5.24-stable [#47452](https://github.com/ClickHouse/ClickHouse/pull/47452) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.12.5.34-stable [#47453](https://github.com/ClickHouse/ClickHouse/pull/47453) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Better error messages in ReplicatedMergeTreeAttachThread [#47454](https://github.com/ClickHouse/ClickHouse/pull/47454) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update version_date.tsv and changelogs after v22.8.15.23-lts [#47455](https://github.com/ClickHouse/ClickHouse/pull/47455) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Disable grace hash join in upgrade check [#47474](https://github.com/ClickHouse/ClickHouse/pull/47474) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Revert [#46622](https://github.com/ClickHouse/ClickHouse/issues/46622) (test_async_insert_memory) [#47476](https://github.com/ClickHouse/ClickHouse/pull/47476) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `00933_test_fix_extra_seek_on_compressed_cache` in releases. [#47490](https://github.com/ClickHouse/ClickHouse/pull/47490) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix long test `02371_select_projection_normal_agg.sql` [#47491](https://github.com/ClickHouse/ClickHouse/pull/47491) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Revert [#45878](https://github.com/ClickHouse/ClickHouse/issues/45878) and add a test [#47492](https://github.com/ClickHouse/ClickHouse/pull/47492) ([Kruglov Pavel](https://github.com/Avogar)).
* Planner JOIN TREE build fix [#47498](https://github.com/ClickHouse/ClickHouse/pull/47498) ([Maksim Kita](https://github.com/kitaisreal)).
* Better support of identifiers from compound expressions in analyzer [#47506](https://github.com/ClickHouse/ClickHouse/pull/47506) ([Anton Popov](https://github.com/CurtizJ)).
* Adapt some tests to pass with and without the analyzer [#47525](https://github.com/ClickHouse/ClickHouse/pull/47525) ([Raúl Marín](https://github.com/Algunenano)).
* Small enhancements [#47534](https://github.com/ClickHouse/ClickHouse/pull/47534) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support constants in INTERPOLATE clause (new analyzer) [#47539](https://github.com/ClickHouse/ClickHouse/pull/47539) ([Dmitry Novik](https://github.com/novikd)).
* Remove TOTALS handling in FillingTransform [#47542](https://github.com/ClickHouse/ClickHouse/pull/47542) ([Igor Nikonov](https://github.com/devcrafter)).
* Hide too noisy log messages, fix some tests [#47547](https://github.com/ClickHouse/ClickHouse/pull/47547) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix some flaky tests [#47553](https://github.com/ClickHouse/ClickHouse/pull/47553) ([Azat Khuzhin](https://github.com/azat)).
* remove counters for threads, fix negative counters [#47564](https://github.com/ClickHouse/ClickHouse/pull/47564) ([Sema Checherinda](https://github.com/CheSema)).
* Fix typo [#47565](https://github.com/ClickHouse/ClickHouse/pull/47565) ([hq1](https://github.com/aerosol)).
* Fixes for upgrade check [#47570](https://github.com/ClickHouse/ClickHouse/pull/47570) ([Azat Khuzhin](https://github.com/azat)).
* Change error code in case of columns definitions was empty in ODBC [#47573](https://github.com/ClickHouse/ClickHouse/pull/47573) ([Azat Khuzhin](https://github.com/azat)).
* Add missing SYSTEM FLUSH LOGS for log messages statistics [#47575](https://github.com/ClickHouse/ClickHouse/pull/47575) ([Azat Khuzhin](https://github.com/azat)).
* Fix performance regression in aggregation [#47582](https://github.com/ClickHouse/ClickHouse/pull/47582) ([Anton Popov](https://github.com/CurtizJ)).
* ReadFromMergeTree explain prewhere and row policy actions [#47583](https://github.com/ClickHouse/ClickHouse/pull/47583) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix possible failures of 01300_client_save_history_when_terminated_long [#47606](https://github.com/ClickHouse/ClickHouse/pull/47606) ([Azat Khuzhin](https://github.com/azat)).
* checksum: do not check inverted index files [#47607](https://github.com/ClickHouse/ClickHouse/pull/47607) ([save-my-heart](https://github.com/save-my-heart)).
* Add sanity checks for writing number in variable length format [#47608](https://github.com/ClickHouse/ClickHouse/pull/47608) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer planner fixes before enable by default [#47622](https://github.com/ClickHouse/ClickHouse/pull/47622) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix exception message in clickhouse-test [#47625](https://github.com/ClickHouse/ClickHouse/pull/47625) ([Nikolay Degterinsky](https://github.com/evillique)).
* FillingTransform: remove unnecessary indirection when accessing columns [#47632](https://github.com/ClickHouse/ClickHouse/pull/47632) ([Igor Nikonov](https://github.com/devcrafter)).
* fix typo in HashJoin insertion that enables debug code in release build [#46726](https://github.com/ClickHouse/ClickHouse/issues/46726) [#47647](https://github.com/ClickHouse/ClickHouse/pull/47647) ([jorisgio](https://github.com/jorisgio)).
* clang-tidy >= 15: write CheckOptions in dictionary format [#47648](https://github.com/ClickHouse/ClickHouse/pull/47648) ([Robert Schulze](https://github.com/rschu1ze)).
* CMake: Build ClickHouse w/o GNU extensions [#47651](https://github.com/ClickHouse/ClickHouse/pull/47651) ([Robert Schulze](https://github.com/rschu1ze)).
* Faster fasttest [#47654](https://github.com/ClickHouse/ClickHouse/pull/47654) ([Robert Schulze](https://github.com/rschu1ze)).
* Add background pools size metrics [#47656](https://github.com/ClickHouse/ClickHouse/pull/47656) ([Sergei Trifonov](https://github.com/serxa)).
* Improve ThreadPool [#47657](https://github.com/ClickHouse/ClickHouse/pull/47657) ([Vitaly Baranov](https://github.com/vitlibar)).
* cmake: remove support for gold linker [#47660](https://github.com/ClickHouse/ClickHouse/pull/47660) ([Robert Schulze](https://github.com/rschu1ze)).
* Updated events and recordings [#47668](https://github.com/ClickHouse/ClickHouse/pull/47668) ([clickhouse-adrianfraguela](https://github.com/clickhouse-adrianfraguela)).
* Follow-up to [#47660](https://github.com/ClickHouse/ClickHouse/issues/47660): Further removal of gold linker support [#47669](https://github.com/ClickHouse/ClickHouse/pull/47669) ([Robert Schulze](https://github.com/rschu1ze)).
* Enable parallel execution for two tests [#47670](https://github.com/ClickHouse/ClickHouse/pull/47670) ([Robert Schulze](https://github.com/rschu1ze)).
* Restore native macos build [#47673](https://github.com/ClickHouse/ClickHouse/pull/47673) ([Robert Schulze](https://github.com/rschu1ze)).
* CMake: Remove further cruft from build [#47680](https://github.com/ClickHouse/ClickHouse/pull/47680) ([Robert Schulze](https://github.com/rschu1ze)).
* fix test / remove hardcoded database [#47682](https://github.com/ClickHouse/ClickHouse/pull/47682) ([Denny Crane](https://github.com/den-crane)).
* Apply log_queries_cut_to_length in MergeTreeWhereOptimizer [#47684](https://github.com/ClickHouse/ClickHouse/pull/47684) ([Vladimir C](https://github.com/vdimir)).
* Fix logical error in evaluate constant expression [#47685](https://github.com/ClickHouse/ClickHouse/pull/47685) ([Vladimir C](https://github.com/vdimir)).
* Try making `test_keeper_mntr_data_size` less flaky [#47687](https://github.com/ClickHouse/ClickHouse/pull/47687) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix limit offset [#47688](https://github.com/ClickHouse/ClickHouse/pull/47688) ([flynn](https://github.com/ucasfl)).
* Fix startup on older systemd versions [#47689](https://github.com/ClickHouse/ClickHouse/pull/47689) ([Thomas Casteleyn](https://github.com/Hipska)).
* More random query id in tests [#47700](https://github.com/ClickHouse/ClickHouse/pull/47700) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add a style check for unsafe code [#47703](https://github.com/ClickHouse/ClickHouse/pull/47703) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make the code in Join less disgusting [#47712](https://github.com/ClickHouse/ClickHouse/pull/47712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixup git reference to LLVM [#47719](https://github.com/ClickHouse/ClickHouse/pull/47719) ([Robert Schulze](https://github.com/rschu1ze)).
* Preparation for libcxx(abi), llvm, clang-tidy 16 [#47722](https://github.com/ClickHouse/ClickHouse/pull/47722) ([Robert Schulze](https://github.com/rschu1ze)).
* Rename cfg parameter query_cache.size to query_cache.max_size [#47724](https://github.com/ClickHouse/ClickHouse/pull/47724) ([Robert Schulze](https://github.com/rschu1ze)).
* Add optimization for MemoryStorageStep [#47726](https://github.com/ClickHouse/ClickHouse/pull/47726) ([Konstantin Morozov](https://github.com/k-morozov)).
* Fix aggregation with constant key in planner [#47727](https://github.com/ClickHouse/ClickHouse/pull/47727) ([Dmitry Novik](https://github.com/novikd)).
* Disable setting in 02343_group_by_use_nulls_distributed (for new analyzer) [#47728](https://github.com/ClickHouse/ClickHouse/pull/47728) ([Dmitry Novik](https://github.com/novikd)).
* Add a test for [#21469](https://github.com/ClickHouse/ClickHouse/issues/21469) [#47736](https://github.com/ClickHouse/ClickHouse/pull/47736) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#23804](https://github.com/ClickHouse/ClickHouse/issues/23804) [#47737](https://github.com/ClickHouse/ClickHouse/pull/47737) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#18937](https://github.com/ClickHouse/ClickHouse/issues/18937) [#47738](https://github.com/ClickHouse/ClickHouse/pull/47738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#17756](https://github.com/ClickHouse/ClickHouse/issues/17756) [#47739](https://github.com/ClickHouse/ClickHouse/pull/47739) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#23162](https://github.com/ClickHouse/ClickHouse/issues/23162) [#47740](https://github.com/ClickHouse/ClickHouse/pull/47740) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* remove unused code [#47743](https://github.com/ClickHouse/ClickHouse/pull/47743) ([flynn](https://github.com/ucasfl)).
* Fix broken cross-compiled macos builds [#47744](https://github.com/ClickHouse/ClickHouse/pull/47744) ([Robert Schulze](https://github.com/rschu1ze)).
* Randomize query cache settings [#47749](https://github.com/ClickHouse/ClickHouse/pull/47749) ([Robert Schulze](https://github.com/rschu1ze)).
* Clarify steps for reopened cherry-pick PRs [#47755](https://github.com/ClickHouse/ClickHouse/pull/47755) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix ZK exception error message [#47757](https://github.com/ClickHouse/ClickHouse/pull/47757) ([Raúl Marín](https://github.com/Algunenano)).
* Add ComparisonTupleEliminationVisitor [#47758](https://github.com/ClickHouse/ClickHouse/pull/47758) ([Vladimir C](https://github.com/vdimir)).
* Add a fuse for backport branches w/o a created PR [#47760](https://github.com/ClickHouse/ClickHouse/pull/47760) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix partition ID byte order for s390x [#47769](https://github.com/ClickHouse/ClickHouse/pull/47769) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Stop `wait for quorum` retries on shutdown [#47770](https://github.com/ClickHouse/ClickHouse/pull/47770) ([Igor Nikonov](https://github.com/devcrafter)).
* More preparation for upgrade to libcxx(abi), llvm, clang-tidy 16 [#47771](https://github.com/ClickHouse/ClickHouse/pull/47771) ([Robert Schulze](https://github.com/rschu1ze)).
* Only valid Reviews.STATES overwrite existing reviews [#47789](https://github.com/ClickHouse/ClickHouse/pull/47789) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Apply black formatter to all python scripts [#47790](https://github.com/ClickHouse/ClickHouse/pull/47790) ([Anton Popov](https://github.com/CurtizJ)).
* Try fix window view test [#47791](https://github.com/ClickHouse/ClickHouse/pull/47791) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update test for nested lambdas [#47795](https://github.com/ClickHouse/ClickHouse/pull/47795) ([Dmitry Novik](https://github.com/novikd)).
* Decrease scale_down ratio for faster deflation [#47798](https://github.com/ClickHouse/ClickHouse/pull/47798) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix 993 and two other tests [#47802](https://github.com/ClickHouse/ClickHouse/pull/47802) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test 02417_opentelemetry_insert_on_distributed_table [#47811](https://github.com/ClickHouse/ClickHouse/pull/47811) ([Azat Khuzhin](https://github.com/azat)).
* Make 01086_odbc_roundtrip less flaky [#47820](https://github.com/ClickHouse/ClickHouse/pull/47820) ([Antonio Andelic](https://github.com/antonio2368)).
* Place short return before big block, improve logging [#47822](https://github.com/ClickHouse/ClickHouse/pull/47822) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* [FixTests] Remove wrong chassert() in UserDefinedSQLObjectsLoaderFromZooKeeper.cpp [#47839](https://github.com/ClickHouse/ClickHouse/pull/47839) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix test test_replicated_merge_tree_encryption_codec [#47851](https://github.com/ClickHouse/ClickHouse/pull/47851) ([Vitaly Baranov](https://github.com/vitlibar)).
* Allow injecting timeout errors on Keeper [#47856](https://github.com/ClickHouse/ClickHouse/pull/47856) ([Raúl Marín](https://github.com/Algunenano)).
* Comment stale cherry-pick PRs once a day to remind for resolving conflicts [#47857](https://github.com/ClickHouse/ClickHouse/pull/47857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Followup to [#47802](https://github.com/ClickHouse/ClickHouse/issues/47802) [#47864](https://github.com/ClickHouse/ClickHouse/pull/47864) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Slightly better error message [#47868](https://github.com/ClickHouse/ClickHouse/pull/47868) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Make test_server_reload non-parallel [#47871](https://github.com/ClickHouse/ClickHouse/pull/47871) ([Alexander Tokmakov](https://github.com/tavplubix)).
* aspell-dict.txt: keep sorted things sorted [#47878](https://github.com/ClickHouse/ClickHouse/pull/47878) ([Robert Schulze](https://github.com/rschu1ze)).
* throw exception when all retries exhausted [#47902](https://github.com/ClickHouse/ClickHouse/pull/47902) ([Sema Checherinda](https://github.com/CheSema)).
* Fix GRANT query formatting [#47908](https://github.com/ClickHouse/ClickHouse/pull/47908) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix exception type in arrayElement function [#47909](https://github.com/ClickHouse/ClickHouse/pull/47909) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix logical error in DistributedSink [#47916](https://github.com/ClickHouse/ClickHouse/pull/47916) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix terminate in parts check thread [#47917](https://github.com/ClickHouse/ClickHouse/pull/47917) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Limit keeper request batching by size in bytes [#47918](https://github.com/ClickHouse/ClickHouse/pull/47918) ([Alexander Gololobov](https://github.com/davenger)).
* Improve replicated user defined functions [#47919](https://github.com/ClickHouse/ClickHouse/pull/47919) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update 01072_window_view_multiple_columns_groupby.sh [#47928](https://github.com/ClickHouse/ClickHouse/pull/47928) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Added test. Closes [#12264](https://github.com/ClickHouse/ClickHouse/issues/12264) [#47931](https://github.com/ClickHouse/ClickHouse/pull/47931) ([Ilya Yatsishin](https://github.com/qoega)).
* Disallow concurrent backup restore test - removed SYSTEM SYNC [#47944](https://github.com/ClickHouse/ClickHouse/pull/47944) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Artifacts s3 prefix [#47945](https://github.com/ClickHouse/ClickHouse/pull/47945) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Set content-length for empty POST requests [#47950](https://github.com/ClickHouse/ClickHouse/pull/47950) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test `02050_client_profile_events` [#47951](https://github.com/ClickHouse/ClickHouse/pull/47951) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix tsan error lock-order-inversion [#47953](https://github.com/ClickHouse/ClickHouse/pull/47953) ([Kruglov Pavel](https://github.com/Avogar)).
* Update docs for parseDateTime() (follow-up to [#46815](https://github.com/ClickHouse/ClickHouse/issues/46815)) [#47959](https://github.com/ClickHouse/ClickHouse/pull/47959) ([Robert Schulze](https://github.com/rschu1ze)).
* Docs: Update secondary index example [#47961](https://github.com/ClickHouse/ClickHouse/pull/47961) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix compilation on MacOS [#47967](https://github.com/ClickHouse/ClickHouse/pull/47967) ([Jordi Villar](https://github.com/jrdi)).
* [Refactoring] Move information about current hosts and list of all hosts to BackupCoordination [#47971](https://github.com/ClickHouse/ClickHouse/pull/47971) ([Vitaly Baranov](https://github.com/vitlibar)).
* Stabilize tests for new function parseDateTimeInJodaSyntax [#47974](https://github.com/ClickHouse/ClickHouse/pull/47974) ([Robert Schulze](https://github.com/rschu1ze)).
* Docs: Fix links [#47976](https://github.com/ClickHouse/ClickHouse/pull/47976) ([Robert Schulze](https://github.com/rschu1ze)).
* Try fix rabbitmq test [#47987](https://github.com/ClickHouse/ClickHouse/pull/47987) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better type check in arrayElement function [#47989](https://github.com/ClickHouse/ClickHouse/pull/47989) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect code indentation [#48011](https://github.com/ClickHouse/ClickHouse/pull/48011) ([exmy](https://github.com/exmy)).
* CMake: Remove configuration of CMAKE_SHARED_LINKER_FLAGS [#48018](https://github.com/ClickHouse/ClickHouse/pull/48018) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove the old changelog script [#48042](https://github.com/ClickHouse/ClickHouse/pull/48042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix automatic indentation in the built-in UI SQL editor [#48045](https://github.com/ClickHouse/ClickHouse/pull/48045) ([Nikolay Degterinsky](https://github.com/evillique)).
* Rename `system.marked_dropped_tables` to `dropped_tables` [#48048](https://github.com/ClickHouse/ClickHouse/pull/48048) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Automatically correct some mistakes in the changelog [#48052](https://github.com/ClickHouse/ClickHouse/pull/48052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Docs: Document [FULL] keyword in SHOW TABLES [#48061](https://github.com/ClickHouse/ClickHouse/pull/48061) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix stateless tests numbers [#48063](https://github.com/ClickHouse/ClickHouse/pull/48063) ([Raúl Marín](https://github.com/Algunenano)).
* Docs: Update syntax of some SHOW queries [#48064](https://github.com/ClickHouse/ClickHouse/pull/48064) ([Robert Schulze](https://github.com/rschu1ze)).
* Simplify backup coordination for file infos [#48095](https://github.com/ClickHouse/ClickHouse/pull/48095) ([Vitaly Baranov](https://github.com/vitlibar)).
* materialized pg small fix [#48098](https://github.com/ClickHouse/ClickHouse/pull/48098) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update SQLite to 3.41.2 [#48101](https://github.com/ClickHouse/ClickHouse/pull/48101) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix test numbers again and enforce it with style [#48106](https://github.com/ClickHouse/ClickHouse/pull/48106) ([Raúl Marín](https://github.com/Algunenano)).
* s390x reinterpret as float64 [#48112](https://github.com/ClickHouse/ClickHouse/pull/48112) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Remove slow outdated test [#48114](https://github.com/ClickHouse/ClickHouse/pull/48114) ([alesapin](https://github.com/alesapin)).
* Cosmetic follow-up to [#46252](https://github.com/ClickHouse/ClickHouse/issues/46252) [#48128](https://github.com/ClickHouse/ClickHouse/pull/48128) ([Robert Schulze](https://github.com/rschu1ze)).
* Merging "Support undrop table" [#48130](https://github.com/ClickHouse/ClickHouse/pull/48130) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix double whitespace in exception message [#48132](https://github.com/ClickHouse/ClickHouse/pull/48132) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve script for updating clickhouse-docs [#48135](https://github.com/ClickHouse/ClickHouse/pull/48135) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix stdlib compatibility issues [#48150](https://github.com/ClickHouse/ClickHouse/pull/48150) ([DimasKovas](https://github.com/DimasKovas)).
* Make test test_disallow_concurrency less flaky [#48152](https://github.com/ClickHouse/ClickHouse/pull/48152) ([Vitaly Baranov](https://github.com/vitlibar)).
* Remove unused mockSystemDatabase from gtest_transform_query_for_exter… [#48162](https://github.com/ClickHouse/ClickHouse/pull/48162) ([Vladimir C](https://github.com/vdimir)).
* Update environmental-sensors.md [#48166](https://github.com/ClickHouse/ClickHouse/pull/48166) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Correctly handle NULL constants in logical optimizer for new analyzer [#48168](https://github.com/ClickHouse/ClickHouse/pull/48168) ([Antonio Andelic](https://github.com/antonio2368)).
* Try making KeeperMap test more stable [#48170](https://github.com/ClickHouse/ClickHouse/pull/48170) ([Antonio Andelic](https://github.com/antonio2368)).
* Deprecate EXPLAIN QUERY TREE with disabled analyzer. [#48177](https://github.com/ClickHouse/ClickHouse/pull/48177) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Use uniq file names in 02149_* tests to avoid SIGBUS in stress tests [#48187](https://github.com/ClickHouse/ClickHouse/pull/48187) ([Kruglov Pavel](https://github.com/Avogar)).
* Update style in ParserKQLSort.cpp [#48199](https://github.com/ClickHouse/ClickHouse/pull/48199) ([Ilya Yatsishin](https://github.com/qoega)).
* Remove support for std::unary/binary_function (removed in C++17) [#48204](https://github.com/ClickHouse/ClickHouse/pull/48204) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove unused setting [#48208](https://github.com/ClickHouse/ClickHouse/pull/48208) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove wrong assert from LogicalExpressionOptimizerPass [#48214](https://github.com/ClickHouse/ClickHouse/pull/48214) ([Antonio Andelic](https://github.com/antonio2368)).
* MySQL compatibility: Make str_to_date alias case-insensitive [#48220](https://github.com/ClickHouse/ClickHouse/pull/48220) ([Robert Schulze](https://github.com/rschu1ze)).
* Disable AST optimizations for projection analysis. [#48221](https://github.com/ClickHouse/ClickHouse/pull/48221) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix Too big of a difference between test numbers [#48224](https://github.com/ClickHouse/ClickHouse/pull/48224) ([Vladimir C](https://github.com/vdimir)).
* Stabilize 02477_age [#48225](https://github.com/ClickHouse/ClickHouse/pull/48225) ([Robert Schulze](https://github.com/rschu1ze)).
* Rename setting stop_reading_on_first_cancel [#48226](https://github.com/ClickHouse/ClickHouse/pull/48226) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Address flaky 02346_full_text_search [#48227](https://github.com/ClickHouse/ClickHouse/pull/48227) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix incorrect ThreadPool usage after ThreadPool introspection [#48244](https://github.com/ClickHouse/ClickHouse/pull/48244) ([Azat Khuzhin](https://github.com/azat)).
* fix test numbers again [#48264](https://github.com/ClickHouse/ClickHouse/pull/48264) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### Testing Improvement
* Fixed functional test 02534_keyed_siphash and 02552_siphash128_reference for s390x. [#47615](https://github.com/ClickHouse/ClickHouse/pull/47615) ([Harry Lee](https://github.com/HarryLeeIBM)).

View File

@ -11,14 +11,14 @@ This is intended for continuous integration checks that run on Linux servers. If
The cross-build for macOS is based on the [Build instructions](../development/build.md), follow them first. The cross-build for macOS is based on the [Build instructions](../development/build.md), follow them first.
## Install Clang-14 ## Install Clang-15
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup.
For example the commands for Bionic are like: For example the commands for Bionic are like:
``` bash ``` bash
sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-14 main" >> /etc/apt/sources.list sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-15 main" >> /etc/apt/sources.list
sudo apt-get install clang-14 sudo apt-get install clang-15
``` ```
## Install Cross-Compilation Toolset {#install-cross-compilation-toolset} ## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
@ -55,7 +55,7 @@ curl -L 'https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX1
cd ClickHouse cd ClickHouse
mkdir build-darwin mkdir build-darwin
cd build-darwin cd build-darwin
CC=clang-14 CXX=clang++-14 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake .. CC=clang-15 CXX=clang++-15 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
ninja ninja
``` ```

View File

@ -244,11 +244,10 @@ FROM
) WHERE view_range > 1 ) WHERE view_range > 1
ORDER BY ORDER BY
is_comments_enabled ASC, is_comments_enabled ASC,
num_views ASC num_views ASC;
``` ```
```response ```response
┌─views─────────────┬─is_comments_enabled─┬────prob_like_dislike─┐ ┌─views─────────────┬─is_comments_enabled─┬────prob_like_dislike─┐
< 10.00 false 0.08224180712685371 < 10.00 false 0.08224180712685371
< 100.00 false 0.06346337759167248 < 100.00 false 0.06346337759167248
@ -273,134 +272,10 @@ ORDER BY
└───────────────────┴─────────────────────┴──────────────────────┘ └───────────────────┴─────────────────────┴──────────────────────┘
22 rows in set. Elapsed: 8.460 sec. Processed 4.56 billion rows, 77.48 GB (538.73 million rows/s., 9.16 GB/s.) 22 rows in set. Elapsed: 8.460 sec. Processed 4.56 billion rows, 77.48 GB (538.73 million rows/s., 9.16 GB/s.)
``` ```
Enabling comments seems to be correlated with a higher rate of engagement. Enabling comments seems to be correlated with a higher rate of engagement.
### How does the number of videos change over time - notable events?
```sql
SELECT
toStartOfMonth(toDateTime(upload_date)) AS month,
uniq(uploader_id) AS uploaders,
count() as num_videos,
sum(view_count) as view_count
FROM youtube
WHERE (month >= '2005-01-01') AND (month < '2021-12-01')
GROUP BY month
ORDER BY month ASC
```
```response
┌──────month─┬─uploaders─┬─num_videos─┬───view_count─┐
│ 2005-04-01 │ 5 │ 6 │ 213597737 │
│ 2005-05-01 │ 6 │ 9 │ 2944005 │
│ 2005-06-01 │ 165 │ 351 │ 18624981 │
│ 2005-07-01 │ 395 │ 1168 │ 94164872 │
│ 2005-08-01 │ 1171 │ 3128 │ 124540774 │
│ 2005-09-01 │ 2418 │ 5206 │ 475536249 │
│ 2005-10-01 │ 6750 │ 13747 │ 737593613 │
│ 2005-11-01 │ 13706 │ 28078 │ 1896116976 │
│ 2005-12-01 │ 24756 │ 49885 │ 2478418930 │
│ 2006-01-01 │ 49992 │ 100447 │ 4532656581 │
│ 2006-02-01 │ 67882 │ 138485 │ 5677516317 │
│ 2006-03-01 │ 103358 │ 212237 │ 8430301366 │
│ 2006-04-01 │ 114615 │ 234174 │ 9980760440 │
│ 2006-05-01 │ 152682 │ 332076 │ 14129117212 │
│ 2006-06-01 │ 193962 │ 429538 │ 17014143263 │
│ 2006-07-01 │ 234401 │ 530311 │ 18721143410 │
│ 2006-08-01 │ 281280 │ 614128 │ 20473502342 │
│ 2006-09-01 │ 312434 │ 679906 │ 23158422265 │
│ 2006-10-01 │ 404873 │ 897590 │ 27357846117 │
```
A spike of uploaders [around COVID is noticeable](https://www.theverge.com/2020/3/27/21197642/youtube-with-me-style-videos-views-coronavirus-cook-workout-study-home-beauty).
### More subtitles over time and when
With advances in speech recognition, it's easier than ever to create subtitles for video, with YouTube adding auto-captioning in late 2009 — was there a jump then?
```sql
SELECT
toStartOfMonth(upload_date) AS month,
countIf(has_subtitles) / count() AS percent_subtitles,
percent_subtitles - any(percent_subtitles) OVER (ORDER BY month ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS previous
FROM youtube
WHERE (month >= '2015-01-01') AND (month < '2021-12-02')
GROUP BY month
ORDER BY month ASC
```
```response
┌──────month─┬───percent_subtitles─┬────────────────previous─┐
│ 2015-01-01 │ 0.2652653881082824 │ 0.2652653881082824 │
│ 2015-02-01 │ 0.3147556050309162 │ 0.049490216922633834 │
│ 2015-03-01 │ 0.32460464492371877 │ 0.009849039892802558 │
│ 2015-04-01 │ 0.33471963051468445 │ 0.010114985590965686 │
│ 2015-05-01 │ 0.3168087575501062 │ -0.017910872964578273 │
│ 2015-06-01 │ 0.3162609788438222 │ -0.0005477787062839745 │
│ 2015-07-01 │ 0.31828767677518033 │ 0.0020266979313581235 │
│ 2015-08-01 │ 0.3045551564286859 │ -0.013732520346494415 │
│ 2015-09-01 │ 0.311221133995152 │ 0.006665977566466086 │
│ 2015-10-01 │ 0.30574870926812175 │ -0.005472424727030245 │
│ 2015-11-01 │ 0.31125409712077234 │ 0.0055053878526505895 │
│ 2015-12-01 │ 0.3190967954651779 │ 0.007842698344405541 │
│ 2016-01-01 │ 0.32636021432496176 │ 0.007263418859783877 │
```
The data results show a spike in 2009. Apparently, at that time YouTube was removing their community captions feature, which allowed you to upload captions for other people's videos.
This prompted a very successful campaign to have creators add captions to their videos for hard of hearing and deaf viewers.
### Top uploaders over time
```sql
WITH uploaders AS
(
SELECT uploader
FROM youtube
GROUP BY uploader
ORDER BY sum(view_count) DESC
LIMIT 10
)
SELECT
month,
uploader,
sum(view_count) AS total_views,
avg(dislike_count / like_count) AS like_to_dislike_ratio
FROM youtube
WHERE uploader IN (uploaders)
GROUP BY
toStartOfMonth(upload_date) AS month,
uploader
ORDER BY
month ASC,
total_views DESC
1001 rows in set. Elapsed: 34.917 sec. Processed 4.58 billion rows, 69.08 GB (131.15 million rows/s., 1.98 GB/s.)
```
```response
┌──────month─┬─uploader───────────────────┬─total_views─┬─like_to_dislike_ratio─┐
│ 1970-01-01 │ T-Series │ 10957099 │ 0.022784656361208206 │
│ 1970-01-01 │ Ryan's World │ 0 │ 0.003035559410234172 │
│ 1970-01-01 │ SET India │ 0 │ nan │
│ 2006-09-01 │ Cocomelon - Nursery Rhymes │ 256406497 │ 0.7005566715978622 │
│ 2007-06-01 │ Cocomelon - Nursery Rhymes │ 33641320 │ 0.7088650914344298 │
│ 2008-02-01 │ WWE │ 43733469 │ 0.07198856488734842 │
│ 2008-03-01 │ WWE │ 16514541 │ 0.1230603715431997 │
│ 2008-04-01 │ WWE │ 5907295 │ 0.2089399470159618 │
│ 2008-05-01 │ WWE │ 7779627 │ 0.09101676560436774 │
│ 2008-06-01 │ WWE │ 7018780 │ 0.0974184753155297 │
│ 2008-07-01 │ WWE │ 4686447 │ 0.1263845422065158 │
│ 2008-08-01 │ WWE │ 4514312 │ 0.08384574274791441 │
│ 2008-09-01 │ WWE │ 3717092 │ 0.07872802579349912 │
```
### How do like ratio changes as views go up? ### How do like ratio changes as views go up?
```sql ```sql
@ -421,13 +296,10 @@ GROUP BY
ORDER BY ORDER BY
view_range ASC, view_range ASC,
is_comments_enabled ASC is_comments_enabled ASC
) );
20 rows in set. Elapsed: 9.043 sec. Processed 4.56 billion rows, 77.48 GB (503.99 million rows/s., 8.57 GB/s.)
``` ```
```response ```response
┌─view_range────────┬─is_comments_enabled─┬─like_ratio─┐ ┌─view_range────────┬─is_comments_enabled─┬─like_ratio─┐
< 10.00 false 0.66 < 10.00 false 0.66
< 10.00 true 0.66 < 10.00 true 0.66
@ -451,6 +323,7 @@ ORDER BY
< 10.00 billion true 19.5 < 10.00 billion true 19.5
└───────────────────┴─────────────────────┴────────────┘ └───────────────────┴─────────────────────┴────────────┘
20 rows in set. Elapsed: 63.664 sec. Processed 4.56 billion rows, 113.93 GB (71.59 million rows/s., 1.79 GB/s.)
``` ```
### How are views distributed? ### How are views distributed?
@ -468,9 +341,7 @@ FROM
) )
ARRAY JOIN ARRAY JOIN
quantiles, quantiles,
labels labels;
12 rows in set. Elapsed: 1.864 sec. Processed 4.56 billion rows, 36.46 GB (2.45 billion rows/s., 19.56 GB/s.)
``` ```
```response ```response
@ -488,4 +359,6 @@ ARRAY JOIN
│ 20th │ 16 │ │ 20th │ 16 │
│ 10th │ 6 │ │ 10th │ 6 │
└────────────┴─────────┘ └────────────┴─────────┘
12 rows in set. Elapsed: 1.864 sec. Processed 4.56 billion rows, 36.46 GB (2.45 billion rows/s., 19.56 GB/s.)
``` ```

View File

@ -1047,7 +1047,7 @@ Default value: `0`.
Sets the number of threads performing background merges and mutations for tables with MergeTree engines. This setting is also could be applied at server startup from the `default` profile configuration for backward compatibility at the ClickHouse server start. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance. Sets the number of threads performing background merges and mutations for tables with MergeTree engines. This setting is also could be applied at server startup from the `default` profile configuration for backward compatibility at the ClickHouse server start. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.
Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`. Before changing it, please also take a look at related MergeTree settings, such as [number_of_free_entries_in_pool_to_lower_max_size_of_merge](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-lower-max-size-of-merge) and [number_of_free_entries_in_pool_to_execute_mutation](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-execute-mutation).
Possible values: Possible values:

View File

@ -553,6 +553,32 @@ Default value: 8192
Merge reads rows from parts in blocks of `merge_max_block_size` rows, then merges and writes the result into a new part. The read block is placed in RAM, so `merge_max_block_size` affects the size of the RAM required for the merge. Thus, merges can consume a large amount of RAM for tables with very wide rows (if the average row size is 100kb, then when merging 10 parts, (100kb * 10 * 8192) = ~ 8GB of RAM). By decreasing `merge_max_block_size`, you can reduce the amount of RAM required for a merge but slow down a merge. Merge reads rows from parts in blocks of `merge_max_block_size` rows, then merges and writes the result into a new part. The read block is placed in RAM, so `merge_max_block_size` affects the size of the RAM required for the merge. Thus, merges can consume a large amount of RAM for tables with very wide rows (if the average row size is 100kb, then when merging 10 parts, (100kb * 10 * 8192) = ~ 8GB of RAM). By decreasing `merge_max_block_size`, you can reduce the amount of RAM required for a merge but slow down a merge.
## number_of_free_entries_in_pool_to_lower_max_size_of_merge {#number-of-free-entries-in-pool-to-lower-max-size-of-merge}
When there is less than specified number of free entries in pool (or replicated queue), start to lower maximum size of merge to process (or to put in queue).
This is to allow small merges to process - not filling the pool with long running merges.
Possible values:
- Any positive integer.
Default value: 8
## number_of_free_entries_in_pool_to_execute_mutation {#number-of-free-entries-in-pool-to-execute-mutation}
When there is less than specified number of free entries in pool, do not execute part mutations.
This is to leave free threads for regular merges and avoid "Too many parts".
Possible values:
- Any positive integer.
Default value: 20
**Usage**
The value of the `number_of_free_entries_in_pool_to_execute_mutation` setting should be less than the value of the [background_pool_size](/docs/en/operations/server-configuration-parameters/settings#background_pool_size) * [background_merges_mutations_concurrency_ratio](/docs/en/operations/server-configuration-parameters/settings#background_merges_mutations_concurrency_ratio). Otherwise, ClickHouse throws an exception.
## max_part_loading_threads {#max-part-loading-threads} ## max_part_loading_threads {#max-part-loading-threads}
The maximum number of threads that read parts when ClickHouse starts. The maximum number of threads that read parts when ClickHouse starts.

View File

@ -91,7 +91,7 @@ The command must read arguments from `STDIN` and must output the result to `STDO
**Example** **Example**
Creating `test_function` using XML configuration. Creating `test_function` using XML configuration.
File test_function.xml. File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml ```xml
<functions> <functions>
<function> <function>
@ -108,7 +108,7 @@ File test_function.xml.
</functions> </functions>
``` ```
Script file inside `user_scripts` folder `test_function.py`. Script file inside `user_scripts` folder `test_function.py` (`/var/lib/clickhouse/user_scripts/test_function.py` with default path settings).
```python ```python
#!/usr/bin/python3 #!/usr/bin/python3
@ -136,7 +136,7 @@ Result:
``` ```
Creating `test_function_sum` manually specifying `execute_direct` to `0` using XML configuration. Creating `test_function_sum` manually specifying `execute_direct` to `0` using XML configuration.
File test_function.xml. File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml ```xml
<functions> <functions>
<function> <function>
@ -173,7 +173,7 @@ Result:
``` ```
Creating `test_function_sum_json` with named arguments and format [JSONEachRow](../../interfaces/formats.md#jsoneachrow) using XML configuration. Creating `test_function_sum_json` with named arguments and format [JSONEachRow](../../interfaces/formats.md#jsoneachrow) using XML configuration.
File test_function.xml. File `test_function.xml` (`/etc/clickhouse-server/test_function.xml` with default path settings).
```xml ```xml
<functions> <functions>
<function> <function>
@ -195,7 +195,7 @@ File test_function.xml.
</functions> </functions>
``` ```
Script file inside `user_scripts` folder `test_function_sum_json.py`. Script file inside `user_scripts` folder `test_function_sum_json.py` (`/var/lib/clickhouse/user_scripts/test_function_sum_json.py` with default path settings).
```python ```python
#!/usr/bin/python3 #!/usr/bin/python3
@ -228,7 +228,7 @@ Result:
``` ```
Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type). Executable user defined functions can take constant parameters configured in `command` setting (works only for user defined functions with `executable` type).
File test_function_parameter_python.xml. File `test_function_parameter_python.xml` (`/etc/clickhouse-server/test_function_parameter_python.xml` with default path settings).
```xml ```xml
<functions> <functions>
<function> <function>
@ -244,7 +244,7 @@ File test_function_parameter_python.xml.
</functions> </functions>
``` ```
Script file inside `user_scripts` folder `test_function_parameter_python.py`. Script file inside `user_scripts` folder `test_function_parameter_python.py` (`/var/lib/clickhouse/user_scripts/test_function_parameter_python.py` with default path settings).
```python ```python
#!/usr/bin/python3 #!/usr/bin/python3

View File

@ -30,7 +30,7 @@ This statement is identical to the query:
SELECT name FROM system.databases [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE filename] [FORMAT format] SELECT name FROM system.databases [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE filename] [FORMAT format]
``` ```
### Examples **Examples**
Getting database names, containing the symbols sequence 'de' in their names: Getting database names, containing the symbols sequence 'de' in their names:
@ -92,7 +92,7 @@ Result:
└────────────────────────────────┘ └────────────────────────────────┘
``` ```
### See Also **See also**
- [CREATE DATABASE](https://clickhouse.com/docs/en/sql-reference/statements/create/database/#query-language-create-database) - [CREATE DATABASE](https://clickhouse.com/docs/en/sql-reference/statements/create/database/#query-language-create-database)
@ -128,7 +128,7 @@ This statement is identical to the query:
SELECT name FROM system.tables [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>] SELECT name FROM system.tables [WHERE name [NOT] LIKE | ILIKE '<pattern>'] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
``` ```
### Examples **Examples**
Getting table names, containing the symbols sequence 'user' in their names: Getting table names, containing the symbols sequence 'user' in their names:
@ -191,11 +191,59 @@ Result:
└────────────────────────────────┘ └────────────────────────────────┘
``` ```
### See Also **See also**
- [Create Tables](https://clickhouse.com/docs/en/getting-started/tutorial/#create-tables) - [Create Tables](https://clickhouse.com/docs/en/getting-started/tutorial/#create-tables)
- [SHOW CREATE TABLE](https://clickhouse.com/docs/en/sql-reference/statements/show/#show-create-table) - [SHOW CREATE TABLE](https://clickhouse.com/docs/en/sql-reference/statements/show/#show-create-table)
## SHOW COLUMNS
Displays a list of columns
```sql
SHOW [EXTENDED] [FULL] COLUMNS {FROM | IN} <table> [{FROM | IN} <db>] [{[NOT] {LIKE | ILIKE} '<pattern>' | WHERE <expr>}] [LIMIT <N>] [INTO
OUTFILE <filename>] [FORMAT <format>]
```
The database and table name can be specified in abbreviated form as `<db>.<table>`, i.e. `FROM tab FROM db` and `FROM db.tab` are
equivalent. If no database is specified, the query returns the list of columns from the current database.
The optional keyword `EXTENDED` currently has no effect, it only exists for MySQL compatibility.
The optional keyword `FULL` causes the output to include the collation, comment and privilege columns.
`SHOW COLUMNS` produces a result table with the following structure:
- field - The name of the column (String)
- type - The column data type (String)
- null - If the column data type is Nullable (UInt8)
- key - `PRI` if the column is part of the primary key, `SOR` if the column is part of the sorting key, empty otherwise (String)
- default - Default expression of the column if it is of type `ALIAS`, `DEFAULT`, or `MATERIALIZED`, otherwise `NULL`. (Nullable(String))
- extra - Additional information, currently unused (String)
- collation - (only if `FULL` keyword was specified) Collation of the column, always `NULL` because ClickHouse has no per-column collations (Nullable(String))
- comment - (only if `FULL` keyword was specified) Comment on the column (String)
- privilege - (only if `FULL` keyword was specified) The privilege you have on this column, currently not available (String)
**Examples**
Getting information about all columns in table 'orders' starting with 'delivery_':
```sql
SHOW COLUMNS FROM 'orders' LIKE 'delivery_%'
```
Result:
``` text
┌─field───────────┬─type─────┬─null─┬─key─────┬─default─┬─extra─┐
│ delivery_date │ DateTime │ 0 │ PRI SOR │ ᴺᵁᴸᴸ │ │
│ delivery_status │ Bool │ 0 │ │ ᴺᵁᴸᴸ │ │
└─────────────────┴──────────┴──────┴─────────┴─────────┴───────┘
```
**See also**
- [system.columns](https://clickhouse.com/docs/en/operations/system-tables/columns)
## SHOW DICTIONARIES ## SHOW DICTIONARIES
Displays a list of [Dictionaries](../../sql-reference/dictionaries/index.md). Displays a list of [Dictionaries](../../sql-reference/dictionaries/index.md).
@ -212,7 +260,7 @@ You can get the same results as the `SHOW DICTIONARIES` query in the following w
SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>] SELECT name FROM system.dictionaries WHERE database = <db> [AND name LIKE <pattern>] [LIMIT <N>] [INTO OUTFILE <filename>] [FORMAT <format>]
``` ```
**Example** **Examples**
The following query selects the first two rows from the list of tables in the `system` database, whose names contain `reg`. The following query selects the first two rows from the list of tables in the `system` database, whose names contain `reg`.
@ -231,7 +279,7 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
Shows privileges for a user. Shows privileges for a user.
### Syntax **Syntax**
``` sql ``` sql
SHOW GRANTS [FOR user1 [, user2 ...]] SHOW GRANTS [FOR user1 [, user2 ...]]
@ -245,7 +293,7 @@ Shows parameters that were used at a [user creation](../../sql-reference/stateme
`SHOW CREATE USER` does not output user passwords. `SHOW CREATE USER` does not output user passwords.
### Syntax **Syntax**
``` sql ``` sql
SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER] SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
@ -255,7 +303,7 @@ SHOW CREATE USER [name1 [, name2 ...] | CURRENT_USER]
Shows parameters that were used at a [role creation](../../sql-reference/statements/create/role.md). Shows parameters that were used at a [role creation](../../sql-reference/statements/create/role.md).
### Syntax **Syntax**
``` sql ``` sql
SHOW CREATE ROLE name1 [, name2 ...] SHOW CREATE ROLE name1 [, name2 ...]
@ -265,7 +313,7 @@ SHOW CREATE ROLE name1 [, name2 ...]
Shows parameters that were used at a [row policy creation](../../sql-reference/statements/create/row-policy.md). Shows parameters that were used at a [row policy creation](../../sql-reference/statements/create/row-policy.md).
### Syntax **Syntax**
``` sql ``` sql
SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...] SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
@ -275,7 +323,7 @@ SHOW CREATE [ROW] POLICY name ON [database1.]table1 [, [database2.]table2 ...]
Shows parameters that were used at a [quota creation](../../sql-reference/statements/create/quota.md). Shows parameters that were used at a [quota creation](../../sql-reference/statements/create/quota.md).
### Syntax **Syntax**
``` sql ``` sql
SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT] SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
@ -285,7 +333,7 @@ SHOW CREATE QUOTA [name1 [, name2 ...] | CURRENT]
Shows parameters that were used at a [settings profile creation](../../sql-reference/statements/create/settings-profile.md). Shows parameters that were used at a [settings profile creation](../../sql-reference/statements/create/settings-profile.md).
### Syntax **Syntax**
``` sql ``` sql
SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...] SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
@ -295,7 +343,7 @@ SHOW CREATE [SETTINGS] PROFILE name1 [, name2 ...]
Returns a list of [user account](../../guides/sre/user-management/index.md#user-account-management) names. To view user accounts parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users). Returns a list of [user account](../../guides/sre/user-management/index.md#user-account-management) names. To view user accounts parameters, see the system table [system.users](../../operations/system-tables/users.md#system_tables-users).
### Syntax **Syntax**
``` sql ``` sql
SHOW USERS SHOW USERS
@ -305,7 +353,7 @@ SHOW USERS
Returns a list of [roles](../../guides/sre/user-management/index.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants). Returns a list of [roles](../../guides/sre/user-management/index.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).
### Syntax **Syntax**
``` sql ``` sql
SHOW [CURRENT|ENABLED] ROLES SHOW [CURRENT|ENABLED] ROLES
@ -314,7 +362,7 @@ SHOW [CURRENT|ENABLED] ROLES
Returns a list of [setting profiles](../../guides/sre/user-management/index.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles). Returns a list of [setting profiles](../../guides/sre/user-management/index.md#settings-profiles-management). To view user accounts parameters, see the system table [settings_profiles](../../operations/system-tables/settings_profiles.md#system_tables-settings_profiles).
### Syntax **Syntax**
``` sql ``` sql
SHOW [SETTINGS] PROFILES SHOW [SETTINGS] PROFILES
@ -324,7 +372,7 @@ SHOW [SETTINGS] PROFILES
Returns a list of [row policies](../../guides/sre/user-management/index.md#row-policy-management) for the specified table. To view user accounts parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies). Returns a list of [row policies](../../guides/sre/user-management/index.md#row-policy-management) for the specified table. To view user accounts parameters, see the system table [system.row_policies](../../operations/system-tables/row_policies.md#system_tables-row_policies).
### Syntax **Syntax**
``` sql ``` sql
SHOW [ROW] POLICIES [ON [db.]table] SHOW [ROW] POLICIES [ON [db.]table]
@ -334,7 +382,7 @@ SHOW [ROW] POLICIES [ON [db.]table]
Returns a list of [quotas](../../guides/sre/user-management/index.md#quotas-management). To view quotas parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas). Returns a list of [quotas](../../guides/sre/user-management/index.md#quotas-management). To view quotas parameters, see the system table [system.quotas](../../operations/system-tables/quotas.md#system_tables-quotas).
### Syntax **Syntax**
``` sql ``` sql
SHOW QUOTAS SHOW QUOTAS
@ -344,7 +392,7 @@ SHOW QUOTAS
Returns a [quota](../../operations/quotas.md) consumption for all users or for current user. To view another parameters, see system tables [system.quotas_usage](../../operations/system-tables/quotas_usage.md#system_tables-quotas_usage) and [system.quota_usage](../../operations/system-tables/quota_usage.md#system_tables-quota_usage). Returns a [quota](../../operations/quotas.md) consumption for all users or for current user. To view another parameters, see system tables [system.quotas_usage](../../operations/system-tables/quotas_usage.md#system_tables-quotas_usage) and [system.quota_usage](../../operations/system-tables/quota_usage.md#system_tables-quota_usage).
### Syntax **Syntax**
``` sql ``` sql
SHOW [CURRENT] QUOTA SHOW [CURRENT] QUOTA
@ -353,7 +401,7 @@ SHOW [CURRENT] QUOTA
Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges). Shows all [users](../../guides/sre/user-management/index.md#user-account-management), [roles](../../guides/sre/user-management/index.md#role-management), [profiles](../../guides/sre/user-management/index.md#settings-profiles-management), etc. and all their [grants](../../sql-reference/statements/grant.md#grant-privileges).
### Syntax **Syntax**
``` sql ``` sql
SHOW ACCESS SHOW ACCESS
@ -366,13 +414,14 @@ Returns a list of clusters. All available clusters are listed in the [system.clu
`SHOW CLUSTER name` query displays the contents of system.clusters table for this cluster. `SHOW CLUSTER name` query displays the contents of system.clusters table for this cluster.
::: :::
### Syntax **Syntax**
``` sql ``` sql
SHOW CLUSTER '<name>' SHOW CLUSTER '<name>'
SHOW CLUSTERS [[NOT] LIKE|ILIKE '<pattern>'] [LIMIT <N>] SHOW CLUSTERS [[NOT] LIKE|ILIKE '<pattern>'] [LIMIT <N>]
``` ```
### Examples
**Examples**
Query: Query:

View File

@ -20,14 +20,14 @@ BackupMutablePtr BackupFactory::createBackup(const CreateParams & params) const
const String & engine_name = params.backup_info.backup_engine_name; const String & engine_name = params.backup_info.backup_engine_name;
auto it = creators.find(engine_name); auto it = creators.find(engine_name);
if (it == creators.end()) if (it == creators.end())
throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine {}", engine_name); throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine '{}'", engine_name);
return (it->second)(params); return (it->second)(params);
} }
void BackupFactory::registerBackupEngine(const String & engine_name, const CreatorFn & creator_fn) void BackupFactory::registerBackupEngine(const String & engine_name, const CreatorFn & creator_fn)
{ {
if (creators.contains(engine_name)) if (creators.contains(engine_name))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup engine {} was registered twice", engine_name); throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup engine '{}' was registered twice", engine_name);
creators[engine_name] = creator_fn; creators[engine_name] = creator_fn;
} }

View File

@ -41,7 +41,7 @@ namespace
key = "backups.allowed_disk[" + std::to_string(++counter) + "]"; key = "backups.allowed_disk[" + std::to_string(++counter) + "]";
if (!config.has(key)) if (!config.has(key))
throw Exception(ErrorCodes::BAD_ARGUMENTS, throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Disk {} is not allowed for backups, see the 'backups.allowed_disk' configuration parameter", quoteString(disk_name)); "Disk '{}' is not allowed for backups, see the 'backups.allowed_disk' configuration parameter", quoteString(disk_name));
} }
} }
@ -54,7 +54,7 @@ namespace
bool path_ok = path.empty() || (path.is_relative() && (*path.begin() != "..")); bool path_ok = path.empty() || (path.is_relative() && (*path.begin() != ".."));
if (!path_ok) if (!path_ok)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} to backup must be inside the specified disk {}", throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' to backup must be inside the specified disk '{}'",
quoteString(path.c_str()), quoteString(disk_name)); quoteString(path.c_str()), quoteString(disk_name));
} }

View File

@ -78,6 +78,12 @@
M(BackupsThreadsActive, "Number of threads in thread pool for BACKUP running a task.") \ M(BackupsThreadsActive, "Number of threads in thread pool for BACKUP running a task.") \
M(RestoreThreads, "Number of threads in the thread pool for RESTORE.") \ M(RestoreThreads, "Number of threads in the thread pool for RESTORE.") \
M(RestoreThreadsActive, "Number of threads in the thread pool for RESTORE running a task.") \ M(RestoreThreadsActive, "Number of threads in the thread pool for RESTORE running a task.") \
M(MarksLoaderThreads, "Number of threads in thread pool for loading marks.") \
M(MarksLoaderThreadsActive, "Number of threads in the thread pool for loading marks running a task.") \
M(IOPrefetchThreads, "Number of threads in the IO prefertch thread pool.") \
M(IOPrefetchThreadsActive, "Number of threads in the IO prefetch thread pool running a task.") \
M(IOWriterThreads, "Number of threads in the IO writer thread pool.") \
M(IOWriterThreadsActive, "Number of threads in the IO writer thread pool running a task.") \
M(IOThreads, "Number of threads in the IO thread pool.") \ M(IOThreads, "Number of threads in the IO thread pool.") \
M(IOThreadsActive, "Number of threads in the IO thread pool running a task.") \ M(IOThreadsActive, "Number of threads in the IO thread pool running a task.") \
M(ThreadPoolRemoteFSReaderThreads, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool.") \ M(ThreadPoolRemoteFSReaderThreads, "Number of threads in the thread pool for remote_filesystem_read_method=threadpool.") \

View File

@ -661,7 +661,7 @@ BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, Contex
String node_path = ddl_worker->tryEnqueueAndExecuteEntry(entry, query_context); String node_path = ddl_worker->tryEnqueueAndExecuteEntry(entry, query_context);
Strings hosts_to_wait = getZooKeeper()->getChildren(zookeeper_path + "/replicas"); Strings hosts_to_wait = getZooKeeper()->getChildren(zookeeper_path + "/replicas");
return getDistributedDDLStatus(node_path, entry, query_context, hosts_to_wait); return getDistributedDDLStatus(node_path, entry, query_context, &hosts_to_wait);
} }
static UUID getTableUUIDIfReplicated(const String & metadata, ContextPtr context) static UUID getTableUUIDIfReplicated(const String & metadata, ContextPtr context)

View File

@ -18,6 +18,7 @@
#include "array/arrayIndex.h" #include "array/arrayIndex.h"
#include "Functions/like.h" #include "Functions/like.h"
#include "Functions/FunctionsStringSearch.h" #include "Functions/FunctionsStringSearch.h"
#include <Common/HashTable/HashSet.h>
namespace DB namespace DB
@ -616,103 +617,116 @@ public:
"Number of arguments for function {} doesn't match: passed {}, should be 2", "Number of arguments for function {} doesn't match: passed {}, should be 2",
getName(), arguments.size()); getName(), arguments.size());
const DataTypeMap * left = checkAndGetDataType<DataTypeMap>(arguments[0].type.get()); const auto * left = checkAndGetDataType<DataTypeMap>(arguments[0].type.get());
const DataTypeMap * right = checkAndGetDataType<DataTypeMap>(arguments[1].type.get()); const auto * right = checkAndGetDataType<DataTypeMap>(arguments[1].type.get());
if (!left || !right) if (!left || !right)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The two arguments for function {} must be both Map type", throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
getName()); "The two arguments for function {} must be both Map type", getName());
if (!left->getKeyType()->equals(*right->getKeyType()) || !left->getValueType()->equals(*right->getValueType())) if (!left->getKeyType()->equals(*right->getKeyType()) || !left->getValueType()->equals(*right->getValueType()))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The Key And Value type of Map for function {} must be the same", throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
getName()); "The Key And Value type of Map for function {} must be the same", getName());
return std::make_shared<DataTypeMap>(left->getKeyType(), left->getValueType()); return std::make_shared<DataTypeMap>(left->getKeyType(), left->getValueType());
} }
bool useDefaultImplementationForConstants() const override { return true; } bool useDefaultImplementationForConstants() const override { return true; }
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{ {
const ColumnMap * col_map_left = typeid_cast<const ColumnMap *>(arguments[0].column.get()); bool is_left_const = isColumnConst(*arguments[0].column);
const auto * col_const_map_left = checkAndGetColumnConst<ColumnMap>(arguments[0].column.get()); bool is_right_const = isColumnConst(*arguments[1].column);
bool col_const_map_left_flag = false;
if (col_const_map_left)
{
col_const_map_left_flag = true;
col_map_left = typeid_cast<const ColumnMap *>(&col_const_map_left->getDataColumn());
}
if (!col_map_left)
return nullptr;
const ColumnMap * col_map_right = typeid_cast<const ColumnMap *>(arguments[1].column.get()); const auto * map_column_left = is_left_const
const auto * col_const_map_right = checkAndGetColumnConst<ColumnMap>(arguments[1].column.get()); ? checkAndGetColumnConstData<ColumnMap>(arguments[0].column.get())
bool col_const_map_right_flag = false; : checkAndGetColumn<ColumnMap>(arguments[0].column.get());
if (col_const_map_right)
{
col_const_map_right_flag = true;
col_map_right = typeid_cast<const ColumnMap *>(&col_const_map_right->getDataColumn());
}
if (!col_map_right)
return nullptr;
const auto & nested_column_left = col_map_left->getNestedColumn(); const auto * map_column_right = is_right_const
const auto & keys_data_left = col_map_left->getNestedData().getColumn(0); ? checkAndGetColumnConstData<ColumnMap>(arguments[1].column.get())
const auto & values_data_left = col_map_left->getNestedData().getColumn(1); : checkAndGetColumn<ColumnMap>(arguments[1].column.get());
if (!map_column_left || !map_column_right)
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
"Arguments for function {} must be maps, got {} and {} instead",
getName(), arguments[0].column->getName(), arguments[1].column->getName());
const auto & nested_column_left = map_column_left->getNestedColumn();
const auto & keys_data_left = map_column_left->getNestedData().getColumn(0);
const auto & values_data_left = map_column_left->getNestedData().getColumn(1);
const auto & offsets_left = nested_column_left.getOffsets(); const auto & offsets_left = nested_column_left.getOffsets();
const auto & nested_column_right = col_map_right->getNestedColumn(); const auto & nested_column_right = map_column_right->getNestedColumn();
const auto & keys_data_right = col_map_right->getNestedData().getColumn(0); const auto & keys_data_right = map_column_right->getNestedData().getColumn(0);
const auto & values_data_right = col_map_right->getNestedData().getColumn(1); const auto & values_data_right = map_column_right->getNestedData().getColumn(1);
const auto & offsets_right = nested_column_right.getOffsets(); const auto & offsets_right = nested_column_right.getOffsets();
const auto & result_type_map = static_cast<const DataTypeMap &>(*result_type); auto result_keys = keys_data_left.cloneEmpty();
const DataTypePtr & key_type = result_type_map.getKeyType(); auto result_values = values_data_left.cloneEmpty();
const DataTypePtr & value_type = result_type_map.getValueType();
MutableColumnPtr keys_data = key_type->createColumn(); size_t size_to_reserve = keys_data_right.size() + (keys_data_left.size() - keys_data_right.size());
MutableColumnPtr values_data = value_type->createColumn();
MutableColumnPtr offsets = DataTypeNumber<IColumn::Offset>().createColumn(); result_keys->reserve(size_to_reserve);
result_values->reserve(size_to_reserve);
auto result_offsets = ColumnVector<IColumn::Offset>::create(input_rows_count);
auto & result_offsets_data = result_offsets->getData();
using Set = HashSetWithStackMemory<StringRef, StringRefHash, 4>;
Set right_keys_const;
if (is_right_const)
{
for (size_t i = 0; i < keys_data_right.size(); ++i)
right_keys_const.insert(keys_data_right.getDataAt(i));
}
IColumn::Offset current_offset = 0; IColumn::Offset current_offset = 0;
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx) for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
{ {
size_t left_it_begin = col_const_map_left_flag ? 0 : offsets_left[row_idx - 1]; size_t left_from = is_left_const ? 0 : offsets_left[row_idx - 1];
size_t left_it_end = col_const_map_left_flag ? offsets_left.size() : offsets_left[row_idx]; size_t left_to = is_left_const ? offsets_left[0] : offsets_left[row_idx];
size_t right_it_begin = col_const_map_right_flag ? 0 : offsets_right[row_idx - 1];
size_t right_it_end = col_const_map_right_flag ? offsets_right.size() : offsets_right[row_idx];
for (size_t i = left_it_begin; i < left_it_end; ++i) size_t right_from = is_right_const ? 0 : offsets_right[row_idx - 1];
size_t right_to = is_right_const ? offsets_right[0] : offsets_right[row_idx];
auto execute_row = [&](const auto & set)
{ {
bool matched = false; for (size_t i = left_from; i < left_to; ++i)
auto key = keys_data_left.getDataAt(i);
for (size_t j = right_it_begin; j < right_it_end; ++j)
{ {
if (keys_data_right.getDataAt(j).toString() == key.toString()) if (!set.find(keys_data_left.getDataAt(i)))
{ {
matched = true; result_keys->insertFrom(keys_data_left, i);
break; result_values->insertFrom(values_data_left, i);
++current_offset;
} }
} }
if (!matched) };
{
keys_data->insertFrom(keys_data_left, i);
values_data->insertFrom(values_data_left, i);
++current_offset;
}
}
for (size_t j = right_it_begin; j < right_it_end; ++j) if (is_right_const)
{ {
keys_data->insertFrom(keys_data_right, j); execute_row(right_keys_const);
values_data->insertFrom(values_data_right, j); }
++current_offset; else
{
Set right_keys;
for (size_t i = right_from; i < right_to; ++i)
right_keys.insert(keys_data_right.getDataAt(i));
execute_row(right_keys);
} }
offsets->insert(current_offset); size_t right_map_size = right_to - right_from;
result_keys->insertRangeFrom(keys_data_right, right_from, right_map_size);
result_values->insertRangeFrom(values_data_right, right_from, right_map_size);
current_offset += right_map_size;
result_offsets_data[row_idx] = current_offset;
} }
auto nested_column = ColumnArray::create( auto nested_column = ColumnArray::create(
ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}), ColumnTuple::create(Columns{std::move(result_keys), std::move(result_values)}),
std::move(offsets)); std::move(result_offsets));
return ColumnMap::create(nested_column); return ColumnMap::create(nested_column);
} }

View File

@ -28,13 +28,13 @@ void ProgressValues::read(ReadBuffer & in, UInt64 server_revision)
void ProgressValues::write(WriteBuffer & out, UInt64 client_revision) const void ProgressValues::write(WriteBuffer & out, UInt64 client_revision) const
{ {
writeVarUInt(read_rows, out); writeVarUIntOverflow(read_rows, out);
writeVarUInt(read_bytes, out); writeVarUIntOverflow(read_bytes, out);
writeVarUInt(total_rows_to_read, out); writeVarUIntOverflow(total_rows_to_read, out);
if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO) if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO)
{ {
writeVarUInt(written_rows, out); writeVarUIntOverflow(written_rows, out);
writeVarUInt(written_bytes, out); writeVarUIntOverflow(written_bytes, out);
} }
if (client_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS) if (client_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS)
{ {

View File

@ -2,6 +2,7 @@
#include <iostream> #include <iostream>
#include <base/types.h> #include <base/types.h>
#include <base/defines.h>
#include <IO/ReadBuffer.h> #include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h> #include <IO/WriteBuffer.h>
@ -14,12 +15,32 @@ namespace ErrorCodes
} }
/** Write UInt64 in variable length format (base128) NOTE Only up to 2^63 - 1 are supported. */ /** Variable-Length Quantity (VLQ) Base-128 compression
*
* NOTE: Due to historical reasons, only up to 1<<63-1 are supported, which
* cannot be changed without breaking the backward compatibility.
* Also some drivers may support full 1<<64 range (i.e. python -
* clickhouse-driver), while others has the same limitations as ClickHouse
* (i.e. Rust - clickhouse-rs).
* So implementing VLQ for the whole 1<<64 range will require different set of
* helpers.
*/
constexpr UInt64 VAR_UINT_MAX = (1ULL<<63) - 1;
/** Write UInt64 in variable length format (base128) */
void writeVarUInt(UInt64 x, std::ostream & ostr); void writeVarUInt(UInt64 x, std::ostream & ostr);
void writeVarUInt(UInt64 x, WriteBuffer & ostr); void writeVarUInt(UInt64 x, WriteBuffer & ostr);
char * writeVarUInt(UInt64 x, char * ostr); char * writeVarUInt(UInt64 x, char * ostr);
/** Write UInt64 in variable length format, wrap the value to VAR_UINT_MAX if it exceed VAR_UINT_MAX (to bypass sanity check) */
template <typename ...Args>
auto writeVarUIntOverflow(UInt64 x, Args && ... args)
{
return writeVarUInt(std::min(x, VAR_UINT_MAX), std::forward<Args>(args)...);
}
/** Read UInt64, written in variable length format (base128) */ /** Read UInt64, written in variable length format (base128) */
void readVarUInt(UInt64 & x, std::istream & istr); void readVarUInt(UInt64 & x, std::istream & istr);
void readVarUInt(UInt64 & x, ReadBuffer & istr); void readVarUInt(UInt64 & x, ReadBuffer & istr);
@ -186,6 +207,7 @@ inline const char * readVarUInt(UInt64 & x, const char * istr, size_t size)
inline void writeVarUInt(UInt64 x, WriteBuffer & ostr) inline void writeVarUInt(UInt64 x, WriteBuffer & ostr)
{ {
chassert(x <= VAR_UINT_MAX);
for (size_t i = 0; i < 9; ++i) for (size_t i = 0; i < 9; ++i)
{ {
uint8_t byte = x & 0x7F; uint8_t byte = x & 0x7F;
@ -205,6 +227,7 @@ inline void writeVarUInt(UInt64 x, WriteBuffer & ostr)
inline void writeVarUInt(UInt64 x, std::ostream & ostr) inline void writeVarUInt(UInt64 x, std::ostream & ostr)
{ {
chassert(x <= VAR_UINT_MAX);
for (size_t i = 0; i < 9; ++i) for (size_t i = 0; i < 9; ++i)
{ {
uint8_t byte = x & 0x7F; uint8_t byte = x & 0x7F;
@ -222,6 +245,7 @@ inline void writeVarUInt(UInt64 x, std::ostream & ostr)
inline char * writeVarUInt(UInt64 x, char * ostr) inline char * writeVarUInt(UInt64 x, char * ostr)
{ {
chassert(x <= VAR_UINT_MAX);
for (size_t i = 0; i < 9; ++i) for (size_t i = 0; i < 9; ++i)
{ {
uint8_t byte = x & 0x7F; uint8_t byte = x & 0x7F;

View File

@ -18,26 +18,32 @@ int main(int argc, char ** argv)
} }
DB::UInt64 x = DB::parse<UInt64>(argv[1]); DB::UInt64 x = DB::parse<UInt64>(argv[1]);
std::cout << std::hex << std::showbase << "Input: " << x << std::endl;
Poco::HexBinaryEncoder hex(std::cout); Poco::HexBinaryEncoder hex(std::cout);
DB::writeVarUInt(x, hex); std::cout << "writeVarUIntOverflow(std::ostream): 0x";
DB::writeVarUIntOverflow(x, hex);
std::cout << std::endl; std::cout << std::endl;
std::string s; std::string s;
{ {
DB::WriteBufferFromString wb(s); DB::WriteBufferFromString wb(s);
DB::writeVarUInt(x, wb); DB::writeVarUIntOverflow(x, wb);
wb.next(); wb.next();
} }
std::cout << "writeVarUIntOverflow(WriteBuffer): 0x";
hex << s; hex << s;
std::cout << std::endl; std::cout << std::endl;
s.clear(); s.clear();
s.resize(9); s.resize(9);
s.resize(DB::writeVarUInt(x, s.data()) - s.data()); s.resize(DB::writeVarUIntOverflow(x, s.data()) - s.data());
std::cout << "writeVarUIntOverflow(char *): 0x";
hex << s; hex << s;
std::cout << std::endl; std::cout << std::endl;
@ -46,7 +52,7 @@ int main(int argc, char ** argv)
DB::ReadBufferFromString rb(s); DB::ReadBufferFromString rb(s);
DB::readVarUInt(y, rb); DB::readVarUInt(y, rb);
std::cerr << "x: " << x << ", y: " << y << std::endl; std::cerr << "Input: " << x << ", readVarUInt(writeVarUIntOverflow()): " << y << std::endl;
return 0; return 0;
} }

View File

@ -145,6 +145,12 @@ namespace CurrentMetrics
extern const Metric BackgroundFetchesPoolSize; extern const Metric BackgroundFetchesPoolSize;
extern const Metric BackgroundCommonPoolTask; extern const Metric BackgroundCommonPoolTask;
extern const Metric BackgroundCommonPoolSize; extern const Metric BackgroundCommonPoolSize;
extern const Metric MarksLoaderThreads;
extern const Metric MarksLoaderThreadsActive;
extern const Metric IOPrefetchThreads;
extern const Metric IOPrefetchThreadsActive;
extern const Metric IOWriterThreads;
extern const Metric IOWriterThreadsActive;
} }
namespace DB namespace DB
@ -2018,7 +2024,8 @@ ThreadPool & Context::getLoadMarksThreadpool() const
{ {
auto pool_size = config.getUInt(".load_marks_threadpool_pool_size", 50); auto pool_size = config.getUInt(".load_marks_threadpool_pool_size", 50);
auto queue_size = config.getUInt(".load_marks_threadpool_queue_size", 1000000); auto queue_size = config.getUInt(".load_marks_threadpool_queue_size", 1000000);
shared->load_marks_threadpool = std::make_unique<ThreadPool>(pool_size, pool_size, queue_size); shared->load_marks_threadpool = std::make_unique<ThreadPool>(
CurrentMetrics::MarksLoaderThreads, CurrentMetrics::MarksLoaderThreadsActive, pool_size, pool_size, queue_size);
} }
return *shared->load_marks_threadpool; return *shared->load_marks_threadpool;
} }
@ -2043,7 +2050,8 @@ ThreadPool & Context::getPrefetchThreadpool() const
{ {
auto pool_size = getPrefetchThreadpoolSize(); auto pool_size = getPrefetchThreadpoolSize();
auto queue_size = config.getUInt(".prefetch_threadpool_queue_size", 1000000); auto queue_size = config.getUInt(".prefetch_threadpool_queue_size", 1000000);
shared->prefetch_threadpool = std::make_unique<ThreadPool>(pool_size, pool_size, queue_size); shared->prefetch_threadpool = std::make_unique<ThreadPool>(
CurrentMetrics::IOPrefetchThreads, CurrentMetrics::IOPrefetchThreadsActive, pool_size, pool_size, queue_size);
} }
return *shared->prefetch_threadpool; return *shared->prefetch_threadpool;
} }
@ -3967,7 +3975,8 @@ ThreadPool & Context::getThreadPoolWriter() const
auto pool_size = config.getUInt(".threadpool_writer_pool_size", 100); auto pool_size = config.getUInt(".threadpool_writer_pool_size", 100);
auto queue_size = config.getUInt(".threadpool_writer_queue_size", 1000000); auto queue_size = config.getUInt(".threadpool_writer_queue_size", 1000000);
shared->threadpool_writer = std::make_unique<ThreadPool>(pool_size, pool_size, queue_size); shared->threadpool_writer = std::make_unique<ThreadPool>(
CurrentMetrics::IOWriterThreads, CurrentMetrics::IOWriterThreadsActive, pool_size, pool_size, queue_size);
} }
return *shared->threadpool_writer; return *shared->threadpool_writer;

View File

@ -21,6 +21,7 @@
#include <Parsers/ASTShowEngineQuery.h> #include <Parsers/ASTShowEngineQuery.h>
#include <Parsers/ASTShowProcesslistQuery.h> #include <Parsers/ASTShowProcesslistQuery.h>
#include <Parsers/ASTShowTablesQuery.h> #include <Parsers/ASTShowTablesQuery.h>
#include <Parsers/ASTShowColumnsQuery.h>
#include <Parsers/ASTUseQuery.h> #include <Parsers/ASTUseQuery.h>
#include <Parsers/ASTWatchQuery.h> #include <Parsers/ASTWatchQuery.h>
#include <Parsers/ASTCreateNamedCollectionQuery.h> #include <Parsers/ASTCreateNamedCollectionQuery.h>
@ -79,6 +80,7 @@
#include <Interpreters/InterpreterShowEngineQuery.h> #include <Interpreters/InterpreterShowEngineQuery.h>
#include <Interpreters/InterpreterShowProcesslistQuery.h> #include <Interpreters/InterpreterShowProcesslistQuery.h>
#include <Interpreters/InterpreterShowTablesQuery.h> #include <Interpreters/InterpreterShowTablesQuery.h>
#include <Interpreters/InterpreterShowColumnsQuery.h>
#include <Interpreters/InterpreterSystemQuery.h> #include <Interpreters/InterpreterSystemQuery.h>
#include <Interpreters/InterpreterUseQuery.h> #include <Interpreters/InterpreterUseQuery.h>
#include <Interpreters/InterpreterWatchQuery.h> #include <Interpreters/InterpreterWatchQuery.h>
@ -175,6 +177,10 @@ std::unique_ptr<IInterpreter> InterpreterFactory::get(ASTPtr & query, ContextMut
{ {
return std::make_unique<InterpreterShowTablesQuery>(query, context); return std::make_unique<InterpreterShowTablesQuery>(query, context);
} }
else if (query->as<ASTShowColumnsQuery>())
{
return std::make_unique<InterpreterShowColumnsQuery>(query, context);
}
else if (query->as<ASTShowEnginesQuery>()) else if (query->as<ASTShowEnginesQuery>())
{ {
return std::make_unique<InterpreterShowEnginesQuery>(query, context); return std::make_unique<InterpreterShowEnginesQuery>(query, context);

View File

@ -0,0 +1,103 @@
#include <Interpreters/InterpreterShowColumnsQuery.h>
#include <IO/WriteBufferFromString.h>
#include <Parsers/ASTShowColumnsQuery.h>
#include <Parsers/formatAST.h>
#include <Interpreters/Context.h>
#include <Interpreters/executeQuery.h>
#include <IO/Operators.h>
#include <boost/algorithm/string.hpp>
namespace DB
{
InterpreterShowColumnsQuery::InterpreterShowColumnsQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_)
: WithMutableContext(context_)
, query_ptr(query_ptr_)
{
}
String InterpreterShowColumnsQuery::getRewrittenQuery()
{
const auto & query = query_ptr->as<ASTShowColumnsQuery &>();
WriteBufferFromOwnString rewritten_query;
rewritten_query << "SELECT name AS field, type AS type, startsWith(type, 'Nullable') AS null, trim(concatWithSeparator(' ', if(is_in_primary_key, 'PRI', ''), if (is_in_sorting_key, 'SOR', ''))) AS key, if(default_kind IN ('ALIAS', 'DEFAULT', 'MATERIALIZED'), default_expression, NULL) AS default, '' AS extra ";
// TODO Interpret query.extended. It is supposed to show internal/virtual columns. Need to fetch virtual column names, see
// IStorage::getVirtuals(). We can't easily do that via SQL.
if (query.full)
{
/// "Full" mode is mostly for MySQL compat
/// - collation: no such thing in ClickHouse
/// - comment
/// - privileges: <not implemented, TODO ask system.grants>
rewritten_query << ", NULL AS collation, comment, '' AS privileges ";
}
rewritten_query << "FROM system.columns WHERE ";
String database;
String table;
if (query.from_table.contains("."))
{
/// FROM <db>.<table> (abbreviated form)
chassert(query.from_database.empty());
std::vector<String> split;
boost::split(split, query.from_table, boost::is_any_of("."));
chassert(split.size() == 2);
database = split[0];
table = split[1];
}
else if (query.from_database.empty())
{
/// FROM <table>
chassert(!query.from_table.empty());
database = getContext()->getCurrentDatabase();
table = query.from_table;
}
else
{
/// FROM <database> FROM <table>
chassert(!query.from_database.empty());
chassert(!query.from_table.empty());
database = query.from_database;
table = query.from_table;
}
rewritten_query << "database = " << DB::quote << database;
rewritten_query << " AND table = " << DB::quote << table;
if (!query.like.empty())
rewritten_query
<< " AND name "
<< (query.not_like ? "NOT " : "")
<< (query.case_insensitive_like ? "ILIKE " : "LIKE ")
<< DB::quote << query.like;
else if (query.where_expression)
rewritten_query << " AND (" << query.where_expression << ")";
/// Sorting is strictly speaking not necessary but 1. it is convenient for users, 2. SQL currently does not allow to
/// sort the output of SHOW COLUMNS otherwise (SELECT * FROM (SHOW COLUMNS ...) ORDER BY ...) is rejected) and 3. some
/// SQL tests can take advantage of this.
rewritten_query << " ORDER BY field, type, null, key, default, extra";
if (query.limit_length)
rewritten_query << " LIMIT " << query.limit_length;
return rewritten_query.str();
}
BlockIO InterpreterShowColumnsQuery::execute()
{
return executeQuery(getRewrittenQuery(), getContext(), true);
}
}

View File

@ -0,0 +1,32 @@
#pragma once
#include <Interpreters/IInterpreter.h>
#include <Parsers/IAST_fwd.h>
namespace DB
{
class Context;
/// Returns a list of columns which meet some conditions.
class InterpreterShowColumnsQuery : public IInterpreter, WithMutableContext
{
public:
InterpreterShowColumnsQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_);
BlockIO execute() override;
/// Ignore quota and limits here because execute() produces a SELECT query which checks quotas/limits by itself.
bool ignoreQuota() const override { return true; }
bool ignoreLimits() const override { return true; }
private:
ASTPtr query_ptr;
String getRewrittenQuery();
};
}

View File

@ -12,7 +12,7 @@ namespace DB
BlockIO InterpreterShowProcesslistQuery::execute() BlockIO InterpreterShowProcesslistQuery::execute()
{ {
return executeQuery("SELECT * FROM system.processes", getContext(), true); return executeQuery("SELECT * FROM system.processes ORDER BY elapsed DESC", getContext(), true);
} }
} }

View File

@ -1,4 +1,4 @@
#include <IO/ReadBufferFromString.h> #include <IO/WriteBufferFromString.h>
#include <Parsers/ASTShowTablesQuery.h> #include <Parsers/ASTShowTablesQuery.h>
#include <Parsers/formatAST.h> #include <Parsers/formatAST.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
@ -24,7 +24,8 @@ namespace ErrorCodes
InterpreterShowTablesQuery::InterpreterShowTablesQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) InterpreterShowTablesQuery::InterpreterShowTablesQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_)
: WithMutableContext(context_), query_ptr(query_ptr_) : WithMutableContext(context_)
, query_ptr(query_ptr_)
{ {
} }

View File

@ -20,8 +20,7 @@ public:
BlockIO execute() override; BlockIO execute() override;
/// We ignore the quota and limits here because execute() will rewrite a show query as a SELECT query and then /// Ignore quota and limits here because execute() produces a SELECT query which checks quotas/limits by itself.
/// the SELECT query will checks the quota and limits.
bool ignoreQuota() const override { return true; } bool ignoreQuota() const override { return true; }
bool ignoreLimits() const override { return true; } bool ignoreLimits() const override { return true; }

View File

@ -37,6 +37,18 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR; extern const int LOGICAL_ERROR;
} }
static ZooKeeperRetriesInfo getRetriesInfo()
{
const auto & config_ref = Context::getGlobalContextInstance()->getConfigRef();
return ZooKeeperRetriesInfo(
"DistributedDDL",
&Poco::Logger::get("DDLQueryStatusSource"),
config_ref.getInt("distributed_ddl_keeper_max_retries", 5),
config_ref.getInt("distributed_ddl_keeper_initial_backoff_ms", 100),
config_ref.getInt("distributed_ddl_keeper_max_backoff_ms", 5000)
);
}
bool isSupportedAlterType(int type) bool isSupportedAlterType(int type)
{ {
assert(type != ASTAlterCommand::NO_TYPE); assert(type != ASTAlterCommand::NO_TYPE);
@ -174,7 +186,7 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context,
entry.tracing_context = OpenTelemetry::CurrentContext(); entry.tracing_context = OpenTelemetry::CurrentContext();
String node_path = ddl_worker.enqueueQuery(entry); String node_path = ddl_worker.enqueueQuery(entry);
return getDistributedDDLStatus(node_path, entry, context); return getDistributedDDLStatus(node_path, entry, context, /* hosts_to_wait */ nullptr);
} }
@ -182,7 +194,7 @@ class DDLQueryStatusSource final : public ISource
{ {
public: public:
DDLQueryStatusSource( DDLQueryStatusSource(
const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const std::optional<Strings> & hosts_to_wait = {}); const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const Strings * hosts_to_wait);
String getName() const override { return "DDLQueryStatus"; } String getName() const override { return "DDLQueryStatus"; }
Chunk generate() override; Chunk generate() override;
@ -230,7 +242,7 @@ private:
}; };
BlockIO getDistributedDDLStatus(const String & node_path, const DDLLogEntry & entry, ContextPtr context, const std::optional<Strings> & hosts_to_wait) BlockIO getDistributedDDLStatus(const String & node_path, const DDLLogEntry & entry, ContextPtr context, const Strings * hosts_to_wait)
{ {
BlockIO io; BlockIO io;
if (context->getSettingsRef().distributed_ddl_task_timeout == 0) if (context->getSettingsRef().distributed_ddl_task_timeout == 0)
@ -291,8 +303,8 @@ Block DDLQueryStatusSource::getSampleBlock(ContextPtr context_, bool hosts_to_wa
} }
DDLQueryStatusSource::DDLQueryStatusSource( DDLQueryStatusSource::DDLQueryStatusSource(
const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const std::optional<Strings> & hosts_to_wait) const String & zk_node_path, const DDLLogEntry & entry, ContextPtr context_, const Strings * hosts_to_wait)
: ISource(getSampleBlock(context_, hosts_to_wait.has_value())) : ISource(getSampleBlock(context_, static_cast<bool>(hosts_to_wait)))
, node_path(zk_node_path) , node_path(zk_node_path)
, context(context_) , context(context_)
, watch(CLOCK_MONOTONIC_COARSE) , watch(CLOCK_MONOTONIC_COARSE)
@ -380,7 +392,6 @@ Chunk DDLQueryStatusSource::generate()
if (is_replicated_database && context->getSettingsRef().database_replicated_enforce_synchronous_settings) if (is_replicated_database && context->getSettingsRef().database_replicated_enforce_synchronous_settings)
node_to_wait = "synced"; node_to_wait = "synced";
auto zookeeper = context->getZooKeeper();
size_t try_number = 0; size_t try_number = 0;
while (true) while (true)
@ -420,7 +431,23 @@ Chunk DDLQueryStatusSource::generate()
sleepForMilliseconds(std::min<size_t>(1000, 50 * (try_number + 1))); sleepForMilliseconds(std::min<size_t>(1000, 50 * (try_number + 1)));
} }
if (!zookeeper->exists(node_path)) bool node_exists = false;
Strings tmp_hosts;
Strings tmp_active_hosts;
{
auto retries_info = getRetriesInfo();
auto retries_ctl = ZooKeeperRetriesControl("executeDDLQueryOnCluster", retries_info);
retries_ctl.retryLoop([&]()
{
auto zookeeper = context->getZooKeeper();
node_exists = zookeeper->exists(node_path);
tmp_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / node_to_wait);
tmp_active_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / "active");
});
}
if (!node_exists)
{ {
/// Paradoxically, this exception will be throw even in case of "never_throw" mode. /// Paradoxically, this exception will be throw even in case of "never_throw" mode.
@ -432,12 +459,12 @@ Chunk DDLQueryStatusSource::generate()
return {}; return {};
} }
Strings new_hosts = getNewAndUpdate(getChildrenAllowNoNode(zookeeper, fs::path(node_path) / node_to_wait)); Strings new_hosts = getNewAndUpdate(tmp_hosts);
++try_number; ++try_number;
if (new_hosts.empty()) if (new_hosts.empty())
continue; continue;
current_active_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / "active"); current_active_hosts = std::move(tmp_active_hosts);
MutableColumns columns = output.getHeader().cloneEmptyColumns(); MutableColumns columns = output.getHeader().cloneEmptyColumns();
for (const String & host_id : new_hosts) for (const String & host_id : new_hosts)
@ -447,7 +474,15 @@ Chunk DDLQueryStatusSource::generate()
if (node_to_wait == "finished") if (node_to_wait == "finished")
{ {
String status_data; String status_data;
if (zookeeper->tryGet(fs::path(node_path) / "finished" / host_id, status_data)) bool finished_exists = false;
auto retries_info = getRetriesInfo();
auto retries_ctl = ZooKeeperRetriesControl("executeDDLQueryOnCluster", retries_info);
retries_ctl.retryLoop([&]()
{
finished_exists = context->getZooKeeper()->tryGet(fs::path(node_path) / "finished" / host_id, status_data);
});
if (finished_exists)
status.tryDeserializeText(status_data); status.tryDeserializeText(status_data);
} }
else else

View File

@ -5,6 +5,7 @@
#include <Processors/ISource.h> #include <Processors/ISource.h>
#include <Interpreters/Context_fwd.h> #include <Interpreters/Context_fwd.h>
#include <Parsers/IAST_fwd.h> #include <Parsers/IAST_fwd.h>
#include <Storages/MergeTree/ZooKeeperRetries.h>
namespace zkutil namespace zkutil
@ -42,8 +43,7 @@ struct DDLQueryOnClusterParams
/// Returns DDLQueryStatusSource, which reads results of query execution on each host in the cluster. /// Returns DDLQueryStatusSource, which reads results of query execution on each host in the cluster.
BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, ContextPtr context, const DDLQueryOnClusterParams & params = {}); BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, ContextPtr context, const DDLQueryOnClusterParams & params = {});
BlockIO getDistributedDDLStatus( BlockIO getDistributedDDLStatus(const String & node_path, const DDLLogEntry & entry, ContextPtr context, const Strings * hosts_to_wait);
const String & node_path, const DDLLogEntry & entry, ContextPtr context, const std::optional<Strings> & hosts_to_wait = {});
bool maybeRemoveOnCluster(const ASTPtr & query_ptr, ContextPtr context); bool maybeRemoveOnCluster(const ASTPtr & query_ptr, ContextPtr context);

View File

@ -17,17 +17,21 @@ class ASTTableIdentifier;
void setIdentifierSpecial(ASTPtr & ast); void setIdentifierSpecial(ASTPtr & ast);
String getIdentifierName(const IAST * ast); String getIdentifierName(const IAST * ast);
std::optional<String> tryGetIdentifierName(const IAST * ast); std::optional<String> tryGetIdentifierName(const IAST * ast);
bool tryGetIdentifierNameInto(const IAST * ast, String & name); bool tryGetIdentifierNameInto(const IAST * ast, String & name);
inline String getIdentifierName(const ASTPtr & ast) inline String getIdentifierName(const ASTPtr & ast)
{ {
return getIdentifierName(ast.get()); return getIdentifierName(ast.get());
} }
inline std::optional<String> tryGetIdentifierName(const ASTPtr & ast) inline std::optional<String> tryGetIdentifierName(const ASTPtr & ast)
{ {
return tryGetIdentifierName(ast.get()); return tryGetIdentifierName(ast.get());
} }
inline bool tryGetIdentifierNameInto(const ASTPtr & ast, String & name) inline bool tryGetIdentifierNameInto(const ASTPtr & ast, String & name)
{ {
return tryGetIdentifierNameInto(ast.get(), name); return tryGetIdentifierNameInto(ast.get(), name);

View File

@ -0,0 +1,52 @@
#include <Parsers/ASTShowColumnsQuery.h>
#include <iomanip>
#include <Common/quoteString.h>
#include <IO/Operators.h>
namespace DB
{
ASTPtr ASTShowColumnsQuery::clone() const
{
auto res = std::make_shared<ASTShowColumnsQuery>(*this);
res->children.clear();
cloneOutputOptions(*res);
return res;
}
void ASTShowColumnsQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
settings.ostr << (settings.hilite ? hilite_keyword : "")
<< "SHOW "
<< (extended ? "EXTENDED " : "")
<< (full ? "FULL " : "")
<< "COLUMNS"
<< (settings.hilite ? hilite_none : "");
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(from_table);
if (!from_database.empty())
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(from_database);
if (!like.empty())
settings.ostr << (settings.hilite ? hilite_keyword : "")
<< (not_like ? " NOT" : "")
<< (case_insensitive_like ? " ILIKE " : " LIKE")
<< (settings.hilite ? hilite_none : "")
<< DB::quote << like;
if (where_expression)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : "");
where_expression->formatImpl(settings, state, frame);
}
if (limit_length)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << " LIMIT " << (settings.hilite ? hilite_none : "");
limit_length->formatImpl(settings, state, frame);
}
}
}

View File

@ -0,0 +1,34 @@
#pragma once
#include <Parsers/IAST_fwd.h>
#include <Parsers/ASTQueryWithOutput.h>
namespace DB
{
/// Query SHOW COLUMNS
class ASTShowColumnsQuery : public ASTQueryWithOutput
{
public:
bool extended = false;
bool full = false;
bool not_like = false;
bool case_insensitive_like = false;
ASTPtr where_expression;
ASTPtr limit_length;
String from_database;
String from_table;
String like;
String getID(char) const override { return "ShowColumns"; }
ASTPtr clone() const override;
QueryKind getQueryKind() const override { return QueryKind::Show; }
protected:
void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
};
}

View File

@ -14,31 +14,28 @@ namespace DB
class ASTShowTablesQuery : public ASTQueryWithOutput class ASTShowTablesQuery : public ASTQueryWithOutput
{ {
public: public:
bool databases{false}; bool databases = false;
bool clusters{false}; bool clusters = false;
bool cluster{false}; bool cluster = false;
bool dictionaries{false}; bool dictionaries = false;
bool m_settings{false}; bool m_settings = false;
bool changed{false}; bool changed = false;
bool temporary{false}; bool temporary = false;
bool caches{false}; bool caches = false;
bool full{false}; bool full = false;
String cluster_str; String cluster_str;
String from; String from;
String like; String like;
bool not_like{false}; bool not_like = false;
bool case_insensitive_like{false}; bool case_insensitive_like = false;
ASTPtr where_expression; ASTPtr where_expression;
ASTPtr limit_length; ASTPtr limit_length;
/** Get the text that identifies this element. */
String getID(char) const override { return "ShowTables"; } String getID(char) const override { return "ShowTables"; }
ASTPtr clone() const override; ASTPtr clone() const override;
QueryKind getQueryKind() const override { return QueryKind::Show; } QueryKind getQueryKind() const override { return QueryKind::Show; }
protected: protected:

View File

@ -16,6 +16,7 @@
#include <Parsers/ParserSetQuery.h> #include <Parsers/ParserSetQuery.h>
#include <Parsers/ParserShowProcesslistQuery.h> #include <Parsers/ParserShowProcesslistQuery.h>
#include <Parsers/ParserShowTablesQuery.h> #include <Parsers/ParserShowTablesQuery.h>
#include <Parsers/ParserShowColumnsQuery.h>
#include <Parsers/ParserShowEngineQuery.h> #include <Parsers/ParserShowEngineQuery.h>
#include <Parsers/ParserTablePropertiesQuery.h> #include <Parsers/ParserTablePropertiesQuery.h>
#include <Parsers/ParserWatchQuery.h> #include <Parsers/ParserWatchQuery.h>
@ -35,6 +36,7 @@ namespace DB
bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{ {
ParserShowTablesQuery show_tables_p; ParserShowTablesQuery show_tables_p;
ParserShowColumnsQuery show_columns_p;
ParserShowEnginesQuery show_engine_p; ParserShowEnginesQuery show_engine_p;
ParserSelectWithUnionQuery select_p; ParserSelectWithUnionQuery select_p;
ParserTablePropertiesQuery table_p; ParserTablePropertiesQuery table_p;
@ -64,6 +66,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
|| select_p.parse(pos, query, expected) || select_p.parse(pos, query, expected)
|| show_create_access_entity_p.parse(pos, query, expected) /// should be before `show_tables_p` || show_create_access_entity_p.parse(pos, query, expected) /// should be before `show_tables_p`
|| show_tables_p.parse(pos, query, expected) || show_tables_p.parse(pos, query, expected)
|| show_columns_p.parse(pos, query, expected)
|| show_engine_p.parse(pos, query, expected) || show_engine_p.parse(pos, query, expected)
|| table_p.parse(pos, query, expected) || table_p.parse(pos, query, expected)
|| describe_cache_p.parse(pos, query, expected) || describe_cache_p.parse(pos, query, expected)

View File

@ -0,0 +1,80 @@
#include <Parsers/ParserShowColumnsQuery.h>
#include <Parsers/ASTIdentifier_fwd.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTShowColumnsQuery.h>
#include <Parsers/CommonParsers.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ExpressionListParsers.h>
namespace DB
{
bool ParserShowColumnsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ASTPtr like;
ASTPtr from_database;
ASTPtr from_table;
auto query = std::make_shared<ASTShowColumnsQuery>();
if (!ParserKeyword("SHOW").ignore(pos, expected))
return false;
if (ParserKeyword("EXTENDED").ignore(pos, expected))
query->extended = true;
if (ParserKeyword("FULL").ignore(pos, expected))
query->full = true;
if (!ParserKeyword("COLUMNS").ignore(pos, expected) || ParserKeyword("FIELDS").ignore(pos, expected))
return false;
if (ParserKeyword("FROM").ignore(pos, expected) || ParserKeyword("IN").ignore(pos, expected))
{
if (!ParserCompoundIdentifier().parse(pos, from_table, expected))
return false;
}
else
return false;
tryGetIdentifierNameInto(from_table, query->from_table);
bool abbreviated_form = query->from_table.contains("."); /// FROM <db>.<table>
if (!abbreviated_form)
if (ParserKeyword("FROM").ignore(pos, expected) || ParserKeyword("IN").ignore(pos, expected))
if (!ParserIdentifier().parse(pos, from_database, expected))
return false;
tryGetIdentifierNameInto(from_database, query->from_database);
if (ParserKeyword("NOT").ignore(pos, expected))
query->not_like = true;
if (bool insensitive = ParserKeyword("ILIKE").ignore(pos, expected); insensitive || ParserKeyword("LIKE").ignore(pos, expected))
{
if (insensitive)
query->case_insensitive_like = true;
if (!ParserStringLiteral().parse(pos, like, expected))
return false;
}
else if (query->not_like)
return false;
else if (ParserKeyword("WHERE").ignore(pos, expected))
if (!ParserExpressionWithOptionalAlias(false).parse(pos, query->where_expression, expected))
return false;
if (ParserKeyword("LIMIT").ignore(pos, expected))
if (!ParserExpressionWithOptionalAlias(false).parse(pos, query->limit_length, expected))
return false;
if (like)
query->like = like->as<ASTLiteral &>().value.safeGet<const String &>();
node = query;
return true;
}
}

View File

@ -0,0 +1,19 @@
#pragma once
#include <Parsers/IParserBase.h>
namespace DB
{
/** Parses queries of the form
* SHOW [EXTENDED] [FULL] COLUMNS (FROM|IN) tbl [(FROM|IN) db] [(([NOT] (LIKE|ILIKE) expr) | (WHERE expr))] [LIMIT n]
*/
class ParserShowColumnsQuery : public IParserBase
{
protected:
const char * getName() const override { return "SHOW COLUMNS query"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
}

View File

@ -149,10 +149,8 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
} }
if (s_from.ignore(pos, expected) || s_in.ignore(pos, expected)) if (s_from.ignore(pos, expected) || s_in.ignore(pos, expected))
{
if (!name_p.parse(pos, database, expected)) if (!name_p.parse(pos, database, expected))
return false; return false;
}
if (s_not.ignore(pos, expected)) if (s_not.ignore(pos, expected))
query->not_like = true; query->not_like = true;
@ -168,16 +166,12 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
else if (query->not_like) else if (query->not_like)
return false; return false;
else if (s_where.ignore(pos, expected)) else if (s_where.ignore(pos, expected))
{
if (!exp_elem.parse(pos, query->where_expression, expected)) if (!exp_elem.parse(pos, query->where_expression, expected))
return false; return false;
}
if (s_limit.ignore(pos, expected)) if (s_limit.ignore(pos, expected))
{
if (!exp_elem.parse(pos, query->limit_length, expected)) if (!exp_elem.parse(pos, query->limit_length, expected))
return false; return false;
}
} }
tryGetIdentifierNameInto(database, query->from); tryGetIdentifierNameInto(database, query->from);

View File

@ -1,2 +1,4 @@
clickhouse_add_executable (comma_separated_streams comma_separated_streams.cpp) if (TARGET ch_contrib::hivemetastore)
target_link_libraries (comma_separated_streams PRIVATE dbms) clickhouse_add_executable (comma_separated_streams comma_separated_streams.cpp)
target_link_libraries (comma_separated_streams PRIVATE dbms)
endif()

View File

@ -1895,7 +1895,7 @@ void TCPHandler::sendData(const Block & block)
{ {
--unknown_packet_in_send_data; --unknown_packet_in_send_data;
if (unknown_packet_in_send_data == 0) if (unknown_packet_in_send_data == 0)
writeVarUInt(UInt64(-1), *out); writeVarUInt(VAR_UINT_MAX, *out);
} }
writeVarUInt(Protocol::Server::Data, *out); writeVarUInt(Protocol::Server::Data, *out);

View File

@ -2082,10 +2082,10 @@ void StorageMergeTree::backupData(BackupEntriesCollector & backup_entries_collec
Int64 min_data_version = std::numeric_limits<Int64>::max(); Int64 min_data_version = std::numeric_limits<Int64>::max();
for (const auto & data_part : data_parts) for (const auto & data_part : data_parts)
min_data_version = std::min(min_data_version, data_part->info.getDataVersion()); min_data_version = std::min(min_data_version, data_part->info.getDataVersion() + 1);
backup_entries_collector.addBackupEntries(backupParts(data_parts, data_path_in_backup, local_context)); backup_entries_collector.addBackupEntries(backupParts(data_parts, data_path_in_backup, local_context));
backup_entries_collector.addBackupEntries(backupMutations(min_data_version + 1, data_path_in_backup)); backup_entries_collector.addBackupEntries(backupMutations(min_data_version, data_path_in_backup));
} }

View File

@ -35,6 +35,7 @@ const char * auto_contributors[] {
"Aleksei Filatov", "Aleksei Filatov",
"Aleksei Levushkin", "Aleksei Levushkin",
"Aleksei Semiglazov", "Aleksei Semiglazov",
"Aleksei Tikhomirov",
"Aleksey", "Aleksey",
"Aleksey Akulovich", "Aleksey Akulovich",
"Alex", "Alex",
@ -84,6 +85,7 @@ const char * auto_contributors[] {
"Alexey Gusev", "Alexey Gusev",
"Alexey Ilyukhov", "Alexey Ilyukhov",
"Alexey Ivanov", "Alexey Ivanov",
"Alexey Korepanov",
"Alexey Milovidov", "Alexey Milovidov",
"Alexey Perevyshin", "Alexey Perevyshin",
"Alexey Tronov", "Alexey Tronov",
@ -135,6 +137,7 @@ const char * auto_contributors[] {
"Andrii R", "Andrii R",
"Andy Liang", "Andy Liang",
"Andy Yang", "Andy Yang",
"AndyB",
"Anish Bhanwala", "Anish Bhanwala",
"Anmol Arora", "Anmol Arora",
"Anna", "Anna",
@ -155,6 +158,7 @@ const char * auto_contributors[] {
"Anton Yuzhaninov", "Anton Yuzhaninov",
"Anton Zhabolenko", "Anton Zhabolenko",
"Antonio Andelic", "Antonio Andelic",
"Antonio Bonuccelli",
"Ariel Robaldo", "Ariel Robaldo",
"Arsen Hakobyan", "Arsen Hakobyan",
"Arslan G", "Arslan G",
@ -227,6 +231,7 @@ const char * auto_contributors[] {
"Christoph Wurm", "Christoph Wurm",
"Chun-Sheng, Li", "Chun-Sheng, Li",
"Ciprian Hacman", "Ciprian Hacman",
"Clayton McClure",
"Clement Rodriguez", "Clement Rodriguez",
"ClickHouse Admin", "ClickHouse Admin",
"Clément Rodriguez", "Clément Rodriguez",
@ -256,6 +261,7 @@ const char * auto_contributors[] {
"Dario", "Dario",
"DarkWanderer", "DarkWanderer",
"Darío", "Darío",
"Dave Lahn",
"Denis Burlaka", "Denis Burlaka",
"Denis Glazachev", "Denis Glazachev",
"Denis Krivak", "Denis Krivak",
@ -391,6 +397,7 @@ const char * auto_contributors[] {
"HeenaBansal2009", "HeenaBansal2009",
"Hiroaki Nakamura", "Hiroaki Nakamura",
"Hongbin", "Hongbin",
"Hosun Lee",
"HuFuwang", "HuFuwang",
"Hui Wang", "Hui Wang",
"ILya Limarenko", "ILya Limarenko",
@ -457,12 +464,15 @@ const char * auto_contributors[] {
"Jiebin Sun", "Jiebin Sun",
"Joanna Hulboj", "Joanna Hulboj",
"Jochen Schalanda", "Jochen Schalanda",
"Joey",
"Johannes Visintini",
"John", "John",
"John Hummel", "John Hummel",
"John Skopis", "John Skopis",
"Jonatas Freitas", "Jonatas Freitas",
"Jonathan-Ackerman", "Jonathan-Ackerman",
"Jordi Villar", "Jordi Villar",
"Joris Giovannangeli",
"Jose", "Jose",
"Josh Taylor", "Josh Taylor",
"João Figueiredo", "João Figueiredo",
@ -481,6 +491,7 @@ const char * auto_contributors[] {
"Kevin Chiang", "Kevin Chiang",
"Kevin Michel", "Kevin Michel",
"Kevin Zhang", "Kevin Zhang",
"KevinyhZou",
"KinderRiven", "KinderRiven",
"Kiran", "Kiran",
"Kirill Danshin", "Kirill Danshin",
@ -525,6 +536,7 @@ const char * auto_contributors[] {
"Li Yin", "Li Yin",
"Liu Cong", "Liu Cong",
"LiuCong", "LiuCong",
"LiuNeng",
"LiuYangkuan", "LiuYangkuan",
"Lloyd-Pottiger", "Lloyd-Pottiger",
"Lopatin Konstantin", "Lopatin Konstantin",
@ -544,6 +556,7 @@ const char * auto_contributors[] {
"Maksim Buren", "Maksim Buren",
"Maksim Fedotov", "Maksim Fedotov",
"Maksim Kita", "Maksim Kita",
"Maksym Sobolyev",
"Mallik Hassan", "Mallik Hassan",
"Malte", "Malte",
"Manuel de la Peña", "Manuel de la Peña",
@ -625,6 +638,7 @@ const char * auto_contributors[] {
"Mikhail Salosin", "Mikhail Salosin",
"Mikhail Surin", "Mikhail Surin",
"Mikhail f. Shiryaev", "Mikhail f. Shiryaev",
"MikhailBurdukov",
"MikuSugar", "MikuSugar",
"Milad Arabi", "Milad Arabi",
"Mingliang Pan", "Mingliang Pan",
@ -694,6 +708,7 @@ const char * auto_contributors[] {
"OmarBazaraa", "OmarBazaraa",
"OnePiece", "OnePiece",
"Onehr7", "Onehr7",
"Ongkong",
"Orivej Desh", "Orivej Desh",
"Orkhan Zeynalli", "Orkhan Zeynalli",
"Oskar Wojciski", "Oskar Wojciski",
@ -701,6 +716,7 @@ const char * auto_contributors[] {
"PHO", "PHO",
"Pablo Alegre", "Pablo Alegre",
"Pablo Marcos", "Pablo Marcos",
"Palash Goel",
"Paramtamtam", "Paramtamtam",
"Patrick Zippenfenig", "Patrick Zippenfenig",
"Paul Loyd", "Paul Loyd",
@ -795,6 +811,7 @@ const char * auto_contributors[] {
"Sergei Bocharov", "Sergei Bocharov",
"Sergei Semin", "Sergei Semin",
"Sergei Shtykov", "Sergei Shtykov",
"Sergei Solomatov",
"Sergei Trifonov", "Sergei Trifonov",
"Sergei Tsetlin (rekub)", "Sergei Tsetlin (rekub)",
"Sergey Demurin", "Sergey Demurin",
@ -844,6 +861,7 @@ const char * auto_contributors[] {
"Stupnikov Andrey", "Stupnikov Andrey",
"SuperBot", "SuperBot",
"SuperDJY", "SuperDJY",
"SupunKavinda",
"Suzy Wang", "Suzy Wang",
"SuzyWangIBMer", "SuzyWangIBMer",
"Sébastien", "Sébastien",
@ -865,6 +883,7 @@ const char * auto_contributors[] {
"The-Alchemist", "The-Alchemist",
"Thom O'Connor", "Thom O'Connor",
"Thomas Berdy", "Thomas Berdy",
"Thomas Casteleyn",
"Tian Xinhui", "Tian Xinhui",
"Tiaonmmn", "Tiaonmmn",
"Tigran Khudaverdyan", "Tigran Khudaverdyan",
@ -996,6 +1015,7 @@ const char * auto_contributors[] {
"Zhipeng", "Zhipeng",
"Zijie Lu", "Zijie Lu",
"Zoran Pandovski", "Zoran Pandovski",
"[데이터플랫폼팀] 이호선",
"a.palagashvili", "a.palagashvili",
"aaapetrenko", "aaapetrenko",
"abdrakhmanov", "abdrakhmanov",
@ -1011,6 +1031,7 @@ const char * auto_contributors[] {
"akuzm", "akuzm",
"alekseik1", "alekseik1",
"alesapin", "alesapin",
"alex filatov",
"alex-zaitsev", "alex-zaitsev",
"alex.lvxin", "alex.lvxin",
"alexX512", "alexX512",
@ -1035,12 +1056,14 @@ const char * auto_contributors[] {
"anton", "anton",
"ap11", "ap11",
"aprudaev", "aprudaev",
"artem-yadr",
"artpaul", "artpaul",
"asiana21", "asiana21",
"atereh", "atereh",
"attack204", "attack204",
"avasiliev", "avasiliev",
"avogar", "avogar",
"avoiderboi",
"avsharapov", "avsharapov",
"awakeljw", "awakeljw",
"awesomeleo", "awesomeleo",
@ -1079,6 +1102,7 @@ const char * auto_contributors[] {
"chou.fan", "chou.fan",
"christophe.kalenzaga", "christophe.kalenzaga",
"clarkcaoliu", "clarkcaoliu",
"clickhouse-adrianfraguela",
"clickhouse-robot-curie", "clickhouse-robot-curie",
"cms", "cms",
"cmsxbc", "cmsxbc",
@ -1111,6 +1135,7 @@ const char * auto_contributors[] {
"dmitriy", "dmitriy",
"dmitry kuzmin", "dmitry kuzmin",
"dongyifeng", "dongyifeng",
"ducle.canh",
"eaxdev", "eaxdev",
"eejoin", "eejoin",
"egatov", "egatov",
@ -1123,6 +1148,8 @@ const char * auto_contributors[] {
"erikbaan", "erikbaan",
"ermaotech", "ermaotech",
"evtan", "evtan",
"exX512",
"exmy",
"exprmntr", "exprmntr",
"ezhaka", "ezhaka",
"f1yegor", "f1yegor",
@ -1152,6 +1179,7 @@ const char * auto_contributors[] {
"fuwhu", "fuwhu",
"fuzhe1989", "fuzhe1989",
"fuzzERot", "fuzzERot",
"fyu",
"g-arslan", "g-arslan",
"ggerogery", "ggerogery",
"giordyb", "giordyb",
@ -1178,9 +1206,11 @@ const char * auto_contributors[] {
"hhell", "hhell",
"homeward", "homeward",
"hotid", "hotid",
"houbaron",
"huangzhaowei", "huangzhaowei",
"hustnn", "hustnn",
"huzhichengdd", "huzhichengdd",
"iammagicc",
"ianton-ru", "ianton-ru",
"ice1x", "ice1x",
"idfer", "idfer",
@ -1212,6 +1242,7 @@ const char * auto_contributors[] {
"jinjunzh", "jinjunzh",
"jkuklis", "jkuklis",
"jthmath", "jthmath",
"jun won",
"jus1096", "jus1096",
"jyz0309", "jyz0309",
"karnevil13", "karnevil13",
@ -1223,6 +1254,7 @@ const char * auto_contributors[] {
"kigerzhang", "kigerzhang",
"kirillikoff", "kirillikoff",
"kmeaw", "kmeaw",
"kolechenkov",
"koloshmet", "koloshmet",
"kolsys", "kolsys",
"konnectr", "konnectr",
@ -1252,6 +1284,7 @@ const char * auto_contributors[] {
"liangqian", "liangqian",
"libenwang", "libenwang",
"lichengxiang", "lichengxiang",
"liding1992",
"linceyou", "linceyou",
"lincion", "lincion",
"lingo-xp", "lingo-xp",
@ -1330,6 +1363,7 @@ const char * auto_contributors[] {
"nauta", "nauta",
"nautaa", "nautaa",
"ndchikin", "ndchikin",
"nellicus",
"neng.liu", "neng.liu",
"never lee", "never lee",
"ni1l", "ni1l",
@ -1545,6 +1579,7 @@ const char * auto_contributors[] {
"Дмитрий Канатников", "Дмитрий Канатников",
"Иванов Евгений", "Иванов Евгений",
"Илья Исаев", "Илья Исаев",
"Коренберг Марк",
"Коренберг ☢️ Марк", "Коренберг ☢️ Марк",
"Павел Литвиненко", "Павел Литвиненко",
"Смитюх Вячеслав", "Смитюх Вячеслав",

View File

@ -558,9 +558,6 @@ class SettingsRandomizer:
"enable_memory_bound_merging_of_aggregation_results": lambda: random.randint( "enable_memory_bound_merging_of_aggregation_results": lambda: random.randint(
0, 1 0, 1
), ),
"min_count_to_compile_expression": lambda: random.choice([0, 3]),
"min_count_to_compile_aggregate_expression": lambda: random.choice([0, 3]),
"min_count_to_compile_sort_description": lambda: random.choice([0, 3]),
} }
@staticmethod @staticmethod

View File

@ -202,6 +202,16 @@ class CommandRequest:
self.timer = Timer(timeout, kill_process) self.timer = Timer(timeout, kill_process)
self.timer.start() self.timer.start()
def remove_trash_from_stderr(self, stderr):
# FIXME https://github.com/ClickHouse/ClickHouse/issues/48181
if not stderr:
return stderr
lines = stderr.split("\n")
lines = [
x for x in lines if ("completion_queue" not in x and "Kick failed" not in x)
]
return "\n".join(lines)
def get_answer(self): def get_answer(self):
self.process.wait(timeout=DEFAULT_QUERY_TIMEOUT) self.process.wait(timeout=DEFAULT_QUERY_TIMEOUT)
self.stdout_file.seek(0) self.stdout_file.seek(0)
@ -218,7 +228,9 @@ class CommandRequest:
logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}") logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
raise QueryTimeoutExceedException("Client timed out!") raise QueryTimeoutExceedException("Client timed out!")
if (self.process.returncode != 0 or stderr) and not self.ignore_error: if (
self.process.returncode != 0 or self.remove_trash_from_stderr(stderr)
) and not self.ignore_error:
raise QueryRuntimeException( raise QueryRuntimeException(
"Client failed! Return code: {}, stderr: {}".format( "Client failed! Return code: {}, stderr: {}".format(
self.process.returncode, stderr self.process.returncode, stderr

View File

@ -90,6 +90,11 @@ def start_cluster():
cluster.shutdown() cluster.shutdown()
def check_exists(zk, path):
zk.sync(path)
return zk.exists(path)
def test_drop_replica(start_cluster): def test_drop_replica(start_cluster):
node_1_1.query( node_1_1.query(
"INSERT INTO test.test_table SELECT number, toString(number) FROM numbers(100)" "INSERT INTO test.test_table SELECT number, toString(number) FROM numbers(100)"
@ -158,10 +163,11 @@ def test_drop_replica(start_cluster):
) )
node_1_3.query("SYSTEM DROP REPLICA 'node_1_1'") node_1_3.query("SYSTEM DROP REPLICA 'node_1_1'")
exists_replica_1_1 = zk.exists( exists_replica_1_1 = check_exists(
zk,
"/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format( "/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format(
shard=1, replica="node_1_1" shard=1, replica="node_1_1"
) ),
) )
assert exists_replica_1_1 != None assert exists_replica_1_1 != None
@ -171,26 +177,29 @@ def test_drop_replica(start_cluster):
shard=1 shard=1
) )
) )
exists_replica_1_1 = zk.exists( exists_replica_1_1 = check_exists(
zk,
"/clickhouse/tables/test2/{shard}/replicated/test_table/replicas/{replica}".format( "/clickhouse/tables/test2/{shard}/replicated/test_table/replicas/{replica}".format(
shard=1, replica="node_1_1" shard=1, replica="node_1_1"
) ),
) )
assert exists_replica_1_1 == None assert exists_replica_1_1 == None
node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table") node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table")
exists_replica_1_1 = zk.exists( exists_replica_1_1 = check_exists(
zk,
"/clickhouse/tables/test/{shard}/replicated/test_table/replicas/{replica}".format( "/clickhouse/tables/test/{shard}/replicated/test_table/replicas/{replica}".format(
shard=1, replica="node_1_1" shard=1, replica="node_1_1"
) ),
) )
assert exists_replica_1_1 == None assert exists_replica_1_1 == None
node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1") node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1")
exists_replica_1_1 = zk.exists( exists_replica_1_1 = check_exists(
zk,
"/clickhouse/tables/test1/{shard}/replicated/test_table/replicas/{replica}".format( "/clickhouse/tables/test1/{shard}/replicated/test_table/replicas/{replica}".format(
shard=1, replica="node_1_1" shard=1, replica="node_1_1"
) ),
) )
assert exists_replica_1_1 == None assert exists_replica_1_1 == None
@ -199,17 +208,19 @@ def test_drop_replica(start_cluster):
shard=1 shard=1
) )
) )
exists_replica_1_1 = zk.exists( exists_replica_1_1 = check_exists(
zk,
"/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format( "/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format(
shard=1, replica="node_1_1" shard=1, replica="node_1_1"
) ),
) )
assert exists_replica_1_1 == None assert exists_replica_1_1 == None
node_1_2.query("SYSTEM DROP REPLICA 'node_1_1'") node_1_2.query("SYSTEM DROP REPLICA 'node_1_1'")
exists_replica_1_1 = zk.exists( exists_replica_1_1 = check_exists(
zk,
"/clickhouse/tables/test4/{shard}/replicated/test_table/replicas/{replica}".format( "/clickhouse/tables/test4/{shard}/replicated/test_table/replicas/{replica}".format(
shard=1, replica="node_1_1" shard=1, replica="node_1_1"
) ),
) )
assert exists_replica_1_1 == None assert exists_replica_1_1 == None

View File

@ -90,11 +90,12 @@ def test_system_replicated_fetches(started_cluster):
) )
for elem in fetches_result: for elem in fetches_result:
assert ( # FIXME https://github.com/ClickHouse/ClickHouse/issues/45435
elem["bytes_read_compressed"] <= elem["total_size_bytes_compressed"] # assert (
), "Bytes read ({}) more than total bytes ({}). It's a bug".format( # elem["bytes_read_compressed"] <= elem["total_size_bytes_compressed"]
elem["bytes_read_compressed"], elem["total_size_bytes_compressed"] # ), "Bytes read ({}) more than total bytes ({}). It's a bug".format(
) # elem["bytes_read_compressed"], elem["total_size_bytes_compressed"]
# )
assert ( assert (
0.0 <= elem["progress"] <= 1.0 0.0 <= elem["progress"] <= 1.0
), "Progress shouldn't less than 0 and bigger than 1, got {}".format( ), "Progress shouldn't less than 0 and bigger than 1, got {}".format(

View File

@ -0,0 +1,22 @@
<test>
<query>
WITH (range(10), range(10))::Map(String, UInt64) AS m1, (range(3), range(3))::Map(String, UInt64) AS m2
SELECT count() FROM numbers(500000)
WHERE NOT ignore(mapUpdate(materialize(m1), materialize(m2)))
</query>
<query>
WITH (range(10), range(10))::Map(String, UInt64) AS m1, (range(3), range(3))::Map(String, UInt64) AS m2
SELECT count() FROM numbers(500000)
WHERE NOT ignore(mapUpdate(materialize(m1), m2))
</query>
<query>
WITH (range(100), range(100))::Map(String, UInt64) AS m1, (range(30), range(30))::Map(String, UInt64) AS m2
SELECT count() FROM numbers(50000)
WHERE NOT ignore(mapUpdate(materialize(m1), materialize(m2)))
</query>
<query>
WITH (range(100), range(100))::Map(String, UInt64) AS m1, (range(30), range(30))::Map(String, UInt64) AS m2
SELECT count() FROM numbers(50000)
WHERE NOT ignore(mapUpdate(materialize(m1), m2))
</query>
</test>

View File

@ -1,5 +1,5 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Tags: race, zookeeper, no-parallel, no-upgrade-check # Tags: race, zookeeper, no-parallel, no-upgrade-check, no-replicated-database
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh # shellcheck source=../shell_config.sh

View File

@ -1,5 +1,5 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Tags: long # Tags: long, no-tsan
# shellcheck disable=SC2154 # shellcheck disable=SC2154

View File

@ -31,3 +31,25 @@
{1:2,2:3} {1:2,2:3}
{'x':'y','x':'y'} {'x':'y','x':'y'}
{'x':'y','x':'y'} {'x':'y','x':'y'}
{'k1':11,'k2':22}
{'k1':11,'k2':22}
{'k1':11,'k2':22}
{'k1':11,'k2':22}
{'k1':1,'k2':22,'k3':33,'k4':44}
{'k1':1,'k2':22,'k3':33,'k4':44}
{'k1':1,'k2':22,'k3':33,'k4':44}
{'k1':1,'k2':22,'k3':33,'k4':44}
{'k1':1,'k2':2,'k3':33,'k4':44}
{'k1':1,'k2':2,'k3':33,'k4':44}
{'k1':1,'k2':2,'k3':33,'k4':44}
{'k1':1,'k2':2,'k3':33,'k4':44}
{}
{0:0}
{1:1,0:0}
{1:1,0:0,2:4}
{1:1,3:3,0:0,2:4}
{1:1,3:3,0:0,2:4,4:16}
{1:1,3:3,5:5,0:0,2:4,4:16}
{1:1,3:3,5:5,0:0,2:4,4:16,6:36}
{1:1,3:3,5:5,7:7,0:0,2:4,4:16,6:36}
{1:1,3:3,5:5,7:7,0:0,2:4,4:16,6:36,8:64}

View File

@ -15,6 +15,25 @@ SELECT mapApply((x, y) -> (x, x + 1), materialize(map(1, 0, 2, 0)));
SELECT mapApply((x, y) -> ('x', 'y'), map(1, 0, 2, 0)); SELECT mapApply((x, y) -> ('x', 'y'), map(1, 0, 2, 0));
SELECT mapApply((x, y) -> ('x', 'y'), materialize(map(1, 0, 2, 0))); SELECT mapApply((x, y) -> ('x', 'y'), materialize(map(1, 0, 2, 0)));
SELECT mapUpdate(map('k1', 1, 'k2', 2), map('k1', 11, 'k2', 22));
SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), map('k1', 11, 'k2', 22));
SELECT mapUpdate(map('k1', 1, 'k2', 2), materialize(map('k1', 11, 'k2', 22)));
SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), materialize(map('k1', 11, 'k2', 22)));
SELECT mapUpdate(map('k1', 1, 'k2', 2, 'k3', 3), map('k2', 22, 'k3', 33, 'k4', 44));
SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2, 'k3', 3)), map('k2', 22, 'k3', 33, 'k4', 44));
SELECT mapUpdate(map('k1', 1, 'k2', 2, 'k3', 3), materialize(map('k2', 22, 'k3', 33, 'k4', 44)));
SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2, 'k3', 3)), materialize(map('k2', 22, 'k3', 33, 'k4', 44)));
SELECT mapUpdate(map('k1', 1, 'k2', 2), map('k3', 33, 'k4', 44));
SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), map('k3', 33, 'k4', 44));
SELECT mapUpdate(map('k1', 1, 'k2', 2), materialize(map('k3', 33, 'k4', 44)));
SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), materialize(map('k3', 33, 'k4', 44)));
WITH (range(0, number % 10), range(0, number % 10))::Map(UInt64, UInt64) AS m1,
(range(0, number % 10, 2), arrayMap(x -> x * x, range(0, number % 10, 2)))::Map(UInt64, UInt64) AS m2
SELECT DISTINCT mapUpdate(m1, m2) FROM numbers (100000);
SELECT mapApply(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT mapApply(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT mapApply((x, y) -> (x), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT mapApply((x, y) -> (x), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT mapApply((x, y) -> ('x'), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT mapApply((x, y) -> ('x'), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }

View File

@ -0,0 +1,38 @@
int32 Nullable(Int32) 1 \N
str String 0 SOR \N
uint64 UInt64 0 PRI SOR \N
--- EXTENDED
int32 Nullable(Int32) 1 \N
str String 0 SOR \N
uint64 UInt64 0 PRI SOR \N
--- FULL
int32 Nullable(Int32) 1 \N \N example comment
str String 0 SOR \N \N
uint64 UInt64 0 PRI SOR \N \N
--- LIKE
int32 Nullable(Int32) 1 \N
uint64 UInt64 0 PRI SOR \N
--- NOT LIKE
str String 0 SOR \N
--- ILIKE
int32 Nullable(Int32) 1 \N
uint64 UInt64 0 PRI SOR \N
--- NOT ILIKE
str String 0 SOR \N
--- WHERE
int32 Nullable(Int32) 1 \N
uint64 UInt64 0 PRI SOR \N
--- LIMIT
int32 Nullable(Int32) 1 \N
--- Original table
int32 Nullable(Int32) 1 \N
str String 0 SOR \N
uint64 UInt64 0 PRI SOR \N
--- Equally named table in other database
int32 Int32 0 \N
str String 0 \N
uint64 UInt64 0 PRI SOR \N
--- Short form
int32 Int32 0 \N
str String 0 \N
uint64 UInt64 0 PRI SOR \N

View File

@ -0,0 +1,70 @@
-- Tags: no-parallel
-- no-parallel: creates a custom database schema and expects to use it exclusively
-- Create a test table and verify that the output of SHOW COLUMNS is sane.
-- The matching of actual/expected results relies on the fact that the output of SHOW COLUMNS is sorted.
DROP TABLE IF EXISTS tab;
CREATE TABLE tab
(
    `uint64` UInt64,
    `int32` Nullable(Int32) COMMENT 'example comment',
    `str` String,
    INDEX idx str TYPE set(1000)
)
ENGINE = MergeTree
PRIMARY KEY (uint64)
ORDER BY (uint64, str);
-- Basic form, then each modifier of SHOW COLUMNS in turn.
SHOW COLUMNS FROM tab;
SELECT '--- EXTENDED';
SHOW EXTENDED COLUMNS FROM tab;
SELECT '--- FULL';
SHOW FULL COLUMNS FROM tab;
-- Filtering variants: pattern matching (case-sensitive and -insensitive),
-- negated patterns, a WHERE condition on the result, and LIMIT.
SELECT '--- LIKE';
SHOW COLUMNS FROM tab LIKE '%int%';
SELECT '--- NOT LIKE';
SHOW COLUMNS FROM tab NOT LIKE '%int%';
SELECT '--- ILIKE';
SHOW COLUMNS FROM tab ILIKE '%INT%';
SELECT '--- NOT ILIKE';
SHOW COLUMNS FROM tab NOT ILIKE '%INT%';
SELECT '--- WHERE';
SHOW COLUMNS FROM tab WHERE field LIKE '%int%';
SELECT '--- LIMIT';
SHOW COLUMNS FROM tab LIMIT 1;
-- Create a table in a different database. Intentionally using the same table/column names as above so
-- we notice if something is buggy in the implementation of SHOW COLUMNS.
DROP DATABASE IF EXISTS database_123456789abcde;
CREATE DATABASE database_123456789abcde; -- pseudo-random database name
DROP TABLE IF EXISTS database_123456789abcde.tab;
CREATE TABLE database_123456789abcde.tab
(
    `uint64` UInt64,
    `int32` Int32,
    `str` String
)
ENGINE = MergeTree
ORDER BY uint64;
-- The unqualified name must still resolve to the table in the current database.
SELECT '--- Original table';
SHOW COLUMNS FROM tab;
SELECT '--- Equally named table in other database';
SHOW COLUMNS FROM tab FROM database_123456789abcde;
SELECT '--- Short form';
SHOW COLUMNS FROM database_123456789abcde.tab;
-- Clean up so the no-parallel run leaves no residue.
DROP DATABASE database_123456789abcde;
DROP TABLE tab;

View File

@ -1,3 +1,4 @@
v23.3.1.2823-lts 2023-03-31
v23.2.4.12-stable 2023-03-10
v23.2.3.17-stable 2023-03-06
v23.2.2.20-stable 2023-03-01

1 v23.2.4.12-stable v23.3.1.2823-lts 2023-03-10 2023-03-31
1 v23.3.1.2823-lts 2023-03-31
2 v23.2.4.12-stable v23.2.4.12-stable 2023-03-10 2023-03-10
3 v23.2.3.17-stable v23.2.3.17-stable 2023-03-06 2023-03-06
4 v23.2.2.20-stable v23.2.2.20-stable 2023-03-01 2023-03-01