Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-22 15:42:02 +00:00

Merge branch 'master' into fix-data-part-modification-time

Commit e21c04ff9c

.github/workflows/release_branches.yml (vendored): 536 changed lines
@@ -407,7 +407,7 @@ jobs:
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestAsan:
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
@@ -419,6 +419,8 @@ jobs:
CHECK_NAME=Stateless tests (address, actions)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -442,7 +444,44 @@ jobs:
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestTsan:
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (address, actions)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
@@ -454,6 +493,82 @@ jobs:
CHECK_NAME=Stateless tests (thread, actions)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (thread, actions)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (thread, actions)
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -512,7 +627,7 @@ jobs:
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestMsan:
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
@@ -524,6 +639,8 @@ jobs:
CHECK_NAME=Stateless tests (memory, actions)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -547,7 +664,81 @@ jobs:
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestDebug:
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (memory, actions)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_memory
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (memory, actions)
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
@@ -559,6 +750,82 @@ jobs:
CHECK_NAME=Stateless tests (debug, actions)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (debug, actions)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (debug, actions)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -975,8 +1242,8 @@ jobs:
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [BuilderDebAsan, FunctionalStatelessTestAsan]
IntegrationTestsAsan0:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
@@ -986,6 +1253,8 @@ jobs:
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, actions)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -1009,8 +1278,80 @@ jobs:
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsTsan:
needs: [BuilderDebTsan, FunctionalStatelessTestTsan]
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, actions)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, actions)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=3
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
@@ -1020,6 +1361,8 @@ jobs:
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (thread, actions)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -1043,8 +1386,116 @@ jobs:
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsRelease:
needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (thread, actions)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (thread, actions)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (thread, actions)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
@@ -1054,6 +1505,44 @@ jobs:
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release, actions)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
- name: Check out repository code
uses: actions/checkout@v2
- name: Integration test
run: |
sudo rm -fr $TEMP_PATH
mkdir -p $TEMP_PATH
cp -r $GITHUB_WORKSPACE $TEMP_PATH
cd $REPO_COPY/tests/ci
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker kill $(docker ps -q) ||:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release, actions)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v2
@@ -1081,11 +1570,18 @@ jobs:
needs:
- DockerHubPush
- BuilderReport
- FunctionalStatelessTestDebug
- FunctionalStatelessTestDebug0
- FunctionalStatelessTestDebug1
- FunctionalStatelessTestDebug2
- FunctionalStatelessTestRelease
- FunctionalStatelessTestAsan
- FunctionalStatelessTestTsan
- FunctionalStatelessTestMsan
- FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1
- FunctionalStatelessTestTsan0
- FunctionalStatelessTestTsan1
- FunctionalStatelessTestTsan2
- FunctionalStatelessTestMsan0
- FunctionalStatelessTestMsan1
- FunctionalStatelessTestMsan2
- FunctionalStatelessTestUBsan
- FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease
@@ -1098,9 +1594,15 @@ jobs:
- StressTestTsan
- StressTestMsan
- StressTestUBsan
- IntegrationTestsAsan
- IntegrationTestsRelease
- IntegrationTestsTsan
- IntegrationTestsAsan0
- IntegrationTestsAsan1
- IntegrationTestsAsan2
- IntegrationTestsRelease0
- IntegrationTestsRelease1
- IntegrationTestsTsan0
- IntegrationTestsTsan1
- IntegrationTestsTsan2
- IntegrationTestsTsan3
- CompatibilityCheck
runs-on: [self-hosted, style-checker]
steps:
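
The RUN_BY_HASH_NUM and RUN_BY_HASH_TOTAL variables introduced above split each check into fixed shards (for example, 3 TSAN stateless jobs and 4 TSAN integration jobs), so the same Python check script can run a different slice of tests on each runner. The actual selection logic lives in the CI scripts invoked above (functional_test_check.py, integration_test_check.py); the sketch below only illustrates the idea, and the helper name and the use of crc32 are assumptions, not the real ClickHouse CI code.

```python
import zlib

def tests_for_shard(all_tests, run_by_hash_num, run_by_hash_total):
    # Keep only the tests whose stable hash falls into this job's shard.
    # zlib.crc32 is used because Python's built-in hash() is salted per
    # process, which would give every runner a different split.
    return [
        test for test in all_tests
        if zlib.crc32(test.encode()) % run_by_hash_total == run_by_hash_num
    ]

# Shard 1 of 2, matching the FunctionalStatelessTestAsan1 job above.
print(tests_for_shard(["00001_select", "00002_insert", "00003_join"], 1, 2))
```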
@@ -72,6 +72,7 @@ RUN python3 -m pip install \
grpcio-tools \
kafka-python \
kazoo \
lz4 \
minio \
protobuf \
psycopg2-binary==2.8.6 \
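
The only addition in this pip install block is the `lz4` package, presumably for integration tests that compress or decompress LZ4 data from Python. A minimal usage sketch of the installed library follows; the sample data is made up and not taken from the tests.

```python
import lz4.frame

payload = b"clickhouse " * 100
compressed = lz4.frame.compress(payload)
assert lz4.frame.decompress(compressed) == payload
print(f"{len(payload)} bytes -> {len(compressed)} bytes")
```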
@@ -37,6 +37,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[rabbitmq_skip_broken_messages = N,]
[rabbitmq_max_block_size = N,]
[rabbitmq_flush_interval_ms = N]
[rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish']
```

Required parameters:
@@ -59,6 +60,7 @@ Optional parameters:
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. Default: `0`. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
- `rabbitmq_max_block_size`
- `rabbitmq_flush_interval_ms`
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
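
For reference, the queue that the engine declares from `rabbitmq_queue_settings_list` corresponds to a plain RabbitMQ queue declaration with the same arguments. A minimal sketch using the Python `pika` client is shown below; the broker address and queue name are placeholders, and this snippet is not part of the engine itself.

```python
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
# Same settings as in the example above; the engine also marks its queues durable.
channel.queue_declare(
    queue="clickhouse_example_queue",
    durable=True,
    arguments={
        "x-dead-letter-exchange": "my-dlx",
        "x-max-length": 10,
        "x-overflow": "reject-publish",
    },
)
connection.close()
```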

SSL connection:
@@ -41,7 +41,7 @@ Example of a polygon dictionary configuration:
</dictionary>
```

Tne corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md#create-dictionary-query):
The corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md#create-dictionary-query):
``` sql
CREATE DICTIONARY polygon_dict_name (
key Array(Array(Array(Array(Float64)))),
@@ -129,6 +129,9 @@ world

Each element of a [Nested](../sql-reference/data-types/nested-data-structures/nested.md) structure is represented as a separate array.

Incoming enumeration (`ENUM`) values can be passed either as the values themselves or as their ordinal numbers. The passed value is first matched against the enumeration elements; if no match is found and the value is a number, it is treated as an ordinal number within the enumeration.
If the incoming `ENUM` values contain only ordinal numbers, it is recommended to enable the [input_format_tsv_enum_as_number](../operations/settings/settings.md#settings-input_format_tsv_enum_as_number) setting to speed up parsing.

For example:

``` sql
@@ -362,6 +365,9 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR

If the [input_format_defaults_for_omitted_fields = 1](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) setting is enabled and the column type is not `Nullable(T)`, empty unquoted values are replaced with the default values for the column data type.

Incoming enumeration (`ENUM`) values can be passed either as the values themselves or as their ordinal numbers. The passed value is first matched against the enumeration elements; if no match is found and the value is a number, it is treated as an ordinal number within the enumeration.
If the incoming `ENUM` values contain only ordinal numbers, it is recommended to enable the [input_format_tsv_enum_as_number](../operations/settings/settings.md#settings-input_format_tsv_enum_as_number) setting to speed up parsing.

The CSV format supports outputting totals and extremes in the same way as `TabSeparated`.

## CSVWithNames {#csvwithnames}
@@ -693,7 +699,7 @@ CREATE TABLE IF NOT EXISTS example_table
- If `input_format_defaults_for_omitted_fields = 1`, the default value for `x` is `0`, and the default value for `a` is `x * 2`.

!!! note "Warning"
If `input_format_defaults_for_omitted_fields = 1`, ClickHouse consumes more computing resources when processing queries than with `input_format_defaults_for_omitted_fields = 0`.
When inserting data with `input_format_defaults_for_omitted_fields = 1`, ClickHouse consumes more computing resources compared to `input_format_defaults_for_omitted_fields = 0`.

### Selecting data {#vyborka-dannykh}
@@ -391,12 +391,14 @@ INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (

## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}

Enables or disables parsing of enumeration values as enumeration IDs for the TSV input format.
Enables or disables parsing of enumeration values as their ordinal numbers.

When the mode is enabled, enumeration values (`ENUM` type) in incoming `TSV` data are always treated as ordinal numbers rather than as enumeration elements. Enabling this setting is recommended to optimize parsing when the `ENUM` data contains only ordinal numbers rather than the enumeration elements themselves.

Possible values:

- 0 — parse enumeration values as values.
- 1 — parse enumeration values as enumeration IDs.
- 0 — incoming `ENUM` values are first matched against the enumeration elements; if no match is found, they are treated as ordinal numbers.
- 1 — incoming `ENUM` values are treated as ordinal numbers right away.

Default value: 0.

@@ -410,10 +412,39 @@ CREATE TABLE table_with_enum_column_for_tsv_insert (Id Int32,Value Enum('first'

With the `input_format_tsv_enum_as_number` setting enabled:

Query:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 1;
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
```

Query:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 'first';
```

will generate an exception.

With the `input_format_tsv_enum_as_number` setting disabled:

Query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 'first';
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

@@ -428,15 +459,6 @@ SELECT * FROM table_with_enum_column_for_tsv_insert;
└─────┴────────┘
```

With the `input_format_tsv_enum_as_number` setting disabled, the `INSERT` query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
```

will generate an exception.

## input_format_null_as_default {#settings-input-format-null-as-default}

Enables or disables initializing [NULL](../../sql-reference/syntax.md#null-literal) cells with [default values](../../sql-reference/statements/create/table.md#create-default-values) if the column data type does not allow [storing NULL](../../sql-reference/data-types/nullable.md#data_type-nullable).
@@ -1511,12 +1533,13 @@ SELECT area/period FROM account_orders FORMAT JSON;

## input_format_csv_enum_as_number {#settings-input_format_csv_enum_as_number}

Enables or disables parsing of enumeration values as enumeration IDs for the CSV input format.
Enables or disables parsing of enumeration values as their ordinal numbers.
When the mode is enabled, enumeration values (`ENUM` type) in incoming `CSV` data are always treated as ordinal numbers rather than as enumeration elements. Enabling this setting is recommended to optimize parsing when the `ENUM` data contains only ordinal numbers rather than the enumeration elements themselves.

Possible values:

- 0 — parse enumeration values as values.
- 1 — parse enumeration values as enumeration IDs.
- 0 — incoming `ENUM` values are first matched against the enumeration elements; if no match is found, they are treated as ordinal numbers.
- 1 — incoming `ENUM` values are treated as ordinal numbers right away.

Default value: 0.

@@ -1530,10 +1553,11 @@ CREATE TABLE table_with_enum_column_for_csv_insert (Id Int32,Value Enum('first'

With the `input_format_csv_enum_as_number` setting enabled:

Query:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
SELECT * FROM table_with_enum_column_for_csv_insert;
```

Result:
@@ -1544,15 +1568,37 @@ SELECT * FROM table_with_enum_column_for_csv_insert;
└─────┴────────┘
```

With the `input_format_csv_enum_as_number` setting disabled, the `INSERT` query:
Query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 103,'first'
```

will generate an exception.

With the `input_format_csv_enum_as_number` setting disabled:

Query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 103,'first'
SELECT * FROM table_with_enum_column_for_csv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
┌──Id─┬─Value─┐
│ 103 │ first │
└─────┴───────┘
```

## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}

Use CRLF (DOS/Windows style) instead of LF (Unix style) as the line separator for the CSV format.
@@ -54,7 +54,7 @@ namespace
const Poco::SHA1Engine::Digest & digest = engine.digest();

Poco::SHA1Engine::Digest calculated_password_sha1(sha1_size);
for (size_t i = 0; i < sha1_size; i++)
for (size_t i = 0; i < sha1_size; ++i)
calculated_password_sha1[i] = scrambled_password[i] ^ digest[i];

auto calculated_password_double_sha1 = Util::encodeSHA1(calculated_password_sha1);

@@ -448,7 +448,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
vals = nullptr;
});

for (std::size_t i = 0; vals[i]; i++)
for (size_t i = 0; vals[i]; ++i)
{
if (vals[i]->bv_val && vals[i]->bv_len > 0)
result.emplace(vals[i]->bv_val, vals[i]->bv_len);
@@ -473,7 +473,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
referrals = nullptr;
});

for (std::size_t i = 0; referrals[i]; i++)
for (size_t i = 0; referrals[i]; ++i)
{
LOG_WARNING(&Poco::Logger::get("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]);
}

@@ -90,7 +90,7 @@ private:
throw;
}

for (i = 0; i < old_size; i++)
for (i = 0; i < old_size; ++i)
{
nested_func->merge(&new_state[i * nested_size_of_data],
&old_state[i * nested_size_of_data],

@@ -271,7 +271,7 @@ public:
{
lower_bound = std::min(lower_bound, other.lower_bound);
upper_bound = std::max(upper_bound, other.upper_bound);
for (size_t i = 0; i < other.size; i++)
for (size_t i = 0; i < other.size; ++i)
add(other.points[i].mean, other.points[i].weight, max_bins);
}

@@ -56,7 +56,7 @@ static bool ALWAYS_INLINE inline is_all_zeros(const UInt8 * flags, size_t size)
i += 8;
}

for (; i < size; i++)
for (; i < size; ++i)
if (flags[i])
return false;

@@ -4,6 +4,8 @@
#include <iomanip>
#include <string_view>
#include <filesystem>
#include <map>
#include <unordered_map>

#include <base/argsToConfig.h>
#include <base/DateLUT.h>
@@ -52,6 +54,7 @@
#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
#include <Processors/Transforms/AddingDefaultsTransform.h>
#include <Interpreters/ReplaceQueryParameterVisitor.h>
#include <Interpreters/ProfileEventsExt.h>
#include <IO/WriteBufferFromOStream.h>
#include <IO/CompressionMethod.h>
#include <Client/InternalTextLogs.h>
@@ -105,6 +108,99 @@ namespace ProfileEvents
namespace DB
{

static void incrementProfileEventsBlock(Block & dst, const Block & src)
{
if (!dst)
{
dst = src;
return;
}

assertBlocksHaveEqualStructure(src, dst, "ProfileEvents");

std::unordered_map<String, size_t> name_pos;
for (size_t i = 0; i < dst.columns(); ++i)
name_pos[dst.getByPosition(i).name] = i;

size_t dst_rows = dst.rows();
MutableColumns mutable_columns = dst.mutateColumns();

auto & dst_column_host_name = typeid_cast<ColumnString &>(*mutable_columns[name_pos["host_name"]]);
auto & dst_array_current_time = typeid_cast<ColumnUInt32 &>(*mutable_columns[name_pos["current_time"]]).getData();
auto & dst_array_thread_id = typeid_cast<ColumnUInt64 &>(*mutable_columns[name_pos["thread_id"]]).getData();
auto & dst_array_type = typeid_cast<ColumnInt8 &>(*mutable_columns[name_pos["type"]]).getData();
auto & dst_column_name = typeid_cast<ColumnString &>(*mutable_columns[name_pos["name"]]);
auto & dst_array_value = typeid_cast<ColumnInt64 &>(*mutable_columns[name_pos["value"]]).getData();

const auto & src_column_host_name = typeid_cast<const ColumnString &>(*src.getByName("host_name").column);
const auto & src_array_current_time = typeid_cast<const ColumnUInt32 &>(*src.getByName("current_time").column).getData();
const auto & src_array_thread_id = typeid_cast<const ColumnUInt64 &>(*src.getByName("thread_id").column).getData();
const auto & src_column_name = typeid_cast<const ColumnString &>(*src.getByName("name").column);
const auto & src_array_value = typeid_cast<const ColumnInt64 &>(*src.getByName("value").column).getData();

struct Id
{
StringRef name;
StringRef host_name;
UInt64 thread_id;

bool operator<(const Id & rhs) const
{
return std::tie(name, host_name, thread_id)
< std::tie(rhs.name, rhs.host_name, rhs.thread_id);
}
};
std::map<Id, UInt64> rows_by_name;
for (size_t src_row = 0; src_row < src.rows(); ++src_row)
{
Id id{
src_column_name.getDataAt(src_row),
src_column_host_name.getDataAt(src_row),
src_array_thread_id[src_row],
};
rows_by_name[id] = src_row;
}

/// Merge src into dst.
for (size_t dst_row = 0; dst_row < dst_rows; ++dst_row)
{
Id id{
dst_column_name.getDataAt(dst_row),
dst_column_host_name.getDataAt(dst_row),
dst_array_thread_id[dst_row],
};

if (auto it = rows_by_name.find(id); it != rows_by_name.end())
{
size_t src_row = it->second;
dst_array_current_time[dst_row] = src_array_current_time[src_row];

switch (dst_array_type[dst_row])
{
case ProfileEvents::Type::INCREMENT:
dst_array_value[dst_row] += src_array_value[src_row];
break;
case ProfileEvents::Type::GAUGE:
dst_array_value[dst_row] = src_array_value[src_row];
break;
}

rows_by_name.erase(it);
}
}

/// Copy rows from src that dst does not contains.
for (const auto & [id, pos] : rows_by_name)
{
for (size_t col = 0; col < src.columns(); ++col)
{
mutable_columns[col]->insert((*src.getByPosition(col).column)[pos]);
}
}

dst.setColumns(std::move(mutable_columns));
}


std::atomic_flag exit_on_signal = ATOMIC_FLAG_INIT;

@@ -753,7 +849,7 @@ void ClientBase::onProfileEvents(Block & block)
}
else
{
profile_events.last_block = block;
incrementProfileEventsBlock(profile_events.last_block, block);
}
}
profile_events.watch.restart();
@@ -24,7 +24,12 @@ class ColumnFunction final : public COWHelper<IColumn, ColumnFunction>
private:
friend class COWHelper<IColumn, ColumnFunction>;

ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture, bool is_short_circuit_argument_ = false, bool is_function_compiled_ = false);
ColumnFunction(
size_t size,
FunctionBasePtr function_,
const ColumnsWithTypeAndName & columns_to_capture,
bool is_short_circuit_argument_ = false,
bool is_function_compiled_ = false);

public:
const char * getFamilyName() const override { return "Function"; }

@@ -293,7 +293,7 @@ void executeColumnIfNeeded(ColumnWithTypeAndName & column, bool empty)
column.column = column_function->getResultType()->createColumn();
}

int checkShirtCircuitArguments(const ColumnsWithTypeAndName & arguments)
int checkShortCircuitArguments(const ColumnsWithTypeAndName & arguments)
{
int last_short_circuit_argument_index = -1;
for (size_t i = 0; i != arguments.size(); ++i)

@@ -66,7 +66,7 @@ void executeColumnIfNeeded(ColumnWithTypeAndName & column, bool empty = false);

/// Check if arguments contain lazy executed argument. If contain, return index of the last one,
/// otherwise return -1.
int checkShirtCircuitArguments(const ColumnsWithTypeAndName & arguments);
int checkShortCircuitArguments(const ColumnsWithTypeAndName & arguments);

void copyMask(const PaddedPODArray<UInt8> & from, PaddedPODArray<UInt8> & to);

@@ -838,7 +838,7 @@ bool Dwarf::findLocation(

// The next inlined subroutine's call file and call line is the current
// caller's location.
for (size_t i = 0; i < num_found - 1; i++)
for (size_t i = 0; i < num_found - 1; ++i)
{
call_locations[i].file = call_locations[i + 1].file;
call_locations[i].line = call_locations[i + 1].line;

@@ -72,6 +72,24 @@ static thread_local bool has_alt_stack = false;
#endif


std::vector<ThreadGroupStatus::ProfileEventsCountersAndMemory> ThreadGroupStatus::getProfileEventsCountersAndMemoryForThreads()
{
std::lock_guard guard(mutex);

/// It is OK to move it, since it is enough to report statistics for the thread at least once.
auto stats = std::move(finished_threads_counters_memory);
for (auto * thread : threads)
{
stats.emplace_back(ProfileEventsCountersAndMemory{
thread->performance_counters.getPartiallyAtomicSnapshot(),
thread->memory_tracker.get(),
thread->thread_id,
});
}

return stats;
}

ThreadStatus::ThreadStatus()
: thread_id{getThreadId()}
{
@@ -139,11 +157,17 @@ ThreadStatus::~ThreadStatus()
{
/// It's a minor tracked memory leak here (not the memory itself but it's counter).
/// We've already allocated a little bit more than the limit and cannot track it in the thread memory tracker or its parent.
tryLogCurrentException(log);
}

if (thread_group)
{
std::lock_guard guard(thread_group->mutex);
thread_group->finished_threads_counters_memory.emplace_back(ThreadGroupStatus::ProfileEventsCountersAndMemory{
performance_counters.getPartiallyAtomicSnapshot(),
memory_tracker.get(),
thread_id,
});
thread_group->threads.erase(this);
}

@@ -61,6 +61,13 @@ using ThreadStatusPtr = ThreadStatus *;
class ThreadGroupStatus
{
public:
struct ProfileEventsCountersAndMemory
{
ProfileEvents::Counters::Snapshot counters;
Int64 memory_usage;
UInt64 thread_id;
};

mutable std::mutex mutex;

ProfileEvents::Counters performance_counters{VariableContext::Process};
@@ -83,6 +90,10 @@ public:

String query;
UInt64 normalized_query_hash = 0;

std::vector<ProfileEventsCountersAndMemory> finished_threads_counters_memory;

std::vector<ProfileEventsCountersAndMemory> getProfileEventsCountersAndMemoryForThreads();
};

using ThreadGroupStatusPtr = std::shared_ptr<ThreadGroupStatus>;

@@ -153,7 +153,7 @@ void TraceCollector::run()
Array trace;
trace.reserve(trace_size);

for (size_t i = 0; i < trace_size; i++)
for (size_t i = 0; i < trace_size; ++i)
{
uintptr_t addr = 0;
readPODBinary(addr, in);

@@ -41,7 +41,7 @@ static void append(std::vector<String> & to, const std::vector<String> & what, s
static bool parseNumber(const String & description, size_t l, size_t r, size_t & res)
{
res = 0;
for (size_t pos = l; pos < r; pos ++)
for (size_t pos = l; pos < r; ++pos)
{
if (!isNumericASCII(description[pos]))
return false;

@@ -209,7 +209,7 @@ void Block::eraseImpl(size_t position)
for (auto it = index_by_name.begin(); it != index_by_name.end();)
{
if (it->second == position)
index_by_name.erase(it++);
it = index_by_name.erase(it);
else
{
if (it->second > position)

@@ -71,7 +71,7 @@ Native41::Native41(const String & password_, const String & scramble_)
const Poco::SHA1Engine::Digest & digest = engine3.digest();

scramble.resize(SCRAMBLE_LENGTH);
for (size_t i = 0; i < SCRAMBLE_LENGTH; i++)
for (size_t i = 0; i < SCRAMBLE_LENGTH; ++i)
scramble[i] = static_cast<unsigned char>(password_sha1[i] ^ digest[i]);
}

@@ -191,7 +191,7 @@ void Sha256Password::authenticate(
}

password.resize(plaintext_size);
for (int i = 0; i < plaintext_size; i++)
for (int i = 0; i < plaintext_size; ++i)
{
password[i] = plaintext[i] ^ static_cast<unsigned char>(scramble[i % SCRAMBLE_LENGTH]);
}

@@ -41,7 +41,7 @@ void GTIDSets::parse(const String gtid_format)
GTIDSet set;
set.uuid = DB::parse<UUID>(server_ids[0]);

for (size_t k = 1; k < server_ids.size(); k++)
for (size_t k = 1; k < server_ids.size(); ++k)
{
std::vector<String> inters;
boost::split(inters, server_ids[k], [](char c) { return c == '-'; });
@@ -74,7 +74,7 @@ void GTIDSets::update(const GTID & other)
{
if (set.uuid == other.uuid)
{
for (auto i = 0U; i < set.intervals.size(); i++)
for (auto i = 0U; i < set.intervals.size(); ++i)
{
auto & current = set.intervals[i];

@@ -134,7 +134,7 @@ String GTIDSets::toString() const
{
WriteBufferFromOwnString buffer;

for (size_t i = 0; i < sets.size(); i++)
for (size_t i = 0; i < sets.size(); ++i)
{
GTIDSet set = sets[i];
writeUUIDText(set.uuid, buffer);
@@ -159,7 +159,7 @@ namespace MySQLReplication
payload.ignore(1);

column_count = readLengthEncodedNumber(payload);
for (auto i = 0U; i < column_count; i++)
for (auto i = 0U; i < column_count; ++i)
{
UInt8 v = 0x00;
payload.readStrict(reinterpret_cast<char *>(&v), 1);
@@ -188,7 +188,7 @@ namespace MySQLReplication
{
auto pos = 0;
column_meta.reserve(column_count);
for (auto i = 0U; i < column_count; i++)
for (auto i = 0U; i < column_count; ++i)
{
UInt16 typ = column_type[i];
switch (typ)
@@ -255,7 +255,7 @@ namespace MySQLReplication
out << "Table Len: " << std::to_string(this->table_len) << '\n';
out << "Table: " << this->table << '\n';
out << "Column Count: " << this->column_count << '\n';
for (auto i = 0U; i < column_count; i++)
for (UInt32 i = 0; i < column_count; ++i)
{
out << "Column Type [" << i << "]: " << std::to_string(column_type[i]) << ", Meta: " << column_meta[i] << '\n';
}
@@ -312,7 +312,7 @@ namespace MySQLReplication
UInt32 null_index = 0;

UInt32 re_count = 0;
for (auto i = 0U; i < number_columns; i++)
for (UInt32 i = 0; i < number_columns; ++i)
{
if (bitmap[i])
re_count++;
@@ -321,7 +321,7 @@ namespace MySQLReplication
boost::dynamic_bitset<> columns_null_set;
readBitmap(payload, columns_null_set, re_count);

for (auto i = 0U; i < number_columns; i++)
for (UInt32 i = 0; i < number_columns; ++i)
{
UInt32 field_len = 0;

@@ -523,7 +523,7 @@ namespace MySQLReplication
res += (val ^ (mask & compressed_integer_align_numbers[compressed_integers]));
}

for (auto k = 0U; k < uncompressed_integers; k++)
for (size_t k = 0; k < uncompressed_integers; ++k)
{
UInt32 val = 0;
readBigEndianStrict(payload, reinterpret_cast<char *>(&val), 4);
@@ -536,7 +536,7 @@ namespace MySQLReplication
size_t uncompressed_decimals = scale / digits_per_integer;
size_t compressed_decimals = scale - (uncompressed_decimals * digits_per_integer);

for (auto k = 0U; k < uncompressed_decimals; k++)
for (size_t k = 0; k < uncompressed_decimals; ++k)
{
UInt32 val = 0;
readBigEndianStrict(payload, reinterpret_cast<char *>(&val), 4);
@@ -669,7 +669,7 @@ namespace MySQLReplication
header.dump(out);
out << "Schema: " << this->schema << '\n';
out << "Table: " << this->table << '\n';
for (auto i = 0U; i < rows.size(); i++)
for (size_t i = 0; i < rows.size(); ++i)
{
out << "Row[" << i << "]: " << applyVisitor(to_string, rows[i]) << '\n';
}

@@ -15,7 +15,7 @@ namespace ProtocolText
ResultSetRow::ResultSetRow(const Serializations & serializations, const Columns & columns_, int row_num_)
: columns(columns_), row_num(row_num_)
{
for (size_t i = 0; i < columns.size(); i++)
for (size_t i = 0; i < columns.size(); ++i)
{
if (columns[i]->isNullAt(row_num))
{
@@ -39,7 +39,7 @@ size_t ResultSetRow::getPayloadSize() const

void ResultSetRow::writePayloadImpl(WriteBuffer & buffer) const
{
for (size_t i = 0; i < columns.size(); i++)
for (size_t i = 0; i < columns.size(); ++i)
{
if (columns[i]->isNullAt(row_num))
buffer.write(serialized[i].data(), 1);

@@ -85,7 +85,7 @@ void insertPostgreSQLValue(
assert_cast<ColumnString &>(column).insertData(value.data(), value.size());
break;
case ExternalResultDescription::ValueType::vtUUID:
assert_cast<ColumnUInt128 &>(column).insert(parse<UUID>(value.data(), value.size()));
assert_cast<ColumnUUID &>(column).insertValue(parse<UUID>(value.data(), value.size()));
break;
case ExternalResultDescription::ValueType::vtDate:
assert_cast<ColumnUInt16 &>(column).insertValue(UInt16{LocalDate{std::string(value)}.getDayNum()});

@@ -593,6 +593,7 @@ class IColumn;
M(Bool, input_format_null_as_default, true, "For text input formats initialize null fields with default values if data type of this field is not nullable", 0) \
M(Bool, input_format_arrow_import_nested, false, "Allow to insert array of structs into Nested table in Arrow input format.", 0) \
M(Bool, input_format_orc_import_nested, false, "Allow to insert array of structs into Nested table in ORC input format.", 0) \
M(Int64, input_format_orc_row_batch_size, 100'000, "Batch size when reading ORC stripes.", 0) \
M(Bool, input_format_parquet_import_nested, false, "Allow to insert array of structs into Nested table in Parquet input format.", 0) \
M(Bool, input_format_allow_seeks, true, "Allow seeks while reading in ORC/Parquet/Arrow input formats", 0) \
\

@@ -119,7 +119,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
static const std::unordered_set<std::string_view> engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL",
"Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};

static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL"};
static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"};
bool engine_may_have_arguments = engines_with_arguments.contains(engine_name);

if (engine_define->engine->arguments && !engine_may_have_arguments)
@ -110,12 +110,12 @@ std::exception_ptr CacheDictionary<dictionary_key_type>::getLastException() cons
|
||||
}
|
||||
|
||||
template <DictionaryKeyType dictionary_key_type>
|
||||
const IDictionarySource * CacheDictionary<dictionary_key_type>::getSource() const
|
||||
DictionarySourcePtr CacheDictionary<dictionary_key_type>::getSource() const
|
||||
{
|
||||
/// Mutex required here because of the getSourceAndUpdateIfNeeded() function
|
||||
/// which is used from another thread.
|
||||
std::lock_guard lock(source_mutex);
|
||||
return source_ptr.get();
|
||||
return source_ptr;
|
||||
}
|
||||
|
||||
template <DictionaryKeyType dictionary_key_type>
|
||||
|
@ -104,7 +104,7 @@ public:
|
||||
allow_read_expired_keys);
|
||||
}
|
||||
|
||||
const IDictionarySource * getSource() const override;
|
||||
DictionarySourcePtr getSource() const override;
|
||||
|
||||
const DictionaryLifetime & getLifetime() const override { return dict_lifetime; }
|
||||
|
||||
@ -172,7 +172,7 @@ private:
|
||||
/// MultiVersion is not used here because it works with constant pointers.
|
||||
/// For some reason almost all methods in IDictionarySource interface are
|
||||
/// not constant.
|
||||
SharedDictionarySourcePtr getSourceAndUpdateIfNeeded() const
|
||||
DictionarySourcePtr getSourceAndUpdateIfNeeded() const
|
||||
{
|
||||
std::lock_guard lock(source_mutex);
|
||||
if (error_count)
|
||||
@ -190,7 +190,7 @@ private:
|
||||
|
||||
/// Dictionary source should be used with mutex
|
||||
mutable std::mutex source_mutex;
|
||||
mutable SharedDictionarySourcePtr source_ptr;
|
||||
mutable DictionarySourcePtr source_ptr;
|
||||
|
||||
CacheDictionaryStoragePtr cache_storage_ptr;
|
||||
mutable CacheDictionaryUpdateQueue<dictionary_key_type> update_queue;
|
||||
|
@ -61,7 +61,7 @@ public:
|
||||
|
||||
DictionarySourcePtr clone() const override
|
||||
{
|
||||
return std::make_unique<CassandraDictionarySource>(dict_struct, configuration, sample_block);
|
||||
return std::make_shared<CassandraDictionarySource>(dict_struct, configuration, sample_block);
|
||||
}
|
||||
|
||||
Pipe loadIds(const std::vector<UInt64> & ids) override;
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include "DictionaryStructure.h"
|
||||
#include "ExternalQueryBuilder.h"
|
||||
#include "readInvalidateQuery.h"
|
||||
#include "writeParenthesisedString.h"
|
||||
#include "DictionaryFactory.h"
|
||||
#include "DictionarySourceHelpers.h"
|
||||
|
||||
|
@ -60,7 +60,7 @@ public:
|
||||
|
||||
bool hasUpdateField() const override;
|
||||
|
||||
DictionarySourcePtr clone() const override { return std::make_unique<ClickHouseDictionarySource>(*this); }
|
||||
DictionarySourcePtr clone() const override { return std::make_shared<ClickHouseDictionarySource>(*this); }
|
||||
|
||||
std::string toString() const override;
|
||||
|
||||
|
@ -58,7 +58,7 @@ public:
|
||||
return std::make_shared<DirectDictionary>(getDictionaryID(), dict_struct, source_ptr->clone());
|
||||
}
|
||||
|
||||
const IDictionarySource * getSource() const override { return source_ptr.get(); }
|
||||
DictionarySourcePtr getSource() const override { return source_ptr; }
|
||||
|
||||
const DictionaryLifetime & getLifetime() const override { return dict_lifetime; }
|
||||
|
||||
|
@ -158,7 +158,7 @@ bool ExecutableDictionarySource::hasUpdateField() const
|
||||
|
||||
DictionarySourcePtr ExecutableDictionarySource::clone() const
|
||||
{
|
||||
return std::make_unique<ExecutableDictionarySource>(*this);
|
||||
return std::make_shared<ExecutableDictionarySource>(*this);
|
||||
}
|
||||
|
||||
std::string ExecutableDictionarySource::toString() const
|
||||
|
@ -152,7 +152,7 @@ bool ExecutablePoolDictionarySource::hasUpdateField() const
|
||||
|
||||
DictionarySourcePtr ExecutablePoolDictionarySource::clone() const
|
||||
{
|
||||
return std::make_unique<ExecutablePoolDictionarySource>(*this);
|
||||
return std::make_shared<ExecutablePoolDictionarySource>(*this);
|
||||
}
|
||||
|
||||
std::string ExecutablePoolDictionarySource::toString() const
|
||||
|
@@ -1,14 +1,23 @@
 #include "ExternalQueryBuilder.h"
 
-#include <boost/range/join.hpp>
-
 #include <IO/WriteBuffer.h>
 #include <IO/WriteBufferFromString.h>
 #include <IO/WriteHelpers.h>
-#include "DictionaryStructure.h"
-#include "writeParenthesisedString.h"
+#include <boost/range/join.hpp>
+#include <Dictionaries/DictionaryStructure.h>
 
 
 namespace DB
 {
 
+static inline void writeParenthesisedString(const String & s, WriteBuffer & buf)
+{
+    writeChar('(', buf);
+    writeString(s, buf);
+    writeChar(')', buf);
+}
+
 namespace ErrorCodes
 {
     extern const int UNSUPPORTED_METHOD;
@@ -51,7 +51,7 @@ public:
     ///Not supported for FileDictionarySource
     bool hasUpdateField() const override { return false; }
 
-    DictionarySourcePtr clone() const override { return std::make_unique<FileDictionarySource>(*this); }
+    DictionarySourcePtr clone() const override { return std::make_shared<FileDictionarySource>(*this); }
 
     std::string toString() const override;
 
@@ -61,7 +61,7 @@ public:
         return std::make_shared<FlatDictionary>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, configuration, update_field_loaded_block);
     }
 
-    const IDictionarySource * getSource() const override { return source_ptr.get(); }
+    DictionarySourcePtr getSource() const override { return source_ptr; }
 
     const DictionaryLifetime & getLifetime() const override { return dict_lifetime; }
 
@@ -207,7 +207,7 @@ bool HTTPDictionarySource::hasUpdateField() const
 
 DictionarySourcePtr HTTPDictionarySource::clone() const
 {
-    return std::make_unique<HTTPDictionarySource>(*this);
+    return std::make_shared<HTTPDictionarySource>(*this);
 }
 
 std::string HTTPDictionarySource::toString() const
@@ -71,7 +71,7 @@ public:
         return std::make_shared<HashedArrayDictionary<dictionary_key_type>>(getDictionaryID(), dict_struct, source_ptr->clone(), configuration, update_field_loaded_block);
     }
 
-    const IDictionarySource * getSource() const override { return source_ptr.get(); }
+    DictionarySourcePtr getSource() const override { return source_ptr; }
 
     const DictionaryLifetime & getLifetime() const override { return configuration.lifetime; }
 
@@ -78,7 +78,7 @@ public:
         return std::make_shared<HashedDictionary<dictionary_key_type, sparse>>(getDictionaryID(), dict_struct, source_ptr->clone(), configuration, update_field_loaded_block);
     }
 
-    const IDictionarySource * getSource() const override { return source_ptr.get(); }
+    DictionarySourcePtr getSource() const override { return source_ptr; }
 
     const DictionaryLifetime & getLifetime() const override { return configuration.lifetime; }
 
@@ -1,16 +1,16 @@
 #pragma once
 
-#include <Core/Names.h>
-#include <Interpreters/IExternalLoadable.h>
-#include <Interpreters/StorageID.h>
-#include <Columns/ColumnsNumber.h>
-#include <Dictionaries/IDictionarySource.h>
-#include <Dictionaries/DictionaryStructure.h>
-#include <DataTypes/IDataType.h>
-
 #include <memory>
 #include <mutex>
 
+#include <Core/Names.h>
+#include <Columns/ColumnsNumber.h>
+#include <Interpreters/IExternalLoadable.h>
+#include <Interpreters/StorageID.h>
+#include <Dictionaries/IDictionarySource.h>
+#include <Dictionaries/DictionaryStructure.h>
+#include <DataTypes/IDataType.h>
 
 
 namespace DB
 {
@@ -19,7 +19,7 @@ namespace ErrorCodes
     extern const int NOT_IMPLEMENTED;
 }
 
-struct IDictionary;
+class IDictionary;
 using DictionaryPtr = std::unique_ptr<IDictionary>;
 
 /** DictionaryKeyType provides IDictionary client information about
@@ -47,8 +47,9 @@ enum class DictionarySpecialKeyType
 /**
  * Base class for Dictionaries implementation.
  */
-struct IDictionary : public IExternalLoadable
+class IDictionary : public IExternalLoadable
 {
+public:
     explicit IDictionary(const StorageID & dictionary_id_)
         : dictionary_id(dictionary_id_)
         , full_name(dictionary_id.getInternalDictionaryName())
@@ -99,7 +100,7 @@ struct IDictionary : public IExternalLoadable
 
     virtual double getLoadFactor() const = 0;
 
-    virtual const IDictionarySource * getSource() const = 0;
+    virtual DictionarySourcePtr getSource() const = 0;
 
     virtual const DictionaryStructure & getStructure() const = 0;
 
@@ -200,7 +201,7 @@ struct IDictionary : public IExternalLoadable
 
     bool isModified() const override
     {
-        const auto * source = getSource();
+        const auto source = getSource();
         return source && source->isModified();
     }
 
@@ -10,8 +10,7 @@
 namespace DB
 {
 class IDictionarySource;
-using DictionarySourcePtr = std::unique_ptr<IDictionarySource>;
-using SharedDictionarySourcePtr = std::shared_ptr<IDictionarySource>;
+using DictionarySourcePtr = std::shared_ptr<IDictionarySource>;
 
 /** Data-provider interface for external dictionaries,
  * abstracts out the data source (file, MySQL, ClickHouse, external program, network request et cetera)
@@ -56,7 +56,7 @@ public:
         return std::make_shared<IPAddressDictionary>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, require_nonempty);
     }
 
-    const IDictionarySource * getSource() const override { return source_ptr.get(); }
+    DictionarySourcePtr getSource() const override { return source_ptr; }
 
     const DictionaryLifetime & getLifetime() const override { return dict_lifetime; }
 
@@ -129,7 +129,7 @@ Pipe LibraryDictionarySource::loadKeys(const Columns & key_columns, const std::v
 
 DictionarySourcePtr LibraryDictionarySource::clone() const
 {
-    return std::make_unique<LibraryDictionarySource>(*this);
+    return std::make_shared<LibraryDictionarySource>(*this);
 }
 
 
@@ -65,7 +65,7 @@ public:
     ///Not yet supported
     bool hasUpdateField() const override { return false; }
 
-    DictionarySourcePtr clone() const override { return std::make_unique<MongoDBDictionarySource>(*this); }
+    DictionarySourcePtr clone() const override { return std::make_shared<MongoDBDictionarySource>(*this); }
 
     std::string toString() const override;
 
@@ -231,7 +231,7 @@ bool MySQLDictionarySource::hasUpdateField() const
 
 DictionarySourcePtr MySQLDictionarySource::clone() const
 {
-    return std::make_unique<MySQLDictionarySource>(*this);
+    return std::make_shared<MySQLDictionarySource>(*this);
 }
 
 std::string MySQLDictionarySource::toString() const
@@ -87,7 +87,7 @@ public:
 
     double getLoadFactor() const override { return 1.0; }
 
-    const IDictionarySource * getSource() const override { return source_ptr.get(); }
+    DictionarySourcePtr getSource() const override { return source_ptr; }
 
     const DictionaryStructure & getStructure() const override { return dict_struct; }
 
@@ -151,7 +151,7 @@ void SlabsPolygonIndex::indexBuild(const std::vector<Polygon> & polygons)
         }
     }
 
-    for (size_t i = 0; i != all_edges.size(); i++)
+    for (size_t i = 0; i != all_edges.size(); ++i)
     {
         size_t l = edge_left[i];
         size_t r = edge_right[i];
@@ -161,7 +161,7 @@ bool PostgreSQLDictionarySource::supportsSelectiveLoad() const
 
 DictionarySourcePtr PostgreSQLDictionarySource::clone() const
 {
-    return std::make_unique<PostgreSQLDictionarySource>(*this);
+    return std::make_shared<PostgreSQLDictionarySource>(*this);
 }
 
 
@@ -67,7 +67,7 @@ public:
         return std::make_shared<RangeHashedDictionary>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, require_nonempty, update_field_loaded_block);
     }
 
-    const IDictionarySource * getSource() const override { return source_ptr.get(); }
+    DictionarySourcePtr getSource() const override { return source_ptr; }
 
     const DictionaryLifetime & getLifetime() const override { return dict_lifetime; }
 
@@ -76,7 +76,7 @@ namespace ErrorCodes
 
     bool hasUpdateField() const override { return false; }
 
-    DictionarySourcePtr clone() const override { return std::make_unique<RedisDictionarySource>(*this); }
+    DictionarySourcePtr clone() const override { return std::make_shared<RedisDictionarySource>(*this); }
 
     std::string toString() const override;
 
@@ -162,7 +162,7 @@ bool XDBCDictionarySource::hasUpdateField() const
 
 DictionarySourcePtr XDBCDictionarySource::clone() const
 {
-    return std::make_unique<XDBCDictionarySource>(*this);
+    return std::make_shared<XDBCDictionarySource>(*this);
 }
 
 
@@ -1,12 +0,0 @@
-#include "writeParenthesisedString.h"
-
-namespace DB
-{
-void writeParenthesisedString(const String & s, WriteBuffer & buf)
-{
-    writeChar('(', buf);
-    writeString(s, buf);
-    writeChar(')', buf);
-}
-
-}
@@ -1,11 +0,0 @@
-#pragma once
-
-#include <IO/WriteHelpers.h>
-
-
-namespace DB
-{
-void writeParenthesisedString(const String & s, WriteBuffer & buf);
-
-
-}
@@ -67,7 +67,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskAzureBlobStorage::readFile(
     auto settings = current_settings.get();
     auto metadata = readMeta(path);
 
-    LOG_TRACE(log, "Read from file by path: {}", backQuote(metadata_disk->getPath() + path));
+    LOG_TEST(log, "Read from file by path: {}", backQuote(metadata_disk->getPath() + path));
 
     bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool;
 
@@ -91,7 +91,7 @@ DiskCacheWrapper::readFile(
     if (!cache_file_predicate(path))
         return DiskDecorator::readFile(path, settings, size);
 
-    LOG_DEBUG(log, "Read file {} from cache", backQuote(path));
+    LOG_TEST(log, "Read file {} from cache", backQuote(path));
 
     if (cache_disk->exists(path))
         return cache_disk->readFile(path, settings, size);
@@ -105,11 +105,11 @@ DiskCacheWrapper::readFile(
         {
             /// This thread will responsible for file downloading to cache.
             metadata->status = DOWNLOADING;
-            LOG_DEBUG(log, "File {} doesn't exist in cache. Will download it", backQuote(path));
+            LOG_TEST(log, "File {} doesn't exist in cache. Will download it", backQuote(path));
         }
         else if (metadata->status == DOWNLOADING)
         {
-            LOG_DEBUG(log, "Waiting for file {} download to cache", backQuote(path));
+            LOG_TEST(log, "Waiting for file {} download to cache", backQuote(path));
             metadata->condition.wait(lock, [metadata] { return metadata->status == DOWNLOADED || metadata->status == ERROR; });
         }
     }
@@ -134,7 +134,7 @@ DiskCacheWrapper::readFile(
         }
         cache_disk->moveFile(tmp_path, path);
 
-        LOG_DEBUG(log, "File {} downloaded to cache", backQuote(path));
+        LOG_TEST(log, "File {} downloaded to cache", backQuote(path));
     }
     catch (...)
     {
@@ -163,7 +163,7 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode
     if (!cache_file_predicate(path))
         return DiskDecorator::writeFile(path, buf_size, mode);
 
-    LOG_DEBUG(log, "Write file {} to cache", backQuote(path));
+    LOG_TRACE(log, "Write file {} to cache", backQuote(path));
 
     auto dir_path = directoryPath(path);
     if (!cache_disk->exists(dir_path))
@@ -253,7 +253,7 @@ void DiskMemory::clearDirectory(const String & path)
             throw Exception(
                 "Failed to clear directory '" + path + "'. " + iter->first + " is a directory", ErrorCodes::CANNOT_DELETE_DIRECTORY);
 
-        files.erase(iter++);
+        iter = files.erase(iter);
     }
 }
 
@@ -75,7 +75,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskHDFS::readFile(const String & path,
 {
     auto metadata = readMeta(path);
 
-    LOG_TRACE(log,
+    LOG_TEST(log,
         "Read from file by path: {}. Existing HDFS objects: {}",
         backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size());
 
@@ -177,7 +177,7 @@ IDiskRemote::Metadata IDiskRemote::createMeta(const String & path) const
 
 void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper)
 {
-    LOG_DEBUG(log, "Remove file by path: {}", backQuote(metadata_disk->getPath() + path));
+    LOG_TRACE(log, "Remove file by path: {}", backQuote(metadata_disk->getPath() + path));
 
     if (!metadata_disk->isFile(path))
         throw Exception(ErrorCodes::CANNOT_DELETE_DIRECTORY, "Path '{}' is a directory", path);
@@ -464,7 +464,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes)
     std::lock_guard lock(reservation_mutex);
     if (bytes == 0)
     {
-        LOG_DEBUG(log, "Reserving 0 bytes on remote_fs disk {}", backQuote(name));
+        LOG_TRACE(log, "Reserving 0 bytes on remote_fs disk {}", backQuote(name));
         ++reservation_count;
         return true;
     }
@@ -473,7 +473,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes)
     UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes);
     if (unreserved_space >= bytes)
     {
-        LOG_DEBUG(log, "Reserving {} on disk {}, having unreserved {}.",
+        LOG_TRACE(log, "Reserving {} on disk {}, having unreserved {}.",
             ReadableSize(bytes), backQuote(name), ReadableSize(unreserved_space));
         ++reservation_count;
         reserved_bytes += bytes;
@@ -219,7 +219,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskS3::readFile(const String & path, co
     auto settings = current_settings.get();
     auto metadata = readMeta(path);
 
-    LOG_TRACE(log, "Read from file by path: {}. Existing S3 objects: {}",
+    LOG_TEST(log, "Read from file by path: {}. Existing S3 objects: {}",
         backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size());
 
     bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool;
@@ -355,7 +355,7 @@ void DiskS3::findLastRevision()
     /// Construct revision number from high to low bits.
     String revision;
     revision.reserve(64);
-    for (int bit = 0; bit < 64; bit++)
+    for (int bit = 0; bit < 64; ++bit)
     {
         auto revision_prefix = revision + "1";
 
@@ -114,6 +114,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.arrow.low_cardinality_as_dictionary = settings.output_format_arrow_low_cardinality_as_dictionary;
     format_settings.arrow.import_nested = settings.input_format_arrow_import_nested;
     format_settings.orc.import_nested = settings.input_format_orc_import_nested;
+    format_settings.orc.row_batch_size = settings.input_format_orc_row_batch_size;
     format_settings.defaults_for_omitted_fields = settings.input_format_defaults_for_omitted_fields;
     format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode;
     format_settings.seekable_read = settings.input_format_allow_seeks;
@@ -200,6 +200,7 @@ struct FormatSettings
 
     struct
     {
         bool import_nested = false;
+        int64_t row_batch_size = 100'000;
     } orc;
 
     /// For capnProto format we should determine how to
@@ -33,7 +33,7 @@ struct CRCImpl
     static CRCBase<ReturnType> base(polynomial);
 
     T crc = 0;
-    for (size_t i = 0; i < size; i++)
+    for (size_t i = 0; i < size; ++i)
         crc = base.tab[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
     return crc;
 }
@@ -94,7 +94,7 @@ private:
         Impl::execute(src_remaining, dst_remaining);
 
         if constexpr (is_big_int_v<T> || std::is_same_v<T, Decimal256>)
-            for (size_t i = 0; i < rows_remaining; i++)
+            for (size_t i = 0; i < rows_remaining; ++i)
                 dst_data[rows_size + i] = dst_remaining[i];
         else
             memcpy(&dst_data[rows_size], dst_remaining, rows_remaining * sizeof(ReturnType));
@@ -68,11 +68,12 @@ public:
 
     std::shared_ptr<const IDictionary> getDictionary(const String & dictionary_name)
     {
-        auto dict = getContext()->getExternalDictionariesLoader().getDictionary(dictionary_name, getContext());
+        auto current_context = getContext();
+        auto dict = current_context->getExternalDictionariesLoader().getDictionary(dictionary_name, current_context);
 
         if (!access_checked)
         {
-            getContext()->checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName());
+            current_context->checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName());
             access_checked = true;
         }
 
@@ -106,8 +107,9 @@ public:
         if (!attr_name_col)
             throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Second argument of function dictGet must be a constant string");
 
-        const auto dictionary_name = dict_name_col->getValue<String>();
-        const auto attribute_name = attr_name_col->getValue<String>();
+        const auto & dictionary_name = dict_name_col->getValue<String>();
+        const auto & attribute_name = attr_name_col->getValue<String>();
 
         return getDictionary(dictionary_name)->isInjective(attribute_name);
     }
 
@@ -609,7 +609,7 @@ ColumnPtr FunctionAnyArityLogical<Impl, Name>::executeImpl(
     ColumnsWithTypeAndName arguments = std::move(args);
 
     /// Special implementation for short-circuit arguments.
-    if (checkShirtCircuitArguments(arguments) != -1)
+    if (checkShortCircuitArguments(arguments) != -1)
         return executeShortCircuit(arguments, result_type);
 
     ColumnRawPtrs args_in;
@@ -185,7 +185,7 @@ public:
         if constexpr (!Impl::isSaturable())
         {
             auto * result = nativeBoolCast(b, types[0], values[0]);
-            for (size_t i = 1; i < types.size(); i++)
+            for (size_t i = 1; i < types.size(); ++i)
                 result = Impl::apply(b, result, nativeBoolCast(b, types[i], values[i]));
             return b.CreateSelect(result, b.getInt8(1), b.getInt8(0));
         }
@@ -194,7 +194,7 @@ public:
         auto * stop = llvm::BasicBlock::Create(next->getContext(), "", next->getParent());
         b.SetInsertPoint(stop);
         auto * phi = b.CreatePHI(b.getInt8Ty(), values.size());
-        for (size_t i = 0; i < types.size(); i++)
+        for (size_t i = 0; i < types.size(); ++i)
         {
             b.SetInsertPoint(next);
             auto * value = values[i];
@@ -204,7 +204,7 @@ private:
 
         std::map<KeyType, ValType> summing_map;
 
-        for (size_t i = 0; i < row_count; i++)
+        for (size_t i = 0; i < row_count; ++i)
        {
             [[maybe_unused]] bool first = true;
             for (auto & arg : args)
@@ -222,7 +222,7 @@ private:
             }
 
             Field temp_val;
-            for (size_t j = 0; j < len; j++)
+            for (size_t j = 0; j < len; ++j)
             {
                 KeyType key;
                 if constexpr (std::is_same<KeyType, String>::value)
@@ -42,7 +42,7 @@ struct FormatImpl
     static void parseNumber(const String & description, UInt64 l, UInt64 r, UInt64 & res)
     {
         res = 0;
-        for (UInt64 pos = l; pos < r; pos++)
+        for (UInt64 pos = l; pos < r; ++pos)
         {
             if (!isNumericASCII(description[pos]))
                 throw Exception("Not a number in curly braces at position " + std::to_string(pos), ErrorCodes::BAD_ARGUMENTS);
@@ -76,7 +76,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
        {
             const double lon = col_lon->getFloat64(row);
             const double lat = col_lat->getFloat64(row);
@@ -58,7 +58,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const int resolution = col_hindex->getUInt(row);
             if (resolution > MAX_H3_RES)
@@ -63,7 +63,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 resolution = col_hindex->getUInt(row);
             if (resolution > MAX_H3_RES)
@@ -55,7 +55,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 hindex = col_hindex->getUInt(row);
 
@@ -64,7 +64,7 @@ public:
         auto current_offset = 0;
         std::vector<int> faces;
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             int max_faces = maxFaceCount(data[row]);
 
@@ -73,7 +73,7 @@ public:
             // function name h3GetFaces (v3.x) changed to getIcosahedronFaces (v4.0.0).
             getIcosahedronFaces(data[row], faces.data());
 
-            for (int i = 0; i < max_faces; i++)
+            for (int i = 0; i < max_faces; ++i)
             {
                 // valid icosahedron faces are represented by integers 0-19
                 if (faces[i] >= 0 && faces[i] <= 19)
@@ -55,7 +55,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 hindex = col_hindex->getUInt(row);
 
@@ -58,7 +58,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 resolution = col_hindex->getUInt(row);
             if (resolution > MAX_H3_RES)
@@ -63,7 +63,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 hindex_origin = col_hindex_origin->getUInt(row);
             const UInt64 hindex_dest = col_hindex_dest->getUInt(row);
@@ -56,7 +56,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0 ; row < input_rows_count ; row++)
+        for (size_t row = 0 ; row < input_rows_count ; ++row)
         {
             UInt8 res = isPentagon(data[row]);
             dst_data[row] = res;
@@ -56,7 +56,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0 ; row < input_rows_count ; row++)
+        for (size_t row = 0 ; row < input_rows_count ; ++row)
         {
             UInt8 res = isResClassIII(data[row]);
             dst_data[row] = res;
@@ -55,7 +55,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 hindex = col_hindex->getUInt(row);
 
@@ -76,7 +76,7 @@ public:
 
         std::vector<H3Index> hindex_vec;
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 parent_hindex = col_hindex->getUInt(row);
             const UInt8 child_resolution = col_resolution->getUInt(row);
@@ -66,7 +66,7 @@ public:
         auto & dst_data = dst->getData();
         dst_data.resize(input_rows_count);
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const UInt64 hindex = col_hindex->getUInt(row);
             const UInt8 resolution = col_resolution->getUInt(row);
@@ -73,7 +73,7 @@ public:
 
         std::vector<H3Index> hindex_vec;
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             const H3Index origin_hindex = col_hindex->getUInt(row);
             const int k = col_k->getInt(row);
@@ -969,7 +969,7 @@ private:
 
     static void executeShortCircuitArguments(ColumnsWithTypeAndName & arguments)
     {
-        int last_short_circuit_argument_index = checkShirtCircuitArguments(arguments);
+        int last_short_circuit_argument_index = checkShortCircuitArguments(arguments);
         if (last_short_circuit_argument_index == -1)
             return;
 
@@ -210,7 +210,7 @@ namespace DB
         ColumnUInt8::MutablePtr col_res = ColumnUInt8::create(input_rows_count);
         ColumnUInt8::Container & vec_res = col_res->getData();
 
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(i));
             vec_res[i] = isAddressInRange(addr, cidr) ? 1 : 0;
@@ -227,7 +227,7 @@ namespace DB
 
         ColumnUInt8::MutablePtr col_res = ColumnUInt8::create(input_rows_count);
         ColumnUInt8::Container & vec_res = col_res->getData();
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             const auto addr = IPAddressVariant(col_addr.getDataAt(i));
             vec_res[i] = isAddressInRange(addr, cidr) ? 1 : 0;
@@ -241,7 +241,7 @@ namespace DB
         ColumnUInt8::MutablePtr col_res = ColumnUInt8::create(input_rows_count);
         ColumnUInt8::Container & vec_res = col_res->getData();
 
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             const auto addr = IPAddressVariant(col_addr.getDataAt(i));
             const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(i));
@@ -310,7 +310,7 @@ public:
 
         FunctionLike func_like;
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             size_t element_start_row = row != 0 ? column_array.getOffsets()[row-1] : 0;
             size_t elem_size = column_array.getOffsets()[row]- element_start_row;
@@ -457,7 +457,7 @@ public:
 
         IColumn::Offset current_offset = 0;
 
-        for (size_t row = 0; row < input_rows_count; row++)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             size_t element_start_row = row != 0 ? nested_column.getOffsets()[row-1] : 0;
             size_t element_size = nested_column.getOffsets()[row]- element_start_row;
@@ -492,7 +492,7 @@ public:
             auto res = func_like.executeImpl(new_arguments, result_type, input_rows_count);
             const auto & container = checkAndGetColumn<ColumnUInt8>(res.get())->getData();
 
-            for (size_t row_num = 0; row_num < element_size; row_num++)
+            for (size_t row_num = 0; row_num < element_size; ++row_num)
             {
                 if (container[row_num] == 1)
                 {
@@ -262,7 +262,7 @@ public:
 private:
     static void executeShortCircuitArguments(ColumnsWithTypeAndName & arguments)
     {
-        int last_short_circuit_argument_index = checkShirtCircuitArguments(arguments);
+        int last_short_circuit_argument_index = checkShortCircuitArguments(arguments);
         if (last_short_circuit_argument_index < 0)
             return;
 
@@ -139,7 +139,7 @@ public:
         }
         else
         {
-            for (size_t i = 1; i < arguments.size(); i++)
+            for (size_t i = 1; i < arguments.size(); ++i)
             {
                 const auto * array = checkAndGetDataType<DataTypeArray>(arguments[i].get());
                 if (array == nullptr)
@@ -78,7 +78,7 @@ public:
         {
             auto geometries = Converter::convert(arguments[0].column->convertToFullColumnIfConst());
 
-            for (size_t i = 0; i < input_rows_count; i++)
+            for (size_t i = 0; i < input_rows_count; ++i)
                 res_data.emplace_back(boost::geometry::area(geometries[i]));
         }
     }
@@ -75,7 +75,7 @@ public:
         {
             auto geometries = Converter::convert(arguments[0].column->convertToFullColumnIfConst());
 
-            for (size_t i = 0; i < input_rows_count; i++)
+            for (size_t i = 0; i < input_rows_count; ++i)
             {
                 Polygon<Point> convex_hull{};
                 boost::geometry::convex_hull(geometries[i], convex_hull);
@@ -77,7 +77,7 @@ public:
         {
             auto geometries = Converter::convert(arguments[0].column->convertToFullColumnIfConst());
 
-            for (size_t i = 0; i < input_rows_count; i++)
+            for (size_t i = 0; i < input_rows_count; ++i)
                 res_data.emplace_back(boost::geometry::perimeter(geometries[i]));
         }
     }
@@ -83,7 +83,7 @@ public:
         auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst());
         auto second = RightConverter::convert(arguments[1].column->convertToFullColumnIfConst());
 
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             boost::geometry::correct(first[i]);
             boost::geometry::correct(second[i]);
@@ -82,7 +82,7 @@ public:
         auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst());
         auto second = RightConverter::convert(arguments[1].column->convertToFullColumnIfConst());
 
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             boost::geometry::correct(first[i]);
             boost::geometry::correct(second[i]);
@@ -81,7 +81,7 @@ public:
         auto second = RightConverter::convert(arguments[1].column->convertToFullColumnIfConst());
 
         /// NOLINTNEXTLINE(clang-analyzer-core.uninitialized.Assign)
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             boost::geometry::correct(first[i]);
             boost::geometry::correct(second[i]);
@@ -82,7 +82,7 @@ public:
 
         /// We are not interested in some pitfalls in third-party libraries
        /// NOLINTNEXTLINE(clang-analyzer-core.uninitialized.Assign)
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             /// Orient the polygons correctly.
             boost::geometry::correct(first[i]);
@@ -85,7 +85,7 @@ public:
         auto second = RightConverter::convert(arguments[1].column->convertToFullColumnIfConst());
 
         /// NOLINTNEXTLINE(clang-analyzer-core.uninitialized.Assign)
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             boost::geometry::correct(first[i]);
             boost::geometry::correct(second[i]);
@@ -55,7 +55,7 @@ public:
         Serializer serializer;
         Geometry geometry;
 
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             const auto & str = column_string->getDataAt(i).toString();
             boost::geometry::read_wkt(str, geometry);
@@ -79,7 +79,7 @@ public:
 
         auto figures = Converter::convert(arguments[0].column->convertToFullColumnIfConst());
 
-        for (size_t i = 0; i < input_rows_count; i++)
+        for (size_t i = 0; i < input_rows_count; ++i)
         {
             std::stringstream str; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
             boost::geometry::correct(figures[i]);