Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 16:12:01 +00:00)

Merge branch 'ClickHouse:master' into hex

This merge is contained in commit 7cb8171d37. Changed files:
.gitattributes (vendored): 2 lines changed

@@ -1,4 +1,2 @@
 contrib/* linguist-vendored
 *.h linguist-language=C++
-# to avoid frequent conflicts
-tests/queries/0_stateless/arcadia_skip_list.txt text merge=union
.github/workflows/backport_branches.yml (vendored): 4 lines changed

@@ -207,8 +207,6 @@ jobs:
       - BuilderDebRelease
       - BuilderDebAsan
       - BuilderDebTsan
-      - BuilderDebUBsan
-      - BuilderDebMsan
       - BuilderDebDebug
     runs-on: [self-hosted, style-checker]
     steps:
@@ -333,7 +331,7 @@ jobs:
 ############################# INTEGRATION TESTS #############################################
 #############################################################################################
   IntegrationTestsRelease:
-    needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
+    needs: [BuilderDebRelease]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
.github/workflows/main.yml (vendored): 420 lines changed

@@ -886,7 +886,7 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  FunctionalStatelessTestTsan:
+  FunctionalStatelessTestTsan0:
     needs: [BuilderDebTsan]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -903,6 +903,70 @@ jobs:
           CHECK_NAME: 'Stateless tests (thread, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
           KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestTsan1:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestTsan2:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -944,7 +1008,7 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  FunctionalStatelessTestMsan:
+  FunctionalStatelessTestMsan0:
     needs: [BuilderDebMsan]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -961,6 +1025,8 @@ jobs:
           CHECK_NAME: 'Stateless tests (memory, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
           KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -973,7 +1039,69 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  FunctionalStatelessTestDebug:
+  FunctionalStatelessTestMsan1:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_memory
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (memory, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestMsan2:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_memory
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (memory, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestDebug0:
     needs: [BuilderDebDebug]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -990,6 +1118,70 @@ jobs:
           CHECK_NAME: 'Stateless tests (debug, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestDebug1:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_debug
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (debug, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestDebug2:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_debug
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (debug, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1497,8 +1689,8 @@ jobs:
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
 #############################################################################################
-  IntegrationTestsAsan:
-    needs: [BuilderDebAsan, FunctionalStatelessTestAsan]
+  IntegrationTestsAsan0:
+    needs: [BuilderDebAsan]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
@@ -1513,6 +1705,8 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Integration tests (asan, actions)'
           REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1525,8 +1719,68 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  IntegrationTestsTsan:
-    needs: [BuilderDebTsan, FunctionalStatelessTestTsan]
+  IntegrationTestsAsan1:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (asan, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (asan, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsTsan0:
+    needs: [BuilderDebTsan]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
@@ -1541,6 +1795,8 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Integration tests (thread, actions)'
           REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 4
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1553,8 +1809,98 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  IntegrationTestsRelease:
-    needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
+  IntegrationTestsTsan1:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 4
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsTsan2:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 4
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsTsan3:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 3
+          RUN_BY_HASH_TOTAL: 4
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsRelease0:
+    needs: [BuilderDebRelease]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
@@ -1569,6 +1915,38 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Integration tests (release, actions)'
           REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 2
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsRelease1:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_release
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (release, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 2
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1759,13 +2137,19 @@ jobs:
       - CheckLabels
       - BuilderReport
      - FastTest
-      - FunctionalStatelessTestDebug
+      - FunctionalStatelessTestDebug0
+      - FunctionalStatelessTestDebug1
+      - FunctionalStatelessTestDebug2
       - FunctionalStatelessTestRelease
       - FunctionalStatelessTestReleaseDatabaseReplicated
       - FunctionalStatelessTestReleaseWideParts
       - FunctionalStatelessTestAsan
-      - FunctionalStatelessTestTsan
-      - FunctionalStatelessTestMsan
+      - FunctionalStatelessTestTsan0
+      - FunctionalStatelessTestTsan1
+      - FunctionalStatelessTestTsan2
+      - FunctionalStatelessTestMsan0
+      - FunctionalStatelessTestMsan1
+      - FunctionalStatelessTestMsan2
       - FunctionalStatelessTestUBsan
       - FunctionalStatefulTestDebug
       - FunctionalStatefulTestRelease
@@ -1783,9 +2167,15 @@ jobs:
       - ASTFuzzerTestTsan
       - ASTFuzzerTestMSan
       - ASTFuzzerTestUBSan
-      - IntegrationTestsAsan
-      - IntegrationTestsRelease
-      - IntegrationTestsTsan
+      - IntegrationTestsAsan0
+      - IntegrationTestsAsan1
+      - IntegrationTestsAsan2
+      - IntegrationTestsRelease0
+      - IntegrationTestsRelease1
+      - IntegrationTestsTsan0
+      - IntegrationTestsTsan1
+      - IntegrationTestsTsan2
+      - IntegrationTestsTsan3
       - PVSCheck
       - UnitTestsAsan
       - UnitTestsTsan
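The bulk of this diff is one pattern applied over and over: each long functional or integration check is split into several parallel jobs that differ only in the two new environment variables RUN_BY_HASH_NUM and RUN_BY_HASH_TOTAL (the same change is repeated in master.yml below). The selection logic itself lives in the CI scripts these jobs invoke (tests/ci/functional_test_check.py and tests/ci/integration_test_check.py), which are not part of this diff. The following Python sketch only illustrates the idea, under the assumption that tests are assigned to shards by hashing their names:

```python
import zlib


def tests_for_shard(all_tests, run_by_hash_num, run_by_hash_total):
    """Keep only the tests whose name hashes into this job's bucket.

    Illustrative sketch: the real CI scripts may hash and filter differently,
    but the idea is that every test falls into exactly one of
    RUN_BY_HASH_TOTAL buckets, so the shards are disjoint and together
    cover the whole test set.
    """
    return [
        name
        for name in all_tests
        if zlib.crc32(name.encode("utf-8")) % run_by_hash_total == run_by_hash_num
    ]


if __name__ == "__main__":
    tests = ["00001_select_1", "00002_system_numbers", "00084_summing_merge_tree"]
    # Three shards, as in the FunctionalStatelessTestTsan0/1/2 jobs above.
    for shard in range(3):
        print(shard, tests_for_shard(tests, shard, 3))
```

Because the bucket is a pure function of the test name, re-running a single shard (say, only FunctionalStatelessTestTsan1) repeats exactly the same subset of tests, and adding more shards only requires bumping RUN_BY_HASH_TOTAL and adding another copy of the job.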
.github/workflows/master.yml (vendored): 450 lines changed

@@ -799,7 +799,7 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  FunctionalStatelessTestTsan:
+  FunctionalStatelessTestTsan0:
     needs: [BuilderDebTsan]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -816,6 +816,70 @@ jobs:
           CHECK_NAME: 'Stateless tests (thread, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
           KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestTsan1:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestTsan2:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -857,7 +921,7 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  FunctionalStatelessTestMsan:
+  FunctionalStatelessTestMsan0:
     needs: [BuilderDebMsan]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -874,6 +938,8 @@ jobs:
           CHECK_NAME: 'Stateless tests (memory, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
           KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -886,7 +952,69 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  FunctionalStatelessTestDebug:
+  FunctionalStatelessTestMsan1:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_memory
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (memory, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestMsan2:
+    needs: [BuilderDebMsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_memory
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (memory, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestDebug0:
     needs: [BuilderDebDebug]
     runs-on: [self-hosted, func-tester]
     steps:
@@ -903,6 +1031,70 @@ jobs:
           CHECK_NAME: 'Stateless tests (debug, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
           KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestDebug1:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_debug
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (debug, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestDebug2:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_debug
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests (debug, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
+          KILL_TIMEOUT: 10800
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1267,8 +1459,8 @@ jobs:
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
 #############################################################################################
-  IntegrationTestsAsan:
-    needs: [BuilderDebAsan, FunctionalStatelessTestAsan]
+  IntegrationTestsAsan0:
+    needs: [BuilderDebAsan]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
@@ -1283,6 +1475,8 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Integration tests (asan, actions)'
           REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 3
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1295,8 +1489,68 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  IntegrationTestsTsan:
-    needs: [BuilderDebTsan, FunctionalStatelessTestTsan]
+  IntegrationTestsAsan1:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (asan, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsAsan2:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_asan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (asan, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsTsan0:
+    needs: [BuilderDebTsan]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
@@ -1311,6 +1565,8 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Integration tests (thread, actions)'
           REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 4
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1323,8 +1579,98 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
-  IntegrationTestsRelease:
-    needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
+  IntegrationTestsTsan1:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 4
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsTsan2:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 2
+          RUN_BY_HASH_TOTAL: 4
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsTsan3:
+    needs: [BuilderDebTsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (thread, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
+          RUN_BY_HASH_NUM: 3
+          RUN_BY_HASH_TOTAL: 4
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsRelease0:
+    needs: [BuilderDebRelease]
     runs-on: [self-hosted, stress-tester]
     steps:
       - name: Download json reports
@@ -1339,6 +1685,66 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Integration tests (release, actions)'
           REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM: 0
+          RUN_BY_HASH_TOTAL: 2
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsRelease1:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_release
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests (release, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
+          RUN_BY_HASH_NUM: 1
+          RUN_BY_HASH_TOTAL: 2
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 integration_test_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  IntegrationTestsFlakyCheck:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Integration test
+        env:
+          TEMP_PATH: ${{runner.temp}}/integration_tests_asan_flaky_check
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Integration tests flaky check (asan, actions)'
+          REPO_COPY: ${{runner.temp}}/integration_tests_asan_flaky_check/ClickHouse
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
@@ -1623,7 +2029,7 @@ jobs:
         env:
           TEMP_PATH: ${{runner.temp}}/unit_tests_ubsan
           REPORTS_PATH: ${{runner.temp}}/reports_dir
-          CHECK_NAME: 'Unit tests (msan, actions)'
+          CHECK_NAME: 'Unit tests (ubsan, actions)'
           REPO_COPY: ${{runner.temp}}/unit_tests_ubsan/ClickHouse
         run: |
           sudo rm -fr $TEMP_PATH
@@ -1641,12 +2047,18 @@ jobs:
     needs:
       - DockerHubPush
       - BuilderReport
-      - FunctionalStatelessTestDebug
+      - FunctionalStatelessTestDebug0
+      - FunctionalStatelessTestDebug1
+      - FunctionalStatelessTestDebug2
       - FunctionalStatelessTestRelease
       - FunctionalStatelessTestReleaseDatabaseOrdinary
       - FunctionalStatelessTestAsan
-      - FunctionalStatelessTestTsan
-      - FunctionalStatelessTestMsan
+      - FunctionalStatelessTestTsan0
+      - FunctionalStatelessTestTsan1
+      - FunctionalStatelessTestTsan2
+      - FunctionalStatelessTestMsan0
+      - FunctionalStatelessTestMsan1
+      - FunctionalStatelessTestMsan2
       - FunctionalStatelessTestUBsan
       - FunctionalStatefulTestDebug
       - FunctionalStatefulTestRelease
@@ -1660,9 +2072,15 @@ jobs:
       - StressTestTsan
      - StressTestMsan
       - StressTestUBsan
-      - IntegrationTestsAsan
-      - IntegrationTestsRelease
-      - IntegrationTestsTsan
+      - IntegrationTestsAsan0
+      - IntegrationTestsAsan1
+      - IntegrationTestsAsan2
+      - IntegrationTestsRelease0
+      - IntegrationTestsRelease1
+      - IntegrationTestsTsan0
+      - IntegrationTestsTsan1
+      - IntegrationTestsTsan2
+      - IntegrationTestsTsan3
       - CompatibilityCheck
       - ASTFuzzerTestDebug
       - ASTFuzzerTestAsan
178
CHANGELOG.md
178
CHANGELOG.md
@ -1,3 +1,181 @@
|
|||||||
|
### ClickHouse release v21.12, 2021-12-13
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
|
* *A fix for a feature that previously had unwanted behaviour.* Do not allow direct select for Kafka/RabbitMQ/FileLog. Can be enabled by setting `stream_like_engine_allow_direct_select`. Direct select will be not allowed even if enabled by setting, in case there is an attached materialized view. For Kafka and RabbitMQ direct selectm if allowed, will not commit massages by default. To enable commits with direct select, user must use storage level setting `kafka{rabbitmq}_commit_on_select=1` (default `0`). [#31053](https://github.com/ClickHouse/ClickHouse/pull/31053) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* *A slight change in behaviour of a new function.* Return unquoted string in JSON_VALUE. Closes [#27965](https://github.com/ClickHouse/ClickHouse/issues/27965). [#31008](https://github.com/ClickHouse/ClickHouse/pull/31008) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* *Setting rename.* Add custom null representation support for TSV/CSV input formats. Fix deserialing Nullable(String) in TSV/CSV/JSONCompactStringsEachRow/JSONStringsEachRow input formats. Rename `output_format_csv_null_representation` and `output_format_tsv_null_representation` to `format_csv_null_representation` and `format_tsv_null_representation` accordingly. [#30497](https://github.com/ClickHouse/ClickHouse/pull/30497) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* *Further deprecation of already unused code.* This is relevant only for users of ClickHouse versions older than 20.6. A "leader election" mechanism is removed from `ReplicatedMergeTree`, because multiple leaders are supported since 20.6. If you are upgrading from an older version and some replica with an old version is a leader, then server will fail to start after upgrade. Stop replicas with old version to make new version start. After that it will not be possible to downgrade to version older than 20.6. [#32140](https://github.com/ClickHouse/ClickHouse/pull/32140) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
|
||||||
|
* Implemented more of the ZooKeeper Four Letter Words commands in clickhouse-keeper: https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands. [#28981](https://github.com/ClickHouse/ClickHouse/pull/28981) ([JackyWoo](https://github.com/JackyWoo)). Now `clickhouse-keeper` is feature complete.
|
||||||
|
* Support for `Bool` data type. [#31072](https://github.com/ClickHouse/ClickHouse/pull/31072) ([kevin wan](https://github.com/MaxWk)).
|
||||||
|
* Support for `PARTITION BY` in File, URL, HDFS storages and with `INSERT INTO` table function. Closes [#30273](https://github.com/ClickHouse/ClickHouse/issues/30273). [#30690](https://github.com/ClickHouse/ClickHouse/pull/30690) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Added `CONSTRAINT ... ASSUME ...` (without checking during `INSERT`). Added query transformation to CNF (https://github.com/ClickHouse/ClickHouse/issues/11749) for more convenient optimization. Added simple query rewriting using constraints (only simple matching now, will be improved to support <,=,>... later). Added ability to replace heavy columns with light columns if it's possible. [#18787](https://github.com/ClickHouse/ClickHouse/pull/18787) ([Nikita Vasilev](https://github.com/nikvas0)).
|
||||||
|
* Basic access authentication for http/url functions. [#31648](https://github.com/ClickHouse/ClickHouse/pull/31648) ([michael1589](https://github.com/michael1589)).
|
||||||
|
* Support `INTERVAL` type in `STEP` clause for `WITH FILL` modifier. [#30927](https://github.com/ClickHouse/ClickHouse/pull/30927) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Add support for parallel reading from multiple files and support globs in `FROM INFILE` clause. [#30135](https://github.com/ClickHouse/ClickHouse/pull/30135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
|
||||||
|
* Add support for `Identifier` table and database query parameters. Closes [#27226](https://github.com/ClickHouse/ClickHouse/issues/27226). [#28668](https://github.com/ClickHouse/ClickHouse/pull/28668) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* *TLDR: Major improvements of completeness and consistency of text formats.* Refactor formats `TSV`, `TSVRaw`, `CSV` and `JSONCompactEachRow`, `JSONCompactStringsEachRow`, remove code duplication, add base interface for formats with `-WithNames` and `-WithNamesAndTypes` suffixes. Add formats `CSVWithNamesAndTypes`, `TSVRawWithNames`, `TSVRawWithNamesAndTypes`, `JSONCompactEachRowWIthNames`, `JSONCompactStringsEachRowWIthNames`, `RowBinaryWithNames`. Support parallel parsing for formats `TSVWithNamesAndTypes`, `TSVRaw(WithNames/WIthNamesAndTypes)`, `CSVWithNamesAndTypes`, `JSONCompactEachRow(WithNames/WIthNamesAndTypes)`, `JSONCompactStringsEachRow(WithNames/WIthNamesAndTypes)`. Support columns mapping and types checking for `RowBinaryWithNamesAndTypes` format. Add setting `input_format_with_types_use_header` which specify if we should check that types written in <format_name>`WIthNamesAndTypes` format matches with table structure. Add setting `input_format_csv_empty_as_default` and use it in CSV format instead of `input_format_defaults_for_omitted_fields` (because this setting should not control `csv_empty_as_default`). Fix usage of setting `input_format_defaults_for_omitted_fields` (it was used only as `csv_empty_as_default`, but it should control calculation of default expressions for omitted fields). Fix Nullable input/output in `TSVRaw` format, make this format fully compatible with inserting into TSV. Fix inserting NULLs in `LowCardinality(Nullable)` when `input_format_null_as_default` is enabled (previously default values was inserted instead of actual NULLs). Fix strings deserialization in `JSONStringsEachRow`/`JSONCompactStringsEachRow` formats (strings were parsed just until first '\n' or '\t'). Add ability to use `Raw` escaping rule in Template input format. Add diagnostic info for JSONCompactEachRow(WithNames/WIthNamesAndTypes) input format. Fix bug with parallel parsing of `-WithNames` formats in case when setting `min_chunk_bytes_for_parallel_parsing` is less than bytes in a single row. [#30178](https://github.com/ClickHouse/ClickHouse/pull/30178) ([Kruglov Pavel](https://github.com/Avogar)). Allow to print/parse names and types of colums in `CustomSeparated` input/output format. Add formats `CustomSeparatedWithNames/WithNamesAndTypes` similar to `TSVWithNames/WithNamesAndTypes`. [#31434](https://github.com/ClickHouse/ClickHouse/pull/31434) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Aliyun OSS Storage support. [#31286](https://github.com/ClickHouse/ClickHouse/pull/31286) ([cfcz48](https://github.com/cfcz48)).
|
||||||
|
* Exposes all settings of the global thread pool in the configuration file. [#31285](https://github.com/ClickHouse/ClickHouse/pull/31285) ([Tomáš Hromada](https://github.com/gyfis)).
|
||||||
|
* Introduced window functions `exponentialTimeDecayedSum`, `exponentialTimeDecayedMax`, `exponentialTimeDecayedCount` and `exponentialTimeDecayedAvg` which are more effective than `exponentialMovingAverage` for bigger windows. Also more use-cases were covered. [#29799](https://github.com/ClickHouse/ClickHouse/pull/29799) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Add option to compress logs before writing them to a file using LZ4. Closes [#23860](https://github.com/ClickHouse/ClickHouse/issues/23860). [#29219](https://github.com/ClickHouse/ClickHouse/pull/29219) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Support `JOIN ON 1 = 1` that have CROSS JOIN semantic. This closes [#25578](https://github.com/ClickHouse/ClickHouse/issues/25578). [#25894](https://github.com/ClickHouse/ClickHouse/pull/25894) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Add the `-Map` combinator for the `Map` type. Rename the old `sum-, min-, max- Map` combinators for mapped arrays to `sum-, min-, max- MappedArrays`. [#24539](https://github.com/ClickHouse/ClickHouse/pull/24539) ([Ildus Kurbangaliev](https://github.com/ildus)).
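  A short sketch of the combinator on a `Map` column, using a hypothetical `metrics` table:

  ```sql
  -- Hypothetical table with a Map column.
  CREATE TABLE metrics (ts DateTime, counters Map(String, UInt64)) ENGINE = MergeTree ORDER BY ts;

  -- The -Map combinator aggregates Map values per key.
  SELECT sumMap(counters) FROM metrics;
  ```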
|
||||||
|
* Make reading from HTTP retriable. Closes [#29696](https://github.com/ClickHouse/ClickHouse/issues/29696). [#29894](https://github.com/ClickHouse/ClickHouse/pull/29894) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
|
||||||
|
#### Experimental Feature
|
||||||
|
|
||||||
|
* `WINDOW VIEW` to enable stream processing in ClickHouse. [#8331](https://github.com/ClickHouse/ClickHouse/pull/8331) ([vxider](https://github.com/Vxider)).
|
||||||
|
* Drop support for using Ordinary databases with `MaterializedMySQL`. [#31292](https://github.com/ClickHouse/ClickHouse/pull/31292) ([Stig Bakken](https://github.com/stigsb)).
|
||||||
|
* Implement the commands BACKUP and RESTORE for the Log family. This feature is under development. [#30688](https://github.com/ClickHouse/ClickHouse/pull/30688) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
|
||||||
|
* Reduce memory usage when reading the `Parquet`, `ORC` and `Arrow` formats via `s3` / `url` / `hdfs` (controlled by the setting `input_format_allow_seeks`, enabled by default). Also add setting `remote_read_min_bytes_for_seek` to control seeks. Closes [#10461](https://github.com/ClickHouse/ClickHouse/issues/10461). Closes [#16857](https://github.com/ClickHouse/ClickHouse/issues/16857). [#30936](https://github.com/ClickHouse/ClickHouse/pull/30936) ([Kseniia Sumarokova](https://github.com/kssenii)).
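  A hedged example of the two settings in a remote read; the bucket URL, file and structure are placeholders, only the setting names come from the entry above:

  ```sql
  -- Seeks are allowed while reading Parquet from S3 (the default),
  -- and a seek is only issued when it would skip at least 1 MiB.
  SELECT count()
  FROM s3('https://my-bucket.s3.amazonaws.com/data.parquet', 'Parquet', 'id UInt64, name String')
  SETTINGS input_format_allow_seeks = 1, remote_read_min_bytes_for_seek = 1048576;
  ```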
|
||||||
|
* Add optimizations for constant conditions in JOIN ON, ref [#26928](https://github.com/ClickHouse/ClickHouse/issues/26928). [#27021](https://github.com/ClickHouse/ClickHouse/pull/27021) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Support parallel formatting for all text formats, except `JSONEachRowWithProgress` and `PrettyCompactMonoBlock`. [#31489](https://github.com/ClickHouse/ClickHouse/pull/31489) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Speed up count over nullable columns. [#31806](https://github.com/ClickHouse/ClickHouse/pull/31806) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Speed up `avg` and `sumCount` aggregate functions. [#31694](https://github.com/ClickHouse/ClickHouse/pull/31694) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Improve performance of JSON and XML output formats. [#31673](https://github.com/ClickHouse/ClickHouse/pull/31673) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Improve performance of syncing data to block device. This closes [#31181](https://github.com/ClickHouse/ClickHouse/issues/31181). [#31229](https://github.com/ClickHouse/ClickHouse/pull/31229) ([zhanglistar](https://github.com/zhanglistar)).
|
||||||
|
* Fixing query performance issue in `LiveView` tables. Fixes [#30831](https://github.com/ClickHouse/ClickHouse/issues/30831). [#31006](https://github.com/ClickHouse/ClickHouse/pull/31006) ([vzakaznikov](https://github.com/vzakaznikov)).
|
||||||
|
* Speed up query parsing. [#31949](https://github.com/ClickHouse/ClickHouse/pull/31949) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional `rule_type` field). [#25122](https://github.com/ClickHouse/ClickHouse/pull/25122) ([Michail Safronov](https://github.com/msaf1980)).
|
||||||
|
* Remove an excessive `DESC TABLE` request in `remote()` when the db.table is passed as an identifier rather than a string (e.g. `remote('127.1', system.one)`). [#32019](https://github.com/ClickHouse/ClickHouse/pull/32019) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Optimize the function `tupleElement` to read a subcolumn when the setting `optimize_functions_to_subcolumns` is enabled. [#31261](https://github.com/ClickHouse/ClickHouse/pull/31261) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Optimize the function `mapContains` to read the `key` subcolumn when the setting `optimize_functions_to_subcolumns` is enabled. [#31218](https://github.com/ClickHouse/ClickHouse/pull/31218) ([Anton Popov](https://github.com/CurtizJ)).
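  A sketch of the optimization with a hypothetical table; the table name and key are illustrative only:

  ```sql
  CREATE TABLE metrics (ts DateTime, counters Map(String, UInt64)) ENGINE = MergeTree ORDER BY ts;

  SET optimize_functions_to_subcolumns = 1;

  -- With the setting enabled, the predicate is rewritten to read only the
  -- key subcolumn of the Map instead of deserializing the whole column.
  SELECT count() FROM metrics WHERE mapContains(counters, 'errors');
  ```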
|
||||||
|
* Add settings `merge_tree_min_rows_for_concurrent_read_for_remote_filesystem` and `merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem`. [#30970](https://github.com/ClickHouse/ClickHouse/pull/30970) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Skipping mutations of different partitions in `StorageMergeTree`. [#21326](https://github.com/ClickHouse/ClickHouse/pull/21326) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
|
||||||
|
* Do not allow to drop a table or dictionary if some tables or dictionaries depend on it. [#30977](https://github.com/ClickHouse/ClickHouse/pull/30977) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Allow versioning of aggregate function states. Now we can introduce backward compatible changes in serialization format of aggregate function states. Closes [#12552](https://github.com/ClickHouse/ClickHouse/issues/12552). [#24820](https://github.com/ClickHouse/ClickHouse/pull/24820) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Support PostgreSQL style `ALTER MODIFY COLUMN` syntax. [#32003](https://github.com/ClickHouse/ClickHouse/pull/32003) ([SuperDJY](https://github.com/cmsxbc)).
|
||||||
|
* Added `update_field` support for `RangeHashedDictionary`, `ComplexKeyRangeHashedDictionary`. [#32185](https://github.com/ClickHouse/ClickHouse/pull/32185) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* The `murmurHash3_128` and `sipHash128` functions now accept an arbitrary number of arguments. This closes [#28774](https://github.com/ClickHouse/ClickHouse/issues/28774). [#28965](https://github.com/ClickHouse/ClickHouse/pull/28965) ([小路](https://github.com/nicelulu)).
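  For example (the argument values are arbitrary):

  ```sql
  -- Both functions now accept any number of arguments.
  SELECT hex(sipHash128('ClickHouse', 'v21', '12'));
  SELECT hex(murmurHash3_128('a', 'b', 'c'));
  ```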
|
||||||
|
* Support default expression for `HDFS` storage and optimize fetching when source is column oriented. [#32256](https://github.com/ClickHouse/ClickHouse/pull/32256) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Improve the operation name of an opentelemetry span. [#32234](https://github.com/ClickHouse/ClickHouse/pull/32234) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Use `Content-Type: application/x-ndjson` (http://ndjson.org/) for output format `JSONEachRow`. [#32223](https://github.com/ClickHouse/ClickHouse/pull/32223) ([Dmitriy Dorofeev](https://github.com/deem0n)).
|
||||||
|
* Improve skipping unknown fields with quoted escaping rule in Template/CustomSeparated formats. Previously you could skip only quoted strings, now you can skip values with any type. [#32204](https://github.com/ClickHouse/ClickHouse/pull/32204) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Now `clickhouse-keeper` refuses to start or apply configuration changes when they contain duplicated IDs or endpoints. Fixes [#31339](https://github.com/ClickHouse/ClickHouse/issues/31339). [#32121](https://github.com/ClickHouse/ClickHouse/pull/32121) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Set Content-Type in HTTP packets issued from URL engine. [#32113](https://github.com/ClickHouse/ClickHouse/pull/32113) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Return Content-Type as 'application/json' for `JSONEachRow` format if `output_format_json_array_of_rows` is enabled. [#32112](https://github.com/ClickHouse/ClickHouse/pull/32112) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Allow to parse `+` before `Float32`/`Float64` values. [#32079](https://github.com/ClickHouse/ClickHouse/pull/32079) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Allow a user configured `hdfs_replication` parameter for `DiskHDFS` and `StorageHDFS`. Closes [#32039](https://github.com/ClickHouse/ClickHouse/issues/32039). [#32049](https://github.com/ClickHouse/ClickHouse/pull/32049) ([leosunli](https://github.com/leosunli)).
|
||||||
|
* Added ClickHouse `exception` and `exception_code` fields to opentelemetry span log. [#32040](https://github.com/ClickHouse/ClickHouse/pull/32040) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Improve opentelemetry span log duration - it was zero at the query level if there was a query exception. [#32038](https://github.com/ClickHouse/ClickHouse/pull/32038) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Fix the issue that `LowCardinality` of `Int256` cannot be created. [#31832](https://github.com/ClickHouse/ClickHouse/pull/31832) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Recreate `system.*_log` tables in case of different engine/partition_by. [#31824](https://github.com/ClickHouse/ClickHouse/pull/31824) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* `MaterializedMySQL`: Fix issue with table named 'table'. [#31781](https://github.com/ClickHouse/ClickHouse/pull/31781) ([Håvard Kvålen](https://github.com/havardk)).
|
||||||
|
* ClickHouse dictionary source: support named collections. Closes [#31705](https://github.com/ClickHouse/ClickHouse/issues/31705). [#31749](https://github.com/ClickHouse/ClickHouse/pull/31749) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Allow to use named collections configuration for Kafka and RabbitMQ engines (the same way as for other integration table engines). [#31691](https://github.com/ClickHouse/ClickHouse/pull/31691) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Always re-render prompt while navigating history in clickhouse-client. This will improve usability of manipulating very long queries that don't fit on screen. [#31675](https://github.com/ClickHouse/ClickHouse/pull/31675) ([alexey-milovidov](https://github.com/alexey-milovidov)) (author: Amos Bird).
|
||||||
|
* Add key bindings for navigating through history (instead of lines/history). [#31641](https://github.com/ClickHouse/ClickHouse/pull/31641) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Improve the `max_execution_time` checks. Fixed some cases where timeout checks did not happen and a query could run for too long. [#31636](https://github.com/ClickHouse/ClickHouse/pull/31636) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Better exception message when `users.xml` cannot be loaded due to bad password hash. This closes [#24126](https://github.com/ClickHouse/ClickHouse/issues/24126). [#31557](https://github.com/ClickHouse/ClickHouse/pull/31557) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Use shard and replica name from `Replicated` database arguments when expanding macros in `ReplicatedMergeTree` arguments if these macros are not defined in config. Closes [#31471](https://github.com/ClickHouse/ClickHouse/issues/31471). [#31488](https://github.com/ClickHouse/ClickHouse/pull/31488) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Better analysis for `min/max/count` projection. Now, with enabled `allow_experimental_projection_optimization`, virtual `min/max/count` projection can be used together with columns from partition key. [#31474](https://github.com/ClickHouse/ClickHouse/pull/31474) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Add `--pager` support for `clickhouse-local`. [#31457](https://github.com/ClickHouse/ClickHouse/pull/31457) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix waiting for the editor during interactive query editing (`waitpid()` returned -1 on `SIGWINCH` while `EDITOR` and `clickhouse-local`/`clickhouse-client` run concurrently). [#31456](https://github.com/ClickHouse/ClickHouse/pull/31456) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Throw an exception if there is some garbage after field in `JSONCompactStrings(EachRow)` format. [#31455](https://github.com/ClickHouse/ClickHouse/pull/31455) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Default value of `http_send_timeout` and `http_receive_timeout` settings changed from 1800 (30 minutes) to 180 (3 minutes). [#31450](https://github.com/ClickHouse/ClickHouse/pull/31450) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* `MaterializedMySQL` now handles `CREATE TABLE ... LIKE ...` DDL queries. [#31410](https://github.com/ClickHouse/ClickHouse/pull/31410) ([Stig Bakken](https://github.com/stigsb)).
|
||||||
|
* Return an artificial CREATE query when executing `SHOW CREATE TABLE` on system tables. [#31391](https://github.com/ClickHouse/ClickHouse/pull/31391) ([SuperDJY](https://github.com/cmsxbc)).
|
||||||
|
* Previously progress was shown only for the `numbers` table function; now it is also shown for `numbers_mt`. [#31318](https://github.com/ClickHouse/ClickHouse/pull/31318) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Initial user's roles are used now to find row policies, see [#31080](https://github.com/ClickHouse/ClickHouse/issues/31080). [#31262](https://github.com/ClickHouse/ClickHouse/pull/31262) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* If an obsolete setting is changed, show a warning in `system.warnings`. [#31252](https://github.com/ClickHouse/ClickHouse/pull/31252) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Improved backoff for background cleanup tasks in `MergeTree`. Settings `merge_tree_clear_old_temporary_directories_interval_seconds` and `merge_tree_clear_old_parts_interval_seconds` moved from users settings to merge tree settings. [#31180](https://github.com/ClickHouse/ClickHouse/pull/31180) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Now every replica sends the client only incremental information about profile event counters. [#31155](https://github.com/ClickHouse/ClickHouse/pull/31155) ([Dmitry Novik](https://github.com/novikd)). This makes the `--hardware_utilization` option in `clickhouse-client` usable.
|
||||||
|
* Enable multiline editing in clickhouse-client by default. This addresses [#31121](https://github.com/ClickHouse/ClickHouse/issues/31121) . [#31123](https://github.com/ClickHouse/ClickHouse/pull/31123) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Function name normalization for `ALTER` queries. This helps avoid metadata mismatch between creating a table with indices/projections and adding indices/projections via ALTER commands. This is a follow-up PR of https://github.com/ClickHouse/ClickHouse/pull/20174. Marked as an improvement because there are no bug reports and the scenario is rather rare. [#31095](https://github.com/ClickHouse/ClickHouse/pull/31095) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Support `IF EXISTS` modifier for `RENAME DATABASE`/`TABLE`/`DICTIONARY` query. If this directive is used, one will not get an error if the DATABASE/TABLE/DICTIONARY to be renamed doesn't exist. [#31081](https://github.com/ClickHouse/ClickHouse/pull/31081) ([victorgao](https://github.com/kafka1991)).
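  For example (object names are hypothetical):

  ```sql
  -- No exception is thrown if old_table or old_dict does not exist.
  RENAME TABLE IF EXISTS old_table TO new_table;
  RENAME DICTIONARY IF EXISTS old_dict TO new_dict;
  ```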
|
||||||
|
* Cancel vertical merges when partition is dropped. This is a follow-up of https://github.com/ClickHouse/ClickHouse/pull/25684 and https://github.com/ClickHouse/ClickHouse/pull/30996. [#31057](https://github.com/ClickHouse/ClickHouse/pull/31057) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* The local session inside a Clickhouse dictionary source won't send its events to the session log anymore. This fixes a possible deadlock (tsan alert) on shutdown. Also this PR fixes flaky `test_dictionaries_dependency_xml/`. [#31013](https://github.com/ClickHouse/ClickHouse/pull/31013) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Less locking in ALTER command. [#31010](https://github.com/ClickHouse/ClickHouse/pull/31010) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix `--verbose` option in clickhouse-local interactive mode and allow logging into file. [#30881](https://github.com/ClickHouse/ClickHouse/pull/30881) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Added `\l`, `\d`, `\c` commands in `clickhouse-client` like in MySQL and PostgreSQL. [#30876](https://github.com/ClickHouse/ClickHouse/pull/30876) ([Pavel Medvedev](https://github.com/pmed)).
|
||||||
|
* For clickhouse-local or clickhouse-client: if the `--interactive` option is combined with `--query` or `--queries-file`, first execute them as in non-interactive mode and then start interactive mode. [#30851](https://github.com/ClickHouse/ClickHouse/pull/30851) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix possible "The local set of parts of X doesn't look like the set of parts in ZooKeeper" error (if DROP fails during removing znodes from zookeeper). [#30826](https://github.com/ClickHouse/ClickHouse/pull/30826) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* The Avro format now works with the Kafka engine. Added setting `output_format_avro_rows_in_file`. [#30351](https://github.com/ClickHouse/ClickHouse/pull/30351) ([Ilya Golshtein](https://github.com/ilejn)).
|
||||||
|
* Allow to specify one or any number of PostgreSQL schemas for one `MaterializedPostgreSQL` database. Closes [#28901](https://github.com/ClickHouse/ClickHouse/issues/28901). Closes [#29324](https://github.com/ClickHouse/ClickHouse/issues/29324). [#28933](https://github.com/ClickHouse/ClickHouse/pull/28933) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Changed the default port for clickhouse-keeper internal communication from 44444 to 9234. Fixes [#30879](https://github.com/ClickHouse/ClickHouse/issues/30879). [#31799](https://github.com/ClickHouse/ClickHouse/pull/31799) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Implement the `transform` function with `Decimal` arguments. [#31839](https://github.com/ClickHouse/ClickHouse/pull/31839) ([李帅](https://github.com/loneylee)).
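  A minimal sketch with Decimal literals (the values are arbitrary):

  ```sql
  -- Maps 1.50 to 10.00; values not listed fall back to the default 0.00.
  SELECT transform(toDecimal32(1.5, 2),
                   [toDecimal32(1.5, 2), toDecimal32(2.5, 2)],
                   [toDecimal32(10, 2), toDecimal32(20, 2)],
                   toDecimal32(0, 2));
  ```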
|
||||||
|
* Fix abort in the debug server and a `DB::Exception: std::out_of_range: basic_string` error in the release server in case of a bad HDFS URL, by adding an additional check of the HDFS URL structure. [#31042](https://github.com/ClickHouse/ClickHouse/pull/31042) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix possible assert in `hdfs` table function/engine, add test. [#31036](https://github.com/ClickHouse/ClickHouse/pull/31036) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
#### Bug Fixes
|
||||||
|
|
||||||
|
* Fix group by / order by / limit by aliases with positional arguments enabled. Closes [#31173](https://github.com/ClickHouse/ClickHouse/issues/31173). [#31741](https://github.com/ClickHouse/ClickHouse/pull/31741) ([Kseniia Sumarokova](https://github.com/kssenii)).
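  A sketch of the fixed behaviour, assuming `enable_positional_arguments` is turned on (it is off by default in this release):

  ```sql
  SET enable_positional_arguments = 1;

  -- `1` refers to the first select-list expression, i.e. the alias `bucket`.
  SELECT number % 3 AS bucket, count() AS c
  FROM numbers(30)
  GROUP BY 1
  ORDER BY 1
  LIMIT 1 BY 1;
  ```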
|
||||||
|
* Fix usage of `Buffer` table engine with type `Map`. Fixes [#30546](https://github.com/ClickHouse/ClickHouse/issues/30546). [#31742](https://github.com/ClickHouse/ClickHouse/pull/31742) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix reading from `MergeTree` tables with enabled `use_uncompressed_cache`. [#31826](https://github.com/ClickHouse/ClickHouse/pull/31826) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed the behavior when mutations that have nothing to do are stuck (with enabled setting `empty_result_for_aggregation_by_empty_set`). [#32358](https://github.com/ClickHouse/ClickHouse/pull/32358) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix skipping columns while writing protobuf. This PR fixes [#31160](https://github.com/ClickHouse/ClickHouse/issues/31160), see the [comment](https://github.com/ClickHouse/ClickHouse/issues/31160#issuecomment-980595318). [#31988](https://github.com/ClickHouse/ClickHouse/pull/31988) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fix a bug when removing unneeded columns in a subquery: if the query contains an aggregation function and no GROUP BY, the function is not removed even if it looks unneeded. [#32289](https://github.com/ClickHouse/ClickHouse/pull/32289) ([dongyifeng](https://github.com/dyf6372)).
|
||||||
|
* Fix a bug where a quota was reported as exceeded even though its limit had not been reached. This PR fixes [#31174](https://github.com/ClickHouse/ClickHouse/issues/31174). [#31337](https://github.com/ClickHouse/ClickHouse/pull/31337) ([sunny](https://github.com/sunny19930321)).
|
||||||
|
* Fix SHOW GRANTS when partial revokes are used. This PR fixes [#31138](https://github.com/ClickHouse/ClickHouse/issues/31138). [#31249](https://github.com/ClickHouse/ClickHouse/pull/31249) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Memory amount was incorrectly estimated when ClickHouse is run in containers with cgroup limits. [#31157](https://github.com/ClickHouse/ClickHouse/pull/31157) ([Pavel Medvedev](https://github.com/pmed)).
|
||||||
|
* Fix `ALTER ... MATERIALIZE COLUMN ...` queries in case when data type of default expression is not equal to the data type of column. [#32348](https://github.com/ClickHouse/ClickHouse/pull/32348) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed crash with SIGFPE in aggregate function `avgWeighted` with `Decimal` argument. Fixes [#32053](https://github.com/ClickHouse/ClickHouse/issues/32053). [#32303](https://github.com/ClickHouse/ClickHouse/pull/32303) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* The server might fail to start with a `Cannot attach 1 tables due to cyclic dependencies` error if a `Dictionary` table refers to an XML dictionary with the same name; it's fixed. Fixes [#31315](https://github.com/ClickHouse/ClickHouse/issues/31315). [#32288](https://github.com/ClickHouse/ClickHouse/pull/32288) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix a parsing error when deserializing NaN for `Nullable(Float)` with the `Quoted` escaping rule. [#32190](https://github.com/ClickHouse/ClickHouse/pull/32190) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* XML dictionaries: identifiers, used in table create query, can be qualified to `default_database` during upgrade to newer version. Closes [#31963](https://github.com/ClickHouse/ClickHouse/issues/31963). [#32187](https://github.com/ClickHouse/ClickHouse/pull/32187) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Number of active replicas might be determined incorrectly when inserting with quorum if setting `replicated_can_become_leader` is disabled on some replicas. It's fixed. [#32157](https://github.com/ClickHouse/ClickHouse/pull/32157) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Dictionaries: fix cases when `{condition}` does not work for custom database queries. [#32117](https://github.com/ClickHouse/ClickHouse/pull/32117) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix `CAST` from `Nullable` with `cast_keep_nullable` (`PARAMETER_OUT_OF_BOUND` error before for i.e. `toUInt32OrDefault(toNullable(toUInt32(1)))`). [#32080](https://github.com/ClickHouse/ClickHouse/pull/32080) ([Azat Khuzhin](https://github.com/azat)).
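  The example from the entry above, which previously failed with `PARAMETER_OUT_OF_BOUND`:

  ```sql
  SET cast_keep_nullable = 1;

  -- Now returns 1 instead of throwing PARAMETER_OUT_OF_BOUND.
  SELECT toUInt32OrDefault(toNullable(toUInt32(1)));
  ```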
|
||||||
|
* Fix CREATE TABLE of Join Storage in some obscure cases. Close [#31680](https://github.com/ClickHouse/ClickHouse/issues/31680). [#32066](https://github.com/ClickHouse/ClickHouse/pull/32066) ([SuperDJY](https://github.com/cmsxbc)).
|
||||||
|
* Fixed `Directory ... already exists and is not empty` error when detaching part. [#32063](https://github.com/ClickHouse/ClickHouse/pull/32063) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* `MaterializedMySQL` (experimental feature): Fix misinterpretation of `DECIMAL` data from MySQL. [#31990](https://github.com/ClickHouse/ClickHouse/pull/31990) ([Håvard Kvålen](https://github.com/havardk)).
|
||||||
|
* The `FileLog` engine (experimental feature) unnecessarily created a metadata directory when table creation failed. Fix [#31962](https://github.com/ClickHouse/ClickHouse/issues/31962). [#31967](https://github.com/ClickHouse/ClickHouse/pull/31967) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Some `GET_PART` entry might hang in replication queue if part is lost on all replicas and there are no other parts in the same partition. It's fixed in cases when partition key contains only columns of integer types or `Date[Time]`. Fixes [#31485](https://github.com/ClickHouse/ClickHouse/issues/31485). [#31887](https://github.com/ClickHouse/ClickHouse/pull/31887) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix functions `empty` and `notEmpty` with arguments of `UUID` type. Fixes [#31819](https://github.com/ClickHouse/ClickHouse/issues/31819). [#31883](https://github.com/ClickHouse/ClickHouse/pull/31883) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Change configuration path from `keeper_server.session_timeout_ms` to `keeper_server.coordination_settings.session_timeout_ms` when constructing a `KeeperTCPHandler`. Same with `operation_timeout`. [#31859](https://github.com/ClickHouse/ClickHouse/pull/31859) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* Fix invalid cast of Nullable type when nullable primary key is used. (Nullable primary key is a discouraged feature - please do not use). This fixes [#31075](https://github.com/ClickHouse/ClickHouse/issues/31075). [#31823](https://github.com/ClickHouse/ClickHouse/pull/31823) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix crash in recursive UDF in SQL. Closes [#30856](https://github.com/ClickHouse/ClickHouse/issues/30856). [#31820](https://github.com/ClickHouse/ClickHouse/pull/31820) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix crash when the function `dictGet` with a type argument is used for a dictionary attribute whose type is `Nullable`. Fixes [#30980](https://github.com/ClickHouse/ClickHouse/issues/30980). [#31800](https://github.com/ClickHouse/ClickHouse/pull/31800) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix crash with empty result of ODBC query (with some ODBC drivers). Closes [#31465](https://github.com/ClickHouse/ClickHouse/issues/31465). [#31766](https://github.com/ClickHouse/ClickHouse/pull/31766) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix disabling the query profiler (with `query_profiler_real_time_period_ns>0`/`query_profiler_cpu_time_period_ns>0` the query profiler could stay enabled even after the query finished). [#31740](https://github.com/ClickHouse/ClickHouse/pull/31740) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed rare segfault on concurrent `ATTACH PARTITION` queries. [#31738](https://github.com/ClickHouse/ClickHouse/pull/31738) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix race in JSONEachRowWithProgress output format when data and lines with progress are mixed in output. [#31736](https://github.com/ClickHouse/ClickHouse/pull/31736) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fixed `there are no such cluster here` error on execution of `ON CLUSTER` query if specified cluster name is name of `Replicated` database. [#31723](https://github.com/ClickHouse/ClickHouse/pull/31723) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fix exception on some of the applications of `decrypt` function on Nullable columns. This closes [#31662](https://github.com/ClickHouse/ClickHouse/issues/31662). This closes [#31426](https://github.com/ClickHouse/ClickHouse/issues/31426). [#31707](https://github.com/ClickHouse/ClickHouse/pull/31707) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed function ngrams when string contains UTF-8 characters. [#31706](https://github.com/ClickHouse/ClickHouse/pull/31706) ([yandd](https://github.com/yandd)).
|
||||||
|
* Settings `input_format_allow_errors_num` and `input_format_allow_errors_ratio` did not work for parsing of domain types, such as `IPv4`, it's fixed. Fixes [#31686](https://github.com/ClickHouse/ClickHouse/issues/31686). [#31697](https://github.com/ClickHouse/ClickHouse/pull/31697) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fixed null pointer exception in `MATERIALIZE COLUMN`. [#31679](https://github.com/ClickHouse/ClickHouse/pull/31679) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* The `RENAME TABLE` query worked incorrectly on an attempt to rename a DDL dictionary in an `Ordinary` database; it's fixed. [#31638](https://github.com/ClickHouse/ClickHouse/pull/31638) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Implement `sparkbar` aggregate function as it was intended, see [#26175 (comment)](https://github.com/ClickHouse/ClickHouse/issues/26175#issuecomment-960353867) and [this comment](https://github.com/ClickHouse/ClickHouse/issues/26175#issuecomment-961155065). [#31624](https://github.com/ClickHouse/ClickHouse/pull/31624) ([小路](https://github.com/nicelulu)).
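  A small sketch, assuming the `sparkbar(width)(x, y)` signature; the data is synthetic:

  ```sql
  -- Renders y = x^2 over 9 buckets as a unicode bar string.
  SELECT sparkbar(9)(number, number * number) FROM numbers(10);
  ```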
|
||||||
|
* Fix invalid generated JSON when only column names contain invalid UTF-8 sequences. [#31534](https://github.com/ClickHouse/ClickHouse/pull/31534) ([Kevin Michel](https://github.com/kmichel-aiven)).
|
||||||
|
* Disable `partial_merge_join_left_table_buffer_bytes` until the bug in this optimization is fixed (see [#31009](https://github.com/ClickHouse/ClickHouse/issues/31009)). Remove the redundant option `partial_merge_join_optimizations`. [#31528](https://github.com/ClickHouse/ClickHouse/pull/31528) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix progress for short `INSERT SELECT` queries. [#31510](https://github.com/ClickHouse/ClickHouse/pull/31510) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix wrong behavior with group by and positional arguments. Closes [#31280 (comment)](https://github.com/ClickHouse/ClickHouse/issues/31280#issuecomment-968696186). [#31420](https://github.com/ClickHouse/ClickHouse/pull/31420) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Resolve `nullptr` in STS credentials provider for S3. [#31409](https://github.com/ClickHouse/ClickHouse/pull/31409) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||||
|
* Remove `notLike` function from index analysis, because it was wrong. [#31169](https://github.com/ClickHouse/ClickHouse/pull/31169) ([sundyli](https://github.com/sundy-li)).
|
||||||
|
* Fix a bug in Keeper which could lead to an inability to start when some coordination logs were lost and there is a snapshot more recent than the latest log. [#31150](https://github.com/ClickHouse/ClickHouse/pull/31150) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Rewrite the right distributed table in a local JOIN. Solves [#25809](https://github.com/ClickHouse/ClickHouse/issues/25809). [#31105](https://github.com/ClickHouse/ClickHouse/pull/31105) ([abel-cheng](https://github.com/abel-cheng)).
|
||||||
|
* Fix `Merge` table with aliases and where (it did not work before at all). Closes [#28802](https://github.com/ClickHouse/ClickHouse/issues/28802). [#31044](https://github.com/ClickHouse/ClickHouse/pull/31044) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix JSON_VALUE/JSON_QUERY with quoted identifiers. This allows to have spaces in json path. Closes [#30971](https://github.com/ClickHouse/ClickHouse/issues/30971). [#31003](https://github.com/ClickHouse/ClickHouse/pull/31003) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Using the `formatRow` function with non-row-oriented formats led to a segfault. Using this function with such formats is no longer allowed (because it doesn't make sense). [#31001](https://github.com/ClickHouse/ClickHouse/pull/31001) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix bug which broke select queries if they happened after dropping materialized view. Found in [#30691](https://github.com/ClickHouse/ClickHouse/issues/30691). [#30997](https://github.com/ClickHouse/ClickHouse/pull/30997) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Skip the `max_partition_size_to_drop` check in case of ATTACH PARTITION ... FROM and MOVE PARTITION ... [#30995](https://github.com/ClickHouse/ClickHouse/pull/30995) ([Amr Alaa](https://github.com/amralaa-MSFT)).
|
||||||
|
* Fix some corner cases with `INTERSECT` and `EXCEPT` operators. Closes [#30803](https://github.com/ClickHouse/ClickHouse/issues/30803). [#30965](https://github.com/ClickHouse/ClickHouse/pull/30965) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
|
* Fix incorrect filtering result on non-x86 builds. This closes [#31417](https://github.com/ClickHouse/ClickHouse/issues/31417). This closes [#31524](https://github.com/ClickHouse/ClickHouse/issues/31524). [#31574](https://github.com/ClickHouse/ClickHouse/pull/31574) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Make ClickHouse build fully reproducible (byte identical on different machines). This closes [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#31899](https://github.com/ClickHouse/ClickHouse/pull/31899) ([alexey-milovidov](https://github.com/alexey-milovidov)). Remove filesystem path to the build directory from binaries to enable reproducible builds. This needed for [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#31838](https://github.com/ClickHouse/ClickHouse/pull/31838) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Use our own CMakeLists for `zlib-ng`, `cassandra`, `mariadb-connector-c` and `xz`, `re2`, `sentry`, `gsasl`, `arrow`, `protobuf`. This is needed for [#20151](https://github.com/ClickHouse/ClickHouse/issues/20151). Part of [#9226](https://github.com/ClickHouse/ClickHouse/issues/9226). A small step towards removal of annoying trash from the build system. [#30599](https://github.com/ClickHouse/ClickHouse/pull/30599) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Hermetic builds: use a fixed version of libc and make sure that no source or binary files from the host OS are used during build. This closes [#27133](https://github.com/ClickHouse/ClickHouse/issues/27133). This closes [#21435](https://github.com/ClickHouse/ClickHouse/issues/21435). This closes [#30462](https://github.com/ClickHouse/ClickHouse/issues/30462). [#30011](https://github.com/ClickHouse/ClickHouse/pull/30011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Adding function `getFuzzerData()` to easily fuzz particular functions. This closes [#23227](https://github.com/ClickHouse/ClickHouse/issues/23227). [#27526](https://github.com/ClickHouse/ClickHouse/pull/27526) ([Alexey Boykov](https://github.com/mathalex)).
|
||||||
|
* More correct setup of capabilities inside Docker. [#31802](https://github.com/ClickHouse/ClickHouse/pull/31802) ([Constantine Peresypkin](https://github.com/pkit)).
|
||||||
|
* Enable clang `-fstrict-vtable-pointers`, `-fwhole-program-vtables` compile options. [#20151](https://github.com/ClickHouse/ClickHouse/pull/20151) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Avoid downloading toolchain tarballs for cross-compiling for FreeBSD. [#31672](https://github.com/ClickHouse/ClickHouse/pull/31672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Initial support for RISC-V. See development/build-cross-riscv for quirks and the build command that was tested. [#31309](https://github.com/ClickHouse/ClickHouse/pull/31309) ([Vladimir Smirnov](https://github.com/Civil)).
|
||||||
|
* Support compilation on ARM machines with the parameter `-DENABLE_TESTS=OFF`. [#31007](https://github.com/ClickHouse/ClickHouse/pull/31007) ([zhanghuajie](https://github.com/zhanghuajieHIT)).
|
||||||
|
|
||||||
|
|
||||||
### ClickHouse release v21.11, 2021-11-09
|
### ClickHouse release v21.11, 2021-11-09
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
@ -149,6 +149,10 @@ if (ENABLE_FUZZING)
|
|||||||
set (ENABLE_JEMALLOC 0)
|
set (ENABLE_JEMALLOC 0)
|
||||||
set (ENABLE_CHECK_HEAVY_BUILDS 1)
|
set (ENABLE_CHECK_HEAVY_BUILDS 1)
|
||||||
set (GLIBC_COMPATIBILITY OFF)
|
set (GLIBC_COMPATIBILITY OFF)
|
||||||
|
|
||||||
|
# For codegen_select_fuzzer
|
||||||
|
set (ENABLE_PROTOBUF 1)
|
||||||
|
set (USE_INTERNAL_PROTOBUF_LIBRARY 1)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Global libraries
|
# Global libraries
|
||||||
|
@ -24,8 +24,6 @@ set (SRCS
|
|||||||
|
|
||||||
if (ENABLE_REPLXX)
|
if (ENABLE_REPLXX)
|
||||||
list (APPEND SRCS ReplxxLineReader.cpp)
|
list (APPEND SRCS ReplxxLineReader.cpp)
|
||||||
elseif (ENABLE_READLINE)
|
|
||||||
list (APPEND SRCS ReadlineLineReader.cpp)
|
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (USE_DEBUG_HELPERS)
|
if (USE_DEBUG_HELPERS)
|
||||||
@ -52,28 +50,6 @@ if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
|
|||||||
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
|
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Allow explicit fallback to readline
|
|
||||||
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
|
|
||||||
message (STATUS "Attempt to fallback to readline explicitly")
|
|
||||||
set (READLINE_PATHS "/usr/local/opt/readline/lib")
|
|
||||||
# First try find custom lib for macos users (default lib without history support)
|
|
||||||
find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS} NO_DEFAULT_PATH)
|
|
||||||
if (NOT READLINE_LIB)
|
|
||||||
find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS})
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
set(READLINE_INCLUDE_PATHS "/usr/local/opt/readline/include")
|
|
||||||
find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS} NO_DEFAULT_PATH)
|
|
||||||
if (NOT READLINE_INCLUDE_DIR)
|
|
||||||
find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS})
|
|
||||||
endif ()
|
|
||||||
if (READLINE_INCLUDE_DIR AND READLINE_LIB)
|
|
||||||
target_link_libraries(common PUBLIC ${READLINE_LIB})
|
|
||||||
target_compile_definitions(common PUBLIC USE_READLINE=1)
|
|
||||||
message (STATUS "Using readline: ${READLINE_INCLUDE_DIR} : ${READLINE_LIB}")
|
|
||||||
endif ()
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
target_link_libraries (common
|
target_link_libraries (common
|
||||||
PUBLIC
|
PUBLIC
|
||||||
${CITYHASH_LIBRARIES}
|
${CITYHASH_LIBRARIES}
|
||||||
|
@ -10,16 +10,6 @@
|
|||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
|
|
||||||
|
|
||||||
#ifdef OS_LINUX
|
|
||||||
/// We can detect if code is linked with one or another readline variants or open the library dynamically.
|
|
||||||
# include <dlfcn.h>
|
|
||||||
extern "C"
|
|
||||||
{
|
|
||||||
char * readline(const char *) __attribute__((__weak__));
|
|
||||||
char * (*readline_ptr)(const char *) = readline;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef HAS_RESERVED_IDENTIFIER
|
#ifdef HAS_RESERVED_IDENTIFIER
|
||||||
#pragma clang diagnostic ignored "-Wreserved-identifier"
|
#pragma clang diagnostic ignored "-Wreserved-identifier"
|
||||||
#endif
|
#endif
|
||||||
@ -152,33 +142,6 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt)
|
|||||||
{
|
{
|
||||||
input.clear();
|
input.clear();
|
||||||
|
|
||||||
#ifdef OS_LINUX
|
|
||||||
if (!readline_ptr)
|
|
||||||
{
|
|
||||||
for (const auto * name : {"libreadline.so", "libreadline.so.0", "libeditline.so", "libeditline.so.0"})
|
|
||||||
{
|
|
||||||
void * dl_handle = dlopen(name, RTLD_LAZY);
|
|
||||||
if (dl_handle)
|
|
||||||
{
|
|
||||||
readline_ptr = reinterpret_cast<char * (*)(const char *)>(dlsym(dl_handle, "readline"));
|
|
||||||
if (readline_ptr)
|
|
||||||
{
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Minimal support for readline
|
|
||||||
if (readline_ptr)
|
|
||||||
{
|
|
||||||
char * line_read = (*readline_ptr)(prompt.c_str());
|
|
||||||
if (!line_read)
|
|
||||||
return ABORT;
|
|
||||||
input = line_read;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
#endif
|
|
||||||
{
|
{
|
||||||
std::cout << prompt;
|
std::cout << prompt;
|
||||||
std::getline(std::cin, input);
|
std::getline(std::cin, input);
|
||||||
|
@ -53,7 +53,6 @@ protected:
|
|||||||
|
|
||||||
String input;
|
String input;
|
||||||
|
|
||||||
private:
|
|
||||||
bool multiline;
|
bool multiline;
|
||||||
|
|
||||||
Patterns extenders;
|
Patterns extenders;
|
||||||
|
@ -1,187 +0,0 @@
|
|||||||
#include <base/ReadlineLineReader.h>
|
|
||||||
#include <base/errnoToString.h>
|
|
||||||
#include <base/scope_guard.h>
|
|
||||||
|
|
||||||
#include <errno.h>
|
|
||||||
#include <signal.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
namespace
|
|
||||||
{
|
|
||||||
|
|
||||||
/// Trim ending whitespace inplace
|
|
||||||
void trim(String & s)
|
|
||||||
{
|
|
||||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
static const LineReader::Suggest * suggest;
|
|
||||||
|
|
||||||
/// Points to current word to suggest.
|
|
||||||
static LineReader::Suggest::Words::const_iterator pos;
|
|
||||||
/// Points after the last possible match.
|
|
||||||
static LineReader::Suggest::Words::const_iterator end;
|
|
||||||
|
|
||||||
/// Set iterators to the matched range of words if any.
|
|
||||||
static void findRange(const char * prefix, size_t prefix_length)
|
|
||||||
{
|
|
||||||
std::string prefix_str(prefix);
|
|
||||||
if (auto completions = suggest->getCompletions(prefix_str, prefix_length))
|
|
||||||
std::tie(pos, end) = *completions;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Iterates through matched range.
|
|
||||||
static char * nextMatch()
|
|
||||||
{
|
|
||||||
if (pos >= end)
|
|
||||||
return nullptr;
|
|
||||||
|
|
||||||
/// readline will free memory by itself.
|
|
||||||
char * word = strdup(pos->c_str());
|
|
||||||
++pos;
|
|
||||||
return word;
|
|
||||||
}
|
|
||||||
|
|
||||||
static char * generate(const char * text, int state)
|
|
||||||
{
|
|
||||||
if (!suggest->ready)
|
|
||||||
return nullptr;
|
|
||||||
if (state == 0)
|
|
||||||
findRange(text, strlen(text));
|
|
||||||
|
|
||||||
/// Do not append whitespace after word. For unknown reason, rl_completion_append_character = '\0' does not work.
|
|
||||||
rl_completion_suppress_append = 1;
|
|
||||||
|
|
||||||
return nextMatch();
|
|
||||||
};
|
|
||||||
|
|
||||||
ReadlineLineReader::ReadlineLineReader(
|
|
||||||
const Suggest & suggest_, const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)
|
|
||||||
: LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_))
|
|
||||||
{
|
|
||||||
suggest = &suggest_;
|
|
||||||
|
|
||||||
if (!history_file_path.empty())
|
|
||||||
{
|
|
||||||
int res = read_history(history_file_path.c_str());
|
|
||||||
if (res)
|
|
||||||
std::cerr << "Cannot read history from file " + history_file_path + ": "+ errnoToString(errno) << std::endl;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Added '.' to the default list. Because it is used to separate database and table.
|
|
||||||
rl_basic_word_break_characters = word_break_characters;
|
|
||||||
|
|
||||||
/// Not append whitespace after single suggestion. Because whitespace after function name is meaningless.
|
|
||||||
rl_completion_append_character = '\0';
|
|
||||||
|
|
||||||
rl_completion_entry_function = generate;
|
|
||||||
|
|
||||||
/// Install Ctrl+C signal handler that will be used in interactive mode.
|
|
||||||
|
|
||||||
if (rl_initialize())
|
|
||||||
throw std::runtime_error("Cannot initialize readline");
|
|
||||||
|
|
||||||
auto clear_prompt_or_exit = [](int)
|
|
||||||
{
|
|
||||||
/// This is signal safe.
|
|
||||||
ssize_t res = write(STDOUT_FILENO, "\n", 1);
|
|
||||||
|
|
||||||
/// Allow to quit client while query is in progress by pressing Ctrl+C twice.
|
|
||||||
/// (First press to Ctrl+C will try to cancel query by InterruptListener).
|
|
||||||
if (res == 1 && rl_line_buffer[0] && !RL_ISSTATE(RL_STATE_DONE))
|
|
||||||
{
|
|
||||||
rl_replace_line("", 0);
|
|
||||||
if (rl_forced_update_display())
|
|
||||||
_exit(0);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
/// A little dirty, but we struggle to find better way to correctly
|
|
||||||
/// force readline to exit after returning from the signal handler.
|
|
||||||
_exit(0);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if (signal(SIGINT, clear_prompt_or_exit) == SIG_ERR)
|
|
||||||
throw std::runtime_error(std::string("Cannot set signal handler for readline: ") + errnoToString(errno));
|
|
||||||
|
|
||||||
rl_variable_bind("completion-ignore-case", "on");
|
|
||||||
// TODO: it doesn't work
|
|
||||||
// history_write_timestamps = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
ReadlineLineReader::~ReadlineLineReader()
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
LineReader::InputStatus ReadlineLineReader::readOneLine(const String & prompt)
|
|
||||||
{
|
|
||||||
input.clear();
|
|
||||||
|
|
||||||
const char* cinput = readline(prompt.c_str());
|
|
||||||
if (cinput == nullptr)
|
|
||||||
return (errno != EAGAIN) ? ABORT : RESET_LINE;
|
|
||||||
input = cinput;
|
|
||||||
|
|
||||||
trim(input);
|
|
||||||
return INPUT_LINE;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ReadlineLineReader::addToHistory(const String & line)
|
|
||||||
{
|
|
||||||
add_history(line.c_str());
|
|
||||||
|
|
||||||
// Flush changes to the disk
|
|
||||||
// NOTE readline builds a buffer of all the lines to write, and write them in one syscall.
|
|
||||||
// Thus there is no need to lock the history file here.
|
|
||||||
write_history(history_file_path.c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
#if RL_VERSION_MAJOR >= 7
|
|
||||||
|
|
||||||
#define BRACK_PASTE_PREF "\033[200~"
|
|
||||||
#define BRACK_PASTE_SUFF "\033[201~"
|
|
||||||
|
|
||||||
#define BRACK_PASTE_LAST '~'
|
|
||||||
#define BRACK_PASTE_SLEN 6
|
|
||||||
|
|
||||||
/// This handler bypasses some unused macro/event checkings and remove trailing newlines before insertion.
|
|
||||||
static int clickhouse_rl_bracketed_paste_begin(int /* count */, int /* key */)
|
|
||||||
{
|
|
||||||
std::string buf;
|
|
||||||
buf.reserve(128);
|
|
||||||
|
|
||||||
RL_SETSTATE(RL_STATE_MOREINPUT);
|
|
||||||
SCOPE_EXIT(RL_UNSETSTATE(RL_STATE_MOREINPUT));
|
|
||||||
int c;
|
|
||||||
while ((c = rl_read_key()) >= 0)
|
|
||||||
{
|
|
||||||
if (c == '\r')
|
|
||||||
c = '\n';
|
|
||||||
buf.push_back(c);
|
|
||||||
if (buf.size() >= BRACK_PASTE_SLEN && c == BRACK_PASTE_LAST && buf.substr(buf.size() - BRACK_PASTE_SLEN) == BRACK_PASTE_SUFF)
|
|
||||||
{
|
|
||||||
buf.resize(buf.size() - BRACK_PASTE_SLEN);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
trim(buf);
|
|
||||||
return static_cast<size_t>(rl_insert_text(buf.c_str())) == buf.size() ? 0 : 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void ReadlineLineReader::enableBracketedPaste()
|
|
||||||
{
|
|
||||||
#if RL_VERSION_MAJOR >= 7
|
|
||||||
rl_variable_bind("enable-bracketed-paste", "on");
|
|
||||||
|
|
||||||
/// Use our bracketed paste handler to get better user experience. See comments above.
|
|
||||||
rl_bind_keyseq(BRACK_PASTE_PREF, clickhouse_rl_bracketed_paste_begin);
|
|
||||||
#endif
|
|
||||||
};
|
|
@ -1,19 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include "LineReader.h"
|
|
||||||
|
|
||||||
#include <readline/readline.h>
|
|
||||||
#include <readline/history.h>
|
|
||||||
|
|
||||||
class ReadlineLineReader : public LineReader
|
|
||||||
{
|
|
||||||
public:
|
|
||||||
ReadlineLineReader(const Suggest & suggest, const String & history_file_path, bool multiline, Patterns extenders_, Patterns delimiters_);
|
|
||||||
~ReadlineLineReader() override;
|
|
||||||
|
|
||||||
void enableBracketedPaste() override;
|
|
||||||
|
|
||||||
private:
|
|
||||||
InputStatus readOneLine(const String & prompt) override;
|
|
||||||
void addToHistory(const String & line) override;
|
|
||||||
};
|
|
@ -22,7 +22,14 @@ namespace
|
|||||||
/// Trim ending whitespace inplace
|
/// Trim ending whitespace inplace
|
||||||
void trim(String & s)
|
void trim(String & s)
|
||||||
{
|
{
|
||||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
|
s.erase(std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }).base(), s.end());
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if string ends with given character after skipping whitespaces.
|
||||||
|
bool ends_with(const std::string_view & s, const std::string_view & p)
|
||||||
|
{
|
||||||
|
auto ss = std::string_view(s.data(), s.rend() - std::find_if(s.rbegin(), s.rend(), [](unsigned char ch) { return !std::isspace(ch); }));
|
||||||
|
return ss.ends_with(p);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string getEditor()
|
std::string getEditor()
|
||||||
@ -189,8 +196,28 @@ ReplxxLineReader::ReplxxLineReader(
|
|||||||
rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
|
rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
|
||||||
rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
|
rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
|
||||||
|
|
||||||
|
auto commit_action = [this](char32_t code)
|
||||||
|
{
|
||||||
|
std::string_view str = rx.get_state().text();
|
||||||
|
|
||||||
|
/// Always commit line when we see extender at the end. It will start a new prompt.
|
||||||
|
for (const auto * extender : extenders)
|
||||||
|
if (ends_with(str, extender))
|
||||||
|
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||||
|
|
||||||
|
/// If we see an delimiter at the end, commit right away.
|
||||||
|
for (const auto * delimiter : delimiters)
|
||||||
|
if (ends_with(str, delimiter))
|
||||||
|
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||||
|
|
||||||
|
/// If we allow multiline and there is already something in the input, start a newline.
|
||||||
|
if (multiline && !input.empty())
|
||||||
|
return rx.invoke(Replxx::ACTION::NEW_LINE, code);
|
||||||
|
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||||
|
};
|
||||||
/// bind C-j to ENTER action.
|
/// bind C-j to ENTER action.
|
||||||
rx.bind_key(Replxx::KEY::control('J'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMMIT_LINE, code); });
|
rx.bind_key(Replxx::KEY::control('J'), commit_action);
|
||||||
|
rx.bind_key(Replxx::KEY::ENTER, commit_action);
|
||||||
|
|
||||||
/// By default COMPLETE_NEXT/COMPLETE_PREV was binded to C-p/C-n, re-bind
|
/// By default COMPLETE_NEXT/COMPLETE_PREV was binded to C-p/C-n, re-bind
|
||||||
/// to M-P/M-N (that was used for HISTORY_COMMON_PREFIX_SEARCH before, but
|
/// to M-P/M-N (that was used for HISTORY_COMMON_PREFIX_SEARCH before, but
|
||||||
|
@ -2,11 +2,11 @@
|
|||||||
|
|
||||||
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
||||||
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
||||||
SET(VERSION_REVISION 54457)
|
SET(VERSION_REVISION 54458)
|
||||||
SET(VERSION_MAJOR 21)
|
SET(VERSION_MAJOR 21)
|
||||||
SET(VERSION_MINOR 12)
|
SET(VERSION_MINOR 13)
|
||||||
SET(VERSION_PATCH 1)
|
SET(VERSION_PATCH 1)
|
||||||
SET(VERSION_GITHASH 503a418dedf0011e9040c3a1b6913e0b5488be4c)
|
SET(VERSION_GITHASH 4cc45c1e15912ee300bca7cc8b8da2b888a70e2a)
|
||||||
SET(VERSION_DESCRIBE v21.12.1.1-prestable)
|
SET(VERSION_DESCRIBE v21.13.1.1-prestable)
|
||||||
SET(VERSION_STRING 21.12.1.1)
|
SET(VERSION_STRING 21.13.1.1)
|
||||||
# end of autochange
|
# end of autochange
|
||||||
|
@ -91,6 +91,9 @@ endif ()
|
|||||||
if (LINKER_NAME)
|
if (LINKER_NAME)
|
||||||
if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
|
if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
|
||||||
find_program (LLD_PATH NAMES ${LINKER_NAME})
|
find_program (LLD_PATH NAMES ${LINKER_NAME})
|
||||||
|
if (NOT LLD_PATH)
|
||||||
|
message (FATAL_ERROR "Using linker ${LINKER_NAME} but can't find its path.")
|
||||||
|
endif ()
|
||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}")
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_PATH}")
|
||||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}")
|
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LLD_PATH}")
|
||||||
else ()
|
else ()
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
if (SANITIZE OR NOT (
|
if (SANITIZE OR NOT (
|
||||||
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
|
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
|
||||||
(OS_DARWIN AND (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" OR CMAKE_BUILD_TYPE STREQUAL "Debug"))
|
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
|
||||||
))
|
))
|
||||||
if (ENABLE_JEMALLOC)
|
if (ENABLE_JEMALLOC)
|
||||||
message (${RECONFIGURE_MESSAGE_LEVEL}
|
message (${RECONFIGURE_MESSAGE_LEVEL}
|
||||||
|
2
contrib/libpqxx
vendored
2
contrib/libpqxx
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 357608d11b7a1961c3fb7db2ef9a5dbb2e87da77
|
Subproject commit 63e20f9485b8cbeabf99008123248fc9f033e766
|
2
contrib/poco
vendored
2
contrib/poco
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 258b9ba6cd245ff88e9346f75c43464c403f329d
|
Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
|
@ -8,7 +8,7 @@ if (NOT ENABLE_REPLXX)
|
|||||||
add_library(replxx INTERFACE)
|
add_library(replxx INTERFACE)
|
||||||
target_compile_definitions(replxx INTERFACE USE_REPLXX=0)
|
target_compile_definitions(replxx INTERFACE USE_REPLXX=0)
|
||||||
|
|
||||||
message (STATUS "Not using replxx (Beware! Runtime fallback to readline is possible!)")
|
message (STATUS "Not using replxx")
|
||||||
return()
|
return()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
@ -202,10 +202,10 @@
|
|||||||
#define HAVE_READDIR 1
|
#define HAVE_READDIR 1
|
||||||
|
|
||||||
/* Add readline support */
|
/* Add readline support */
|
||||||
#define HAVE_READLINE 1
|
/* #undef HAVE_READLINE */
|
||||||
|
|
||||||
/* Define to 1 if you have the <readline/history.h> header file. */
|
/* Define to 1 if you have the <readline/history.h> header file. */
|
||||||
#define HAVE_READLINE_HISTORY_H 1
|
/* #undef HAVE_READLINE_HISTORY_H */
|
||||||
|
|
||||||
/* Use the scandir lib */
|
/* Use the scandir lib */
|
||||||
/* #undef HAVE_SCANDIR */
|
/* #undef HAVE_SCANDIR */
|
||||||
|
4
debian/changelog
vendored
4
debian/changelog
vendored
@ -1,5 +1,5 @@
|
|||||||
clickhouse (21.12.1.1) unstable; urgency=low
|
clickhouse (21.13.1.1) unstable; urgency=low
|
||||||
|
|
||||||
* Modified source code
|
* Modified source code
|
||||||
|
|
||||||
-- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 02 Nov 2021 00:56:42 +0300
|
-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Dec 2021 00:32:58 +0300
|
||||||
|
@ -5,7 +5,7 @@ ARG apt_archive="http://archive.ubuntu.com"
|
|||||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||||
|
|
||||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||||
ARG version=21.12.1.*
|
ARG version=21.13.1.*
|
||||||
|
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get install --yes --no-install-recommends \
|
&& apt-get install --yes --no-install-recommends \
|
||||||
|
@ -52,7 +52,6 @@ RUN apt-get update \
|
|||||||
llvm-${LLVM_VERSION} \
|
llvm-${LLVM_VERSION} \
|
||||||
llvm-${LLVM_VERSION}-dev \
|
llvm-${LLVM_VERSION}-dev \
|
||||||
libicu-dev \
|
libicu-dev \
|
||||||
libreadline-dev \
|
|
||||||
moreutils \
|
moreutils \
|
||||||
ninja-build \
|
ninja-build \
|
||||||
pigz \
|
pigz \
|
||||||
|
@ -31,5 +31,6 @@ do
|
|||||||
mv "$FUZZER_PATH" /output/fuzzers
|
mv "$FUZZER_PATH" /output/fuzzers
|
||||||
done
|
done
|
||||||
|
|
||||||
|
|
||||||
tar -zcvf /output/fuzzers.tar.gz /output/fuzzers
|
tar -zcvf /output/fuzzers.tar.gz /output/fuzzers
|
||||||
rm -rf /output/fuzzers
|
rm -rf /output/fuzzers
|
||||||
|
@ -5,7 +5,7 @@ ARG apt_archive="http://archive.ubuntu.com"
|
|||||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||||
|
|
||||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||||
ARG version=21.12.1.*
|
ARG version=21.13.1.*
|
||||||
ARG gosu_ver=1.10
|
ARG gosu_ver=1.10
|
||||||
|
|
||||||
# set non-empty deb_location_url url to create a docker image
|
# set non-empty deb_location_url url to create a docker image
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
FROM ubuntu:18.04
|
FROM ubuntu:18.04
|
||||||
|
|
||||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||||
ARG version=21.12.1.*
|
ARG version=21.13.1.*
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y apt-transport-https dirmngr && \
|
apt-get install -y apt-transport-https dirmngr && \
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086,SC2001,SC2046
|
# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031
|
||||||
|
|
||||||
set -eux
|
set -eux
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
@ -35,7 +35,7 @@ function clone
|
|||||||
fi
|
fi
|
||||||
git diff --name-only master HEAD | tee ci-changed-files.txt
|
git diff --name-only master HEAD | tee ci-changed-files.txt
|
||||||
else
|
else
|
||||||
if [ -v COMMIT_SHA ]; then
|
if [ -v SHA_TO_TEST ]; then
|
||||||
git fetch --depth 2 origin "$SHA_TO_TEST"
|
git fetch --depth 2 origin "$SHA_TO_TEST"
|
||||||
git checkout "$SHA_TO_TEST"
|
git checkout "$SHA_TO_TEST"
|
||||||
echo "Checked out nominal SHA $SHA_TO_TEST for master"
|
echo "Checked out nominal SHA $SHA_TO_TEST for master"
|
||||||
@ -165,7 +165,7 @@ thread apply all backtrace
|
|||||||
continue
|
continue
|
||||||
" > script.gdb
|
" > script.gdb
|
||||||
|
|
||||||
gdb -batch -command script.gdb -p $server_pid &
|
sudo gdb -batch -command script.gdb -p $server_pid &
|
||||||
|
|
||||||
# Check connectivity after we attach gdb, because it might cause the server
|
# Check connectivity after we attach gdb, because it might cause the server
|
||||||
# to freeze and the fuzzer will fail.
|
# to freeze and the fuzzer will fail.
|
||||||
@ -189,6 +189,7 @@ continue
|
|||||||
--receive_data_timeout_ms=10000 \
|
--receive_data_timeout_ms=10000 \
|
||||||
--stacktrace \
|
--stacktrace \
|
||||||
--query-fuzzer-runs=1000 \
|
--query-fuzzer-runs=1000 \
|
||||||
|
--testmode \
|
||||||
--queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
|
--queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
|
||||||
$NEW_TESTS_OPT \
|
$NEW_TESTS_OPT \
|
||||||
> >(tail -n 100000 > fuzzer.log) \
|
> >(tail -n 100000 > fuzzer.log) \
|
||||||
|
@ -7,7 +7,6 @@ RUN apt-get update \
|
|||||||
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \
|
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \
|
||||||
tzdata \
|
tzdata \
|
||||||
python3 \
|
python3 \
|
||||||
libreadline-dev \
|
|
||||||
libicu-dev \
|
libicu-dev \
|
||||||
bsdutils \
|
bsdutils \
|
||||||
gdb \
|
gdb \
|
||||||
|
@ -21,7 +21,6 @@ RUN apt-get update \
|
|||||||
cgroupfs-mount \
|
cgroupfs-mount \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
tzdata \
|
tzdata \
|
||||||
libreadline-dev \
|
|
||||||
libicu-dev \
|
libicu-dev \
|
||||||
bsdutils \
|
bsdutils \
|
||||||
curl \
|
curl \
|
||||||
@ -76,7 +75,7 @@ RUN python3 -m pip install \
|
|||||||
minio \
|
minio \
|
||||||
protobuf \
|
protobuf \
|
||||||
psycopg2-binary==2.8.6 \
|
psycopg2-binary==2.8.6 \
|
||||||
pymongo \
|
pymongo==3.11.0 \
|
||||||
pytest \
|
pytest \
|
||||||
pytest-timeout \
|
pytest-timeout \
|
||||||
pytest-xdist \
|
pytest-xdist \
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
version: '2.3'
|
version: '2.3'
|
||||||
services:
|
services:
|
||||||
mongo1:
|
mongo1:
|
||||||
image: mongo:3.6
|
image: mongo:5.0
|
||||||
restart: always
|
restart: always
|
||||||
environment:
|
environment:
|
||||||
MONGO_INITDB_ROOT_USERNAME: root
|
MONGO_INITDB_ROOT_USERNAME: root
|
||||||
@ -9,3 +9,9 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT}
|
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT}
|
||||||
command: --profile=2 --verbose
|
command: --profile=2 --verbose
|
||||||
|
|
||||||
|
mongo2:
|
||||||
|
image: mongo:5.0
|
||||||
|
restart: always
|
||||||
|
ports:
|
||||||
|
- "27018:27017"
|
||||||
|
@ -61,7 +61,7 @@ function configure
|
|||||||
cp -rv right/config left ||:
|
cp -rv right/config left ||:
|
||||||
|
|
||||||
# Start a temporary server to rename the tables
|
# Start a temporary server to rename the tables
|
||||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||||
echo all killed
|
echo all killed
|
||||||
|
|
||||||
set -m # Spawn temporary in its own process groups
|
set -m # Spawn temporary in its own process groups
|
||||||
@ -88,7 +88,7 @@ function configure
|
|||||||
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
|
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
|
||||||
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:
|
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:
|
||||||
|
|
||||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||||
echo all killed
|
echo all killed
|
||||||
|
|
||||||
# Make copies of the original db for both servers. Use hardlinks instead
|
# Make copies of the original db for both servers. Use hardlinks instead
|
||||||
@ -106,7 +106,7 @@ function configure
|
|||||||
|
|
||||||
function restart
|
function restart
|
||||||
{
|
{
|
||||||
while killall clickhouse-server; do echo . ; sleep 1 ; done
|
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||||
echo all killed
|
echo all killed
|
||||||
|
|
||||||
# Change the jemalloc settings here.
|
# Change the jemalloc settings here.
|
||||||
@ -261,16 +261,24 @@ function run_tests
|
|||||||
# Use awk because bash doesn't support floating point arithmetic.
|
# Use awk because bash doesn't support floating point arithmetic.
|
||||||
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
|
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
|
||||||
|
|
||||||
|
(
|
||||||
|
set +x
|
||||||
|
argv=(
|
||||||
|
--host localhost localhost
|
||||||
|
--port "$LEFT_SERVER_PORT" "$RIGHT_SERVER_PORT"
|
||||||
|
--runs "$CHPC_RUNS"
|
||||||
|
--max-queries "$CHPC_MAX_QUERIES"
|
||||||
|
--profile-seconds "$profile_seconds"
|
||||||
|
|
||||||
|
"$test"
|
||||||
|
)
|
||||||
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
||||||
# The grep is to filter out set -x output and keep only time output.
|
# one more subshell to suppress trace output for "set +x"
|
||||||
# The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
|
(
|
||||||
{ \
|
time "$script_dir/perf.py" "${argv[@]}" > "$test_name-raw.tsv" 2> "$test_name-err.log"
|
||||||
time "$script_dir/perf.py" --host localhost localhost --port $LEFT_SERVER_PORT $RIGHT_SERVER_PORT \
|
) 2>>wall-clock-times.tsv >/dev/null \
|
||||||
--runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
|
|
||||||
--profile-seconds "$profile_seconds" \
|
|
||||||
-- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
|
|
||||||
} 2>&1 >/dev/null | tee >(grep -v ^+ >> "wall-clock-times.tsv") \
|
|
||||||
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
|
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
|
||||||
|
) 2>/dev/null
|
||||||
|
|
||||||
profile_seconds_left=$(awk -F' ' \
|
profile_seconds_left=$(awk -F' ' \
|
||||||
'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \
|
'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \
|
||||||
@ -278,8 +286,6 @@ function run_tests
|
|||||||
current_test=$((current_test + 1))
|
current_test=$((current_test + 1))
|
||||||
done
|
done
|
||||||
|
|
||||||
unset TIMEFORMAT
|
|
||||||
|
|
||||||
wait
|
wait
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -291,7 +297,7 @@ function get_profiles_watchdog
|
|||||||
|
|
||||||
for pid in $(pgrep -f clickhouse)
|
for pid in $(pgrep -f clickhouse)
|
||||||
do
|
do
|
||||||
gdb -p "$pid" --batch --ex "info proc all" --ex "thread apply all bt" --ex quit &> "$pid.gdb.log" &
|
sudo gdb -p "$pid" --batch --ex "info proc all" --ex "thread apply all bt" --ex quit &> "$pid.gdb.log" &
|
||||||
done
|
done
|
||||||
wait
|
wait
|
||||||
|
|
||||||
@ -1409,7 +1415,7 @@ case "$stage" in
|
|||||||
while env kill -- -$watchdog_pid ; do sleep 1; done
|
while env kill -- -$watchdog_pid ; do sleep 1; done
|
||||||
|
|
||||||
# Stop the servers to free memory for the subsequent query analysis.
|
# Stop the servers to free memory for the subsequent query analysis.
|
||||||
while killall clickhouse; do echo . ; sleep 1 ; done
|
while pkill clickhouse-serv; do echo . ; sleep 1 ; done
|
||||||
echo Servers stopped.
|
echo Servers stopped.
|
||||||
;&
|
;&
|
||||||
"analyze_queries")
|
"analyze_queries")
|
||||||
|
@ -354,11 +354,9 @@ for query_index in queries_to_run:
|
|||||||
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
|
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
|
||||||
|
|
||||||
if elapsed > args.max_query_seconds:
|
if elapsed > args.max_query_seconds:
|
||||||
# Stop processing pathologically slow queries, to avoid timing out
|
# Do not stop processing pathologically slow queries,
|
||||||
# the entire test task. This shouldn't really happen, so we don't
|
# since this may hide errors in other queries.
|
||||||
# need much handling for this case and can just exit.
|
|
||||||
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
|
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
|
||||||
exit(2)
|
|
||||||
|
|
||||||
# Be careful with the counter, after this line it's the next iteration
|
# Be careful with the counter, after this line it's the next iteration
|
||||||
# already.
|
# already.
|
||||||
|
@ -61,6 +61,7 @@ chmod 777 -R /var/lib/clickhouse
|
|||||||
clickhouse-client --query "SHOW DATABASES"
|
clickhouse-client --query "SHOW DATABASES"
|
||||||
|
|
||||||
clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
|
clickhouse-client --query "ATTACH DATABASE datasets ENGINE = Ordinary"
|
||||||
|
|
||||||
service clickhouse-server restart
|
service clickhouse-server restart
|
||||||
|
|
||||||
# Wait for server to start accepting connections
|
# Wait for server to start accepting connections
|
||||||
@ -109,15 +110,25 @@ function run_tests()
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
|
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \
|
||||||
|
--skip 00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" \
|
||||||
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
||||||
|
|
||||||
|
clickhouse-test --timeout 1200 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \
|
||||||
|
00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
}
|
}
|
||||||
|
|
||||||
export -f run_tests
|
export -f run_tests
|
||||||
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
||||||
|
|
||||||
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
echo "Files in current directory"
|
||||||
|
ls -la ./
|
||||||
|
echo "Files in root directory"
|
||||||
|
ls -la /
|
||||||
|
|
||||||
|
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||||
|
|
||||||
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
||||||
|
|
||||||
|
@ -49,7 +49,6 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
|||||||
ENV NUM_TRIES=1
|
ENV NUM_TRIES=1
|
||||||
ENV MAX_RUN_TIME=0
|
ENV MAX_RUN_TIME=0
|
||||||
|
|
||||||
|
|
||||||
# Download Minio-related binaries
|
# Download Minio-related binaries
|
||||||
RUN wget 'https://dl.min.io/server/minio/release/linux-amd64/minio' \
|
RUN wget 'https://dl.min.io/server/minio/release/linux-amd64/minio' \
|
||||||
&& chmod +x ./minio \
|
&& chmod +x ./minio \
|
||||||
|
@ -96,6 +96,13 @@ function run_tests()
|
|||||||
ADDITIONAL_OPTIONS+=('8')
|
ADDITIONAL_OPTIONS+=('8')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
|
||||||
|
ADDITIONAL_OPTIONS+=('--run-by-hash-num')
|
||||||
|
ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM")
|
||||||
|
ADDITIONAL_OPTIONS+=('--run-by-hash-total')
|
||||||
|
ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL")
|
||||||
|
fi
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
||||||
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
||||||
@ -108,7 +115,12 @@ export -f run_tests
|
|||||||
|
|
||||||
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
||||||
|
|
||||||
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
echo "Files in current directory"
|
||||||
|
ls -la ./
|
||||||
|
echo "Files in root directory"
|
||||||
|
ls -la /
|
||||||
|
|
||||||
|
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||||
|
|
||||||
clickhouse-client -q "system flush logs" ||:
|
clickhouse-client -q "system flush logs" ||:
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2094
|
# shellcheck disable=SC2094
|
||||||
# shellcheck disable=SC2086
|
# shellcheck disable=SC2086
|
||||||
|
# shellcheck disable=SC2024
|
||||||
|
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
@ -55,9 +56,41 @@ function configure()
|
|||||||
echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
|
echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
|
||||||
> /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml
|
> /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml
|
||||||
|
|
||||||
|
local total_mem
|
||||||
|
total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
|
||||||
|
total_mem=$(( total_mem*1024 )) # bytes
|
||||||
# Set maximum memory usage as half of total memory (less chance of OOM).
|
# Set maximum memory usage as half of total memory (less chance of OOM).
|
||||||
echo "<clickhouse><max_server_memory_usage_to_ram_ratio>0.5</max_server_memory_usage_to_ram_ratio></clickhouse>" \
|
#
|
||||||
> /etc/clickhouse-server/config.d/max_server_memory_usage_to_ram_ratio.xml
|
# But not via max_server_memory_usage but via max_memory_usage_for_user,
|
||||||
|
# so that we can override this setting and execute service queries, like:
|
||||||
|
# - hung check
|
||||||
|
# - show/drop database
|
||||||
|
# - ...
|
||||||
|
#
|
||||||
|
# So max_memory_usage_for_user will be a soft limit, and
|
||||||
|
# max_server_memory_usage will be hard limit, and queries that should be
|
||||||
|
# executed regardless memory limits will use max_memory_usage_for_user=0,
|
||||||
|
# instead of relying on max_untracked_memory
|
||||||
|
local max_server_mem
|
||||||
|
max_server_mem=$((total_mem*75/100)) # 75%
|
||||||
|
echo "Setting max_server_memory_usage=$max_server_mem"
|
||||||
|
cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
|
||||||
|
<clickhouse>
|
||||||
|
<max_server_memory_usage>${max_server_mem}</max_server_memory_usage>
|
||||||
|
</clickhouse>
|
||||||
|
EOL
|
||||||
|
local max_users_mem
|
||||||
|
max_users_mem=$((total_mem*50/100)) # 50%
|
||||||
|
echo "Setting max_memory_usage_for_user=$max_users_mem"
|
||||||
|
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
|
||||||
|
<clickhouse>
|
||||||
|
<profiles>
|
||||||
|
<default>
|
||||||
|
<max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
|
||||||
|
</default>
|
||||||
|
</profiles>
|
||||||
|
</clickhouse>
|
||||||
|
EOL
|
||||||
}
|
}
|
||||||
|
|
||||||
function stop()
|
function stop()
|
||||||
@ -110,7 +143,7 @@ quit
|
|||||||
# FIXME Hung check may work incorrectly because of attached gdb
|
# FIXME Hung check may work incorrectly because of attached gdb
|
||||||
# 1. False positives are possible
|
# 1. False positives are possible
|
||||||
# 2. We cannot attach another gdb to get stacktraces if some queries hung
|
# 2. We cannot attach another gdb to get stacktraces if some queries hung
|
||||||
gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" >> /test_output/gdb.log &
|
sudo gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" >> /test_output/gdb.log &
|
||||||
}
|
}
|
||||||
|
|
||||||
configure
|
configure
|
||||||
|
@ -75,6 +75,9 @@ def call_with_retry(query, timeout=30, retry_count=5):
|
|||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
def make_query_command(query):
|
||||||
|
return f"""clickhouse client -q "{query}" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0"""
|
||||||
|
|
||||||
|
|
||||||
def prepare_for_hung_check(drop_databases):
|
def prepare_for_hung_check(drop_databases):
|
||||||
# FIXME this function should not exist, but...
|
# FIXME this function should not exist, but...
|
||||||
@ -88,40 +91,41 @@ def prepare_for_hung_check(drop_databases):
|
|||||||
logging.info("Will terminate gdb (if any)")
|
logging.info("Will terminate gdb (if any)")
|
||||||
call_with_retry("kill -TERM $(pidof gdb)")
|
call_with_retry("kill -TERM $(pidof gdb)")
|
||||||
|
|
||||||
# Some tests set too low memory limit for default user and forget to reset in back.
|
call_with_retry(make_query_command('SELECT 1 FORMAT Null'))
|
||||||
# It may cause SYSTEM queries to fail, let's disable memory limit.
|
|
||||||
call_with_retry("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'")
|
|
||||||
|
|
||||||
# Some tests execute SYSTEM STOP MERGES or similar queries.
|
# Some tests execute SYSTEM STOP MERGES or similar queries.
|
||||||
# It may cause some ALTERs to hang.
|
# It may cause some ALTERs to hang.
|
||||||
# Possibly we should fix tests and forbid to use such queries without specifying table.
|
# Possibly we should fix tests and forbid to use such queries without specifying table.
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START MERGES'")
|
call_with_retry(make_query_command('SYSTEM START MERGES'))
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START DISTRIBUTED SENDS'")
|
call_with_retry(make_query_command('SYSTEM START DISTRIBUTED SENDS'))
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START TTL MERGES'")
|
call_with_retry(make_query_command('SYSTEM START TTL MERGES'))
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START MOVES'")
|
call_with_retry(make_query_command('SYSTEM START MOVES'))
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START FETCHES'")
|
call_with_retry(make_query_command('SYSTEM START FETCHES'))
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START REPLICATED SENDS'")
|
call_with_retry(make_query_command('SYSTEM START REPLICATED SENDS'))
|
||||||
call_with_retry("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'")
|
call_with_retry(make_query_command('SYSTEM START REPLICATION QUEUES'))
|
||||||
|
call_with_retry(make_query_command('SYSTEM DROP MARK CACHE'))
|
||||||
|
|
||||||
# Issue #21004, live views are experimental, so let's just suppress it
|
# Issue #21004, live views are experimental, so let's just suppress it
|
||||||
call_with_retry("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """)
|
call_with_retry(make_query_command("KILL QUERY WHERE upper(query) LIKE 'WATCH %'"))
|
||||||
|
|
||||||
# Kill other queries which known to be slow
|
# Kill other queries which known to be slow
|
||||||
# It's query from 01232_preparing_sets_race_condition_long, it may take up to 1000 seconds in slow builds
|
# It's query from 01232_preparing_sets_race_condition_long, it may take up to 1000 seconds in slow builds
|
||||||
call_with_retry("""clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" """)
|
call_with_retry(make_query_command("KILL QUERY WHERE query LIKE 'insert into tableB select %'"))
|
||||||
# Long query from 00084_external_agregation
|
# Long query from 00084_external_agregation
|
||||||
call_with_retry("""clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" """)
|
call_with_retry(make_query_command("KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'"))
|
||||||
|
|
||||||
if drop_databases:
|
if drop_databases:
|
||||||
for i in range(5):
|
for i in range(5):
|
||||||
try:
|
try:
|
||||||
# Here we try to drop all databases in async mode. If some queries really hung, than drop will hung too.
|
# Here we try to drop all databases in async mode. If some queries really hung, than drop will hung too.
|
||||||
# Otherwise we will get rid of queries which wait for background pool. It can take a long time on slow builds (more than 900 seconds).
|
# Otherwise we will get rid of queries which wait for background pool. It can take a long time on slow builds (more than 900 seconds).
|
||||||
databases = check_output('clickhouse client -q "SHOW DATABASES"', shell=True, timeout=30).decode('utf-8').strip().split()
|
#
|
||||||
|
# Also specify max_untracked_memory to allow 1GiB of memory to overcommit.
|
||||||
|
databases = check_output(make_query_command('SHOW DATABASES'), shell=True, timeout=30).decode('utf-8').strip().split()
|
||||||
for db in databases:
|
for db in databases:
|
||||||
if db == "system":
|
if db == "system":
|
||||||
continue
|
continue
|
||||||
command = f'clickhouse client -q "DROP DATABASE {db}"'
|
command = make_query_command(f'DROP DATABASE {db}')
|
||||||
# we don't wait for drop
|
# we don't wait for drop
|
||||||
Popen(command, shell=True)
|
Popen(command, shell=True)
|
||||||
break
|
break
|
||||||
@ -133,9 +137,15 @@ def prepare_for_hung_check(drop_databases):
|
|||||||
|
|
||||||
|
|
||||||
# Wait for last queries to finish if any, not longer than 300 seconds
|
# Wait for last queries to finish if any, not longer than 300 seconds
|
||||||
call("""clickhouse client -q "select sleepEachRow((
|
call(make_query_command("""
|
||||||
select maxOrDefault(300 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 300
|
select sleepEachRow((
|
||||||
) / 300) from numbers(300) format Null" """, shell=True, stderr=STDOUT, timeout=330)
|
select maxOrDefault(300 - elapsed) + 1
|
||||||
|
from system.processes
|
||||||
|
where query not like '%from system.processes%' and elapsed < 300
|
||||||
|
) / 300)
|
||||||
|
from numbers(300)
|
||||||
|
format Null
|
||||||
|
"""), shell=True, stderr=STDOUT, timeout=330)
|
||||||
|
|
||||||
# Even if all clickhouse-test processes are finished, there are probably some sh scripts,
|
# Even if all clickhouse-test processes are finished, there are probably some sh scripts,
|
||||||
# which still run some new queries. Let's ignore them.
|
# which still run some new queries. Let's ignore them.
|
||||||
@ -188,7 +198,24 @@ if __name__ == "__main__":
|
|||||||
if args.hung_check:
|
if args.hung_check:
|
||||||
have_long_running_queries = prepare_for_hung_check(args.drop_databases)
|
have_long_running_queries = prepare_for_hung_check(args.drop_databases)
|
||||||
logging.info("Checking if some queries hung")
|
logging.info("Checking if some queries hung")
|
||||||
cmd = "{} {} {}".format(args.test_cmd, "--hung-check", "00001_select_1")
|
cmd = ' '.join([args.test_cmd,
|
||||||
|
# Do not track memory allocations up to 1Gi,
|
||||||
|
# this will allow to ignore server memory limit (max_server_memory_usage) for this query.
|
||||||
|
#
|
||||||
|
# NOTE: memory_profiler_step should be also adjusted, because:
|
||||||
|
#
|
||||||
|
# untracked_memory_limit = min(settings.max_untracked_memory, settings.memory_profiler_step)
|
||||||
|
#
|
||||||
|
# NOTE: that if there will be queries with GROUP BY, this trick
|
||||||
|
# will not work due to CurrentMemoryTracker::check() from
|
||||||
|
# Aggregator code.
|
||||||
|
# But right now it should work, since neither hung check, nor 00001_select_1 has GROUP BY.
|
||||||
|
"--client-option", "max_untracked_memory=1Gi",
|
||||||
|
"--client-option", "max_memory_usage_for_user=0",
|
||||||
|
"--client-option", "memory_profiler_step=1Gi",
|
||||||
|
"--hung-check",
|
||||||
|
"00001_select_1"
|
||||||
|
])
|
||||||
res = call(cmd, shell=True, stderr=STDOUT)
|
res = call(cmd, shell=True, stderr=STDOUT)
|
||||||
hung_check_status = "No queries hung\tOK\n"
|
hung_check_status = "No queries hung\tOK\n"
|
||||||
if res != 0 and have_long_running_queries:
|
if res != 0 and have_long_running_queries:
|
||||||
|
@ -21,7 +21,6 @@ RUN apt-get update \
|
|||||||
cgroupfs-mount \
|
cgroupfs-mount \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
tzdata \
|
tzdata \
|
||||||
libreadline-dev \
|
|
||||||
libicu-dev \
|
libicu-dev \
|
||||||
bsdutils \
|
bsdutils \
|
||||||
curl \
|
curl \
|
||||||
|
@ -262,7 +262,7 @@ In the example below, the index can’t be used.
|
|||||||
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
|
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
|
||||||
```
|
```
|
||||||
|
|
||||||
To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md).
|
To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md#force-primary-key).
|
||||||
|
|
||||||
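The following is a minimal sketch of that check, reusing the illustrative `table`, `CounterID`, and `URL` names from the example query in this section; with the setting enabled, a query that cannot use the primary key is expected to be rejected rather than silently scanning all data.

``` sql
SET force_primary_key = 1;

-- Expected to fail under force_primary_key = 1, because the OR over URL
-- prevents the primary key index from being used for this condition.
SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%';
```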
The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date.
|
The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date.
|
||||||
|
|
||||||
|
99
docs/en/interfaces/grpc.md
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 19
|
||||||
|
toc_title: gRPC Interface
|
||||||
|
---
|
||||||
|
|
||||||
|
# gRPC Interface {#grpc-interface}
|
||||||
|
|
||||||
|
## Introduction {#grpc-interface-introduction}
|
||||||
|
|
||||||
|
ClickHouse supports the [gRPC](https://grpc.io/) interface. gRPC is an open source remote procedure call system that uses HTTP/2 and [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers). The implementation of gRPC in ClickHouse supports:
|
||||||
|
|
||||||
|
- SSL;
|
||||||
|
- authentication;
|
||||||
|
- sessions;
|
||||||
|
- compression;
|
||||||
|
- parallel queries through the same channel;
|
||||||
|
- cancellation of queries;
|
||||||
|
- getting progress and logs;
|
||||||
|
- external tables.
|
||||||
|
|
||||||
|
The specification of the interface is described in [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||||
|
|
||||||
|
## gRPC Configuration {#grpc-interface-configuration}
|
||||||
|
|
||||||
|
To use the gRPC interface, set `grpc_port` in the main [server configuration](../operations/configuration-files.md). Other configuration options are shown in the following example:
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<grpc_port>9100</grpc_port>
|
||||||
|
<grpc>
|
||||||
|
<enable_ssl>false</enable_ssl>
|
||||||
|
|
||||||
|
<!-- The following two files are used only if SSL is enabled -->
|
||||||
|
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
|
||||||
|
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
|
||||||
|
|
||||||
|
<!-- Whether server requests client for a certificate -->
|
||||||
|
<ssl_require_client_auth>false</ssl_require_client_auth>
|
||||||
|
|
||||||
|
<!-- The following file is used only if ssl_require_client_auth=true -->
|
||||||
|
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
|
||||||
|
|
||||||
|
<!-- Default compression algorithm (applied if client doesn't specify another algorithm, see result_compression in QueryInfo).
|
||||||
|
Supported algorithms: none, deflate, gzip, stream_gzip -->
|
||||||
|
<compression>deflate</compression>
|
||||||
|
|
||||||
|
<!-- Default compression level (applied if client doesn't specify another level, see result_compression in QueryInfo).
|
||||||
|
Supported levels: none, low, medium, high -->
|
||||||
|
<compression_level>medium</compression_level>
|
||||||
|
|
||||||
|
<!-- Send/receive message size limits in bytes. -1 means unlimited -->
|
||||||
|
<max_send_message_size>-1</max_send_message_size>
|
||||||
|
<max_receive_message_size>-1</max_receive_message_size>
|
||||||
|
|
||||||
|
<!-- Enable if you want to get detailed logs -->
|
||||||
|
<verbose_logs>false</verbose_logs>
|
||||||
|
</grpc>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Built-in Client {#grpc-client}
|
||||||
|
|
||||||
|
You can write a client in any of the programming languages supported by gRPC using the provided [specification](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||||
|
Or you can use a built-in Python client. It is placed in [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) in the repository. The built-in client requires [grpcio and grpcio-tools](https://grpc.io/docs/languages/python/quickstart) Python modules.
|
||||||
|
|
||||||
|
The client supports the following arguments:
|
||||||
|
|
||||||
|
- `--help` – Shows a help message and exits.
|
||||||
|
- `--host HOST, -h HOST` – A server name. Default value: `localhost`. You can also use IPv4 or IPv6 addresses.
|
||||||
|
- `--port PORT` – A port to connect to. This port should be enabled in the ClickHouse server configuration (see `grpc_port`). Default value: `9100`.
|
||||||
|
- `--user USER_NAME, -u USER_NAME` – A user name. Default value: `default`.
|
||||||
|
- `--password PASSWORD` – A password. Default value: empty string.
|
||||||
|
- `--query QUERY, -q QUERY` – A query to process when using non-interactive mode.
|
||||||
|
- `--database DATABASE, -d DATABASE` – A default database. If not specified, the current database set in the server settings is used (`default` by default).
|
||||||
|
- `--format OUTPUT_FORMAT, -f OUTPUT_FORMAT` – A result output [format](formats.md). Default value for interactive mode: `PrettyCompact`.
|
||||||
|
- `--debug` – Enables showing debug information.
|
||||||
|
|
||||||
|
To run the client in interactive mode, call it without the `--query` argument.
|
||||||
|
|
||||||
|
In batch mode, query data can be passed via `stdin`.
|
||||||
|
|
||||||
|
**Client Usage Example**
|
||||||
|
|
||||||
|
In the following example a table is created and loaded with data from a CSV file. Then the content of the table is queried.
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
./clickhouse-grpc-client.py -q "CREATE TABLE grpc_example_table (id UInt32, text String) ENGINE = MergeTree() ORDER BY id;"
|
||||||
|
echo "0,Input data for" > a.txt ; echo "1,gRPC protocol example" >> a.txt
|
||||||
|
cat a.txt | ./clickhouse-grpc-client.py -q "INSERT INTO grpc_example_table FORMAT CSV"
|
||||||
|
|
||||||
|
./clickhouse-grpc-client.py --format PrettyCompact -q "SELECT * FROM grpc_example_table;"
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─id─┬─text──────────────────┐
|
||||||
|
│ 0 │ Input data for │
|
||||||
|
│ 1 │ gRPC protocol example │
|
||||||
|
└────┴───────────────────────┘
|
||||||
|
```
|
@ -6,10 +6,11 @@ toc_title: Introduction
|
|||||||
|
|
||||||
# Interfaces {#interfaces}
|
# Interfaces {#interfaces}
|
||||||
|
|
||||||
ClickHouse provides two network interfaces (both can be optionally wrapped in TLS for additional security):
|
ClickHouse provides three network interfaces (they can be optionally wrapped in TLS for additional security):
|
||||||
|
|
||||||
- [HTTP](http.md), which is documented and easy to use directly.
|
- [HTTP](http.md), which is documented and easy to use directly.
|
||||||
- [Native TCP](../interfaces/tcp.md), which has less overhead.
|
- [Native TCP](../interfaces/tcp.md), which has less overhead.
|
||||||
|
- [gRPC](grpc.md).
|
||||||
|
|
||||||
In most cases it is recommended to use appropriate tool or library instead of interacting with those directly. Officially supported by Yandex are the following:
|
In most cases it is recommended to use appropriate tool or library instead of interacting with those directly. Officially supported by Yandex are the following:
|
||||||
|
|
||||||
@ -24,4 +25,3 @@ There are also a wide range of third-party libraries for working with ClickHouse
|
|||||||
- [Integrations](../interfaces/third-party/integrations.md)
|
- [Integrations](../interfaces/third-party/integrations.md)
|
||||||
- [Visual interfaces](../interfaces/third-party/gui.md)
|
- [Visual interfaces](../interfaces/third-party/gui.md)
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/interfaces/) <!--hide-->
|
|
||||||
|
@ -105,7 +105,7 @@ ClickHouse Keeper is bundled into the ClickHouse server package, just add config
|
|||||||
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
|
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
|
||||||
```
|
```
|
||||||
|
|
||||||
## Four Latter Word Commands
|
## Four Letter Word Commands
|
||||||
|
|
||||||
ClickHouse Keeper also provides 4lw commands which are almost the same as in ZooKeeper. Each command is composed of four letters such as `mntr`, `stat`, etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively.
|
ClickHouse Keeper also provides 4lw commands which are almost the same as in ZooKeeper. Each command is composed of four letters such as `mntr`, `stat`, etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively.
|
||||||
|
|
||||||
|
@ -356,3 +356,24 @@ Possible values:
|
|||||||
- 1 — Parts are detached.
|
- 1 — Parts are detached.
|
||||||
|
|
||||||
Default value: `0`.
|
Default value: `0`.
|
||||||
|
|
||||||
|
## merge_tree_clear_old_temporary_directories_interval_seconds {#setting-merge-tree-clear-old-temporary-directories-interval-seconds}
|
||||||
|
|
||||||
|
Sets the interval in seconds for ClickHouse to execute the cleanup of old temporary directories.
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- Any positive integer.
|
||||||
|
|
||||||
|
Default value: `60` seconds.
|
||||||
|
|
||||||
|
## merge_tree_clear_old_parts_interval_seconds {#setting-merge-tree-clear-old-parts-interval-seconds}
|
||||||
|
|
||||||
|
Sets the interval in seconds for ClickHouse to execute the cleanup of old parts, WALs, and mutations.
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- Any positive integer.
|
||||||
|
|
||||||
|
Default value: `1` second.
|
||||||
|
|
||||||
|
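A small, hedged sketch of how the two cleanup intervals documented above could be inspected, assuming they are exposed through the `system.settings` table in this version:

``` sql
-- Shows the current cleanup intervals for old temporary directories and old parts.
SELECT name, value, description
FROM system.settings
WHERE name IN (
    'merge_tree_clear_old_temporary_directories_interval_seconds',
    'merge_tree_clear_old_parts_interval_seconds'
);
```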
@ -885,26 +885,6 @@ Possible values:
|
|||||||
|
|
||||||
Default value: 2013265920.
|
Default value: 2013265920.
|
||||||
|
|
||||||
## merge_tree_clear_old_temporary_directories_interval_seconds {#setting-merge-tree-clear-old-temporary-directories-interval-seconds}
|
|
||||||
|
|
||||||
Sets the interval in seconds for ClickHouse to execute the cleanup of old temporary directories.
|
|
||||||
|
|
||||||
Possible values:
|
|
||||||
|
|
||||||
- Any positive integer.
|
|
||||||
|
|
||||||
Default value: `60` seconds.
|
|
||||||
|
|
||||||
## merge_tree_clear_old_parts_interval_seconds {#setting-merge-tree-clear-old-parts-interval-seconds}
|
|
||||||
|
|
||||||
Sets the interval in seconds for ClickHouse to execute the cleanup of old parts, WALs, and mutations.
|
|
||||||
|
|
||||||
Possible values:
|
|
||||||
|
|
||||||
- Any positive integer.
|
|
||||||
|
|
||||||
Default value: `1` second.
|
|
||||||
|
|
||||||
## min_bytes_to_use_direct_io {#settings-min-bytes-to-use-direct-io}
|
## min_bytes_to_use_direct_io {#settings-min-bytes-to-use-direct-io}
|
||||||
|
|
||||||
The minimum data volume required for using direct I/O access to the storage disk.
|
The minimum data volume required for using direct I/O access to the storage disk.
|
||||||
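As a hedged example for the setting above (the threshold is illustrative, not a recommendation), it can be changed for the current session:

``` sql
-- Use direct I/O for reads whose expected data volume exceeds about 10 MiB.
SET min_bytes_to_use_direct_io = 10485760;
```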
@ -992,9 +972,16 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
|
|||||||
|
|
||||||
Setting up query threads logging.
|
Setting up query threads logging.
|
||||||
|
|
||||||
Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.
|
Query threads log into the [system.query_thread_log](../../operations/system-tables/query_thread_log.md) table. This setting has an effect only when [log_queries](#settings-log-queries) is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.
|
||||||
|
|
||||||
Example:
|
Possible values:
|
||||||
|
|
||||||
|
- 0 — Disabled.
|
||||||
|
- 1 — Enabled.
|
||||||
|
|
||||||
|
Default value: `1`.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
log_query_threads=1
|
log_query_threads=1
|
||||||
@ -4057,6 +4044,41 @@ Possible values:
|
|||||||
|
|
||||||
Default value: `0`.
|
Default value: `0`.
|
||||||
|
|
||||||
|
## alter_partition_verbose_result {#alter-partition-verbose-result}
|
||||||
|
|
||||||
|
Enables or disables the display of information about the parts to which the partition and part manipulation operations have been successfully applied.
|
||||||
|
Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition).
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- 0 — disable verbosity.
|
||||||
|
- 1 — enable verbosity.
|
||||||
|
|
||||||
|
Default value: `0`.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE test(a Int64, d Date, s String) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY a;
|
||||||
|
INSERT INTO test VALUES(1, '2021-01-01', '');
|
||||||
|
INSERT INTO test VALUES(1, '2021-01-01', '');
|
||||||
|
ALTER TABLE test DETACH PARTITION ID '202101';
|
||||||
|
|
||||||
|
ALTER TABLE test ATTACH PARTITION ID '202101' SETTINGS alter_partition_verbose_result = 1;
|
||||||
|
|
||||||
|
┌─command_type─────┬─partition_id─┬─part_name────┬─old_part_name─┐
|
||||||
|
│ ATTACH PARTITION │ 202101 │ 202101_7_7_0 │ 202101_5_5_0 │
|
||||||
|
│ ATTACH PARTITION │ 202101 │ 202101_8_8_0 │ 202101_6_6_0 │
|
||||||
|
└──────────────────┴──────────────┴──────────────┴───────────────┘
|
||||||
|
|
||||||
|
ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1;
|
||||||
|
|
||||||
|
┌─command_type─┬─partition_id─┬─part_name────┬─backup_name─┬─backup_path───────────────────┬─part_backup_path────────────────────────────────────────────┐
|
||||||
|
│ FREEZE ALL │ 202101 │ 202101_7_7_0 │ 8 │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_7_7_0 │
|
||||||
|
│ FREEZE ALL │ 202101 │ 202101_8_8_0 │ 8 │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_8_8_0 │
|
||||||
|
└──────────────┴──────────────┴──────────────┴─────────────┴───────────────────────────────┴─────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}
|
## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}
|
||||||
|
|
||||||
Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md#capnproto) `Enum` data type from schema.
|
Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md#capnproto) `Enum` data type from schema.
|
||||||
|
@ -34,7 +34,7 @@ Use `perf top` to watch the time spent in the kernel for memory management.
|
|||||||
Permanent huge pages also do not need to be allocated.
|
Permanent huge pages also do not need to be allocated.
|
||||||
|
|
||||||
!!! warning "Attention"
|
!!! warning "Attention"
|
||||||
If your system has less than 16 GB of RAM you may experience various memory exceptions because default settings does not match this amount of RAM. Recommended amount of RAM is 32 GB or more. You can use ClickHouse in system with small amount of RAM, even with 2 GB of RAM, but it requires an additional tuning and able to process small ingestion rate.
|
If your system has less than 16 GB of RAM, you may experience various memory exceptions because default settings do not match this amount of memory. The recommended amount of RAM is 32 GB or more. You can use ClickHouse in a system with a small amount of RAM, even with 2 GB of RAM, but it requires additional tuning and can ingest at a low rate.
|
||||||
|
|
||||||
## Storage Subsystem {#storage-subsystem}
|
## Storage Subsystem {#storage-subsystem}
|
||||||
|
|
||||||
|
@ -26,7 +26,7 @@ Query is send in post body. Response is returned in RowBinary format.
|
|||||||
```bash
|
```bash
|
||||||
$ clickhouse-odbc-bridge --http-port 9018 --daemon
|
$ clickhouse-odbc-bridge --http-port 9018 --daemon
|
||||||
|
|
||||||
$ curl -d "query=SELECT PageID, ImpID, AdType FROM Keys ORDER BY PageID, ImpID" --data-urlencode "connection_string=DSN=ClickHouse;DATABASE=stat" --data-urlencode "columns=columns format version: 1
|
$ curl -d "query=SELECT PageID, ImpID, AdType FROM Keys ORDER BY PageID, ImpID" --data-urlencode "connection_string=DSN=ClickHouse;DATABASE=stat" --data-urlencode "sample_block=columns format version: 1
|
||||||
3 columns:
|
3 columns:
|
||||||
\`PageID\` String
|
\`PageID\` String
|
||||||
\`ImpID\` String
|
\`ImpID\` String
|
||||||
|
@ -0,0 +1,64 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 311
|
||||||
|
toc_title: sparkbar
|
||||||
|
---
|
||||||
|
|
||||||
|
# sparkbar {#sparkbar}
|
||||||
|
|
||||||
|
The function plots a frequency histogram for values `x` and the repetition rate `y` of these values over the interval `[min_x, max_x]`.
|
||||||
|
|
||||||
|
|
||||||
|
If no interval is specified, then the minimum `x` is used as the interval start, and the maximum `x` as the interval end.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
sparkbar(width[, min_x, max_x])(x, y)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `width` — The number of segments. Type: [Integer](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
- `min_x` — The interval start. Optional parameter.
|
||||||
|
- `max_x` — The interval end. Optional parameter.
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `x` — The field with values.
|
||||||
|
- `y` — The field with the frequency of values.
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- The frequency histogram.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE TABLE spark_bar_data (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192;
|
||||||
|
|
||||||
|
INSERT INTO spark_bar_data VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11');
|
||||||
|
|
||||||
|
SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_data;
|
||||||
|
|
||||||
|
SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_data;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
|
||||||
|
┌─sparkbar(9)(event_date, cnt)─┐
|
||||||
|
│ │
|
||||||
|
│ ▁▅▄▃██▅ ▁ │
|
||||||
|
│ │
|
||||||
|
└──────────────────────────────┘
|
||||||
|
|
||||||
|
┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐
|
||||||
|
│ │
|
||||||
|
│▁▄▄▂▅▇█▁ │
|
||||||
|
│ │
|
||||||
|
└──────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
112
docs/en/sql-reference/functions/time-window-functions.md
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 68
|
||||||
|
toc_title: Time Window
|
||||||
|
---
|
||||||
|
|
||||||
|
# Time Window Functions {#time-window-functions}
|
||||||
|
|
||||||
|
Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with WindowView are listed below:
|
||||||
|
|
||||||
|
## tumble {#time-window-functions-tumble}
|
||||||
|
|
||||||
|
A tumbling time window assigns records to non-overlapping, continuous windows with a fixed duration (`interval`).
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
tumble(time_attr, interval [, timezone])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
- `time_attr` - Date and time. [DateTime](../../sql-reference/data-types/datetime.md) data type.
|
||||||
|
- `interval` - Window interval in [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type.
|
||||||
|
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- The inclusive lower and exclusive upper bound of the corresponding tumbling window.
|
||||||
|
|
||||||
|
Type: `Tuple(DateTime, DateTime)`
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT tumble(now(), toIntervalDay('1'))
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─tumble(now(), toIntervalDay('1'))─────────────┐
|
||||||
|
│ ['2020-01-01 00:00:00','2020-01-02 00:00:00'] │
|
||||||
|
└───────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## hop {#time-window-functions-hop}
|
||||||
|
|
||||||
|
A hopping time window has a fixed duration (`window_interval`) and hops by a specified hop interval (`hop_interval`). If the `hop_interval` is smaller than the `window_interval`, hopping windows are overlapping. Thus, records can be assigned to multiple windows.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
hop(time_attr, hop_interval, window_interval [, timezone])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `time_attr` - Date and time. [DateTime](../../sql-reference/data-types/datetime.md) data type.
|
||||||
|
- `hop_interval` - Hop interval in [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type. Should be a positive number.
|
||||||
|
- `window_interval` - Window interval in [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type. Should be a positive number.
|
||||||
|
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- The inclusive lower and exclusive upper bound of the corresponding hopping window. Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when the hop function is used **without** `WINDOW VIEW`.
|
||||||
|
|
||||||
|
Type: `Tuple(DateTime, DateTime)`
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT hop(now(), INTERVAL '1' SECOND, INTERVAL '2' SECOND)
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─hop(now(), toIntervalSecond('1'), toIntervalSecond('2'))──┐
|
||||||
|
│ ('2020-01-14 16:58:22','2020-01-14 16:58:24') │
|
||||||
|
└───────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## tumbleStart {#time-window-functions-tumblestart}
|
||||||
|
|
||||||
|
Returns the inclusive lower bound of the corresponding tumbling window.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
tumbleStart(time_attr, interval [, timezone]);
|
||||||
|
```
|
||||||
|
|
||||||
|
## tumbleEnd {#time-window-functions-tumbleend}
|
||||||
|
|
||||||
|
Returns the exclusive upper bound of the corresponding tumbling window.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
tumbleEnd(time_attr, interval [, timezone]);
|
||||||
|
```
|
||||||
|
|
||||||
|
## hopStart {#time-window-functions-hopstart}
|
||||||
|
|
||||||
|
Returns the inclusive lower bound of the corresponding hopping window.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
hopStart(time_attr, hop_interval, window_interval [, timezone]);
|
||||||
|
```
|
||||||
|
|
||||||
|
## hopEnd {#time-window-functions-hopend}
|
||||||
|
|
||||||
|
Returns the exclusive upper bound of the corresponding hopping window.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
|
||||||
|
```
|
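The four helper functions above take the same arguments as `tumble` and `hop`; an illustrative query (the returned bounds depend on the current time) might look like this:

``` sql
SELECT
    tumbleStart(now(), toIntervalDay('1')) AS window_start,
    tumbleEnd(now(), toIntervalDay('1')) AS window_end;
```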
@ -175,6 +175,7 @@ in which the `Strings` represents the named fields of the tuple and `T` are the
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
tupleToNameValuePairs(tuple)
|
tupleToNameValuePairs(tuple)
|
||||||
|
```
|
||||||
|
|
||||||
**Arguments**
|
**Arguments**
|
||||||
|
|
||||||
|
@ -5,7 +5,7 @@ toc_title: VIEW
|
|||||||
|
|
||||||
# CREATE VIEW {#create-view}
|
# CREATE VIEW {#create-view}
|
||||||
|
|
||||||
Creates a new view. Views can be [normal](#normal), [materialized](#materialized) and [live](#live-view) (the latter is an experimental feature).
|
Creates a new view. Views can be [normal](#normal), [materialized](#materialized), [live](#live-view), and [window](#window-view) (live view and window view are experimental features).
|
||||||
|
|
||||||
## Normal View {#normal}
|
## Normal View {#normal}
|
||||||
|
|
||||||
@ -243,3 +243,119 @@ Most common uses of live view tables include:
|
|||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
- [ALTER LIVE VIEW](../alter/view.md#alter-live-view)
|
- [ALTER LIVE VIEW](../alter/view.md#alter-live-view)
|
||||||
|
|
||||||
|
## Window View [Experimental] {#window-view}
|
||||||
|
|
||||||
|
!!! important "Important"
|
||||||
|
This is an experimental feature that may change in backwards-incompatible ways in the future releases.
|
||||||
|
Enable the usage of window views and the `WATCH` query with the [allow_experimental_window_view](../../../operations/settings/settings.md#allow-experimental-window-view) setting, for example by running `set allow_experimental_window_view = 1`.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [ENGINE = engine] [WATERMARK = strategy] [ALLOWED_LATENESS = interval_function] AS SELECT ... GROUP BY time_window_function
|
||||||
|
```
|
||||||
|
|
||||||
|
A window view can aggregate data by time window and output the results when the window is ready to fire. It stores the partial aggregation results in an inner (or specified) table to reduce latency and can push the processing result to a specified table or push notifications using the WATCH query.
|
||||||
|
|
||||||
|
Creating a window view is similar to creating a `MATERIALIZED VIEW`. A window view needs an inner storage engine to store intermediate data. The inner storage uses `AggregatingMergeTree` as the default engine.
|
||||||
|
|
||||||
|
### Time Window Functions {#window-view-timewindowfunctions}
|
||||||
|
|
||||||
|
[Time window functions](../../functions/time-window-functions.md) are used to get the lower and upper window bound of records. The window view needs to be used with a time window function.
|
||||||
|
|
||||||
|
### TIME ATTRIBUTES {#window-view-timeattributes}
|
||||||
|
|
||||||
|
Window view supports **processing time** and **event time** processing.
|
||||||
|
|
||||||
|
**Processing time** allows window view to produce results based on the local machine's time and is used by default. It is the most straightforward notion of time but does not provide determinism. The processing time attribute can be defined by setting the `time_attr` of the time window function to a table column or using the function `now()`. The following query creates a window view with processing time.
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW wv AS SELECT count(number), tumbleStart(w_id) as w_start from date GROUP BY tumble(now(), INTERVAL '5' SECOND) as w_id
|
||||||
|
```
|
||||||
|
|
||||||
|
**Event time** is the time that each individual event occurred on its producing device. This time is typically embedded within the records when they are generated. Event time processing allows for consistent results even in the case of out-of-order events or late events. Window view supports event time processing by using the `WATERMARK` syntax.
|
||||||
|
|
||||||
|
Window view provides three watermark strategies:
|
||||||
|
|
||||||
|
* `STRICTLY_ASCENDING`: Emits a watermark of the maximum observed timestamp so far. Rows that have a timestamp smaller than the max timestamp are not late.
|
||||||
|
* `ASCENDING`: Emits a watermark of the maximum observed timestamp so far minus 1. Rows that have a timestamp equal to or smaller than the max timestamp are not late.
|
||||||
|
* `BOUNDED`: WATERMARK=INTERVAL. Emits watermarks, which are the maximum observed timestamp minus the specified delay.
|
||||||
|
|
||||||
|
The following queries are examples of creating a window view with `WATERMARK`:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW wv WATERMARK=STRICTLY_ASCENDING AS SELECT count(number) FROM date GROUP BY tumble(timestamp, INTERVAL '5' SECOND);
|
||||||
|
CREATE WINDOW VIEW wv WATERMARK=ASCENDING AS SELECT count(number) FROM date GROUP BY tumble(timestamp, INTERVAL '5' SECOND);
|
||||||
|
CREATE WINDOW VIEW wv WATERMARK=INTERVAL '3' SECOND AS SELECT count(number) FROM date GROUP BY tumble(timestamp, INTERVAL '5' SECOND);
|
||||||
|
```
|
||||||
|
|
||||||
|
By default, the window is fired when the watermark arrives, and elements that arrive behind the watermark are dropped. Window view supports late event processing by setting `ALLOWED_LATENESS=INTERVAL`. An example of lateness handling is:
``` sql
CREATE WINDOW VIEW test.wv TO test.dst WATERMARK=ASCENDING ALLOWED_LATENESS=INTERVAL '2' SECOND AS SELECT count(a) AS count, tumbleEnd(wid) AS w_end FROM test.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid;
```

Note that elements emitted by a late firing should be treated as updated results of a previous computation. Instead of firing at the end of the window, the window view fires immediately when a late event arrives. Thus, it results in multiple outputs for the same window. Users need to take these duplicated results into account or deduplicate them, for example as sketched below.
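
One possible deduplication approach, assuming the results are pushed into the target table from the example above (`test.dst` with columns `count` and `w_end`) and that each late firing can only increase the count of its window, is to keep the largest value per window; this is a sketch, not part of the window view syntax:

``` sql
-- Sketch: collapse multiple firings of the same window into the most complete (largest) count.
SELECT
    w_end,
    max(count) AS count   -- late firings only add events, so the maximum is the latest result
FROM test.dst
GROUP BY w_end
ORDER BY w_end;
```
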
### Monitoring New Windows {#window-view-monitoring}

Window view supports the `WATCH` query to continuously append the processing results to the console, or the `TO` syntax to output the results to a table.

``` sql
WATCH [db.]name [LIMIT n]
```

The `WATCH` query behaves the same way as it does for `LIVE VIEW`. A `LIMIT` can be specified to set the number of updates to receive before terminating the query.
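
For instance, assuming the view `wv` defined in the example section below, the following stops after five window results:

``` sql
WATCH wv LIMIT 5
```
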
### Settings {#window-view-settings}

- `window_view_clean_interval`: The clean interval of window view in seconds to free outdated data. The system retains windows that have not been fully triggered according to the system time or the `WATERMARK` configuration; other data is deleted.
- `window_view_heartbeat_interval`: The heartbeat interval in seconds to indicate that the watch query is alive.
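
Assuming these behave like ordinary user-level settings (an assumption; this page does not state where they have to be set), they could be tuned per session before creating or watching a window view:

``` sql
-- Sketch, assuming window_view_* are regular session settings.
SET window_view_clean_interval = 60;       -- free outdated window data every 60 seconds
SET window_view_heartbeat_interval = 15;   -- heartbeat the WATCH client every 15 seconds
```
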
### Example {#window-view-example}

Suppose we need to count the number of click logs per 10 seconds in a log table called `data`, and its table structure is:

``` sql
CREATE TABLE data ( `id` UInt64, `timestamp` DateTime) ENGINE = Memory;
```

First, we create a window view with a tumbling window of 10 seconds:

``` sql
CREATE WINDOW VIEW wv AS SELECT count(id), tumbleStart(w_id) AS window_start FROM data GROUP BY tumble(timestamp, INTERVAL '10' SECOND) AS w_id
```

Then, we use the `WATCH` query to get the results.

``` sql
WATCH wv
```

When logs are inserted into table `data`,

``` sql
INSERT INTO data VALUES(1,now())
```

The `WATCH` query should print the results as follows:

``` text
┌─count(id)─┬────────window_start─┐
│         1 │ 2020-01-14 16:56:40 │
└───────────┴─────────────────────┘
```

Alternatively, we can attach the output to another table using `TO` syntax.

``` sql
CREATE WINDOW VIEW wv TO dst AS SELECT count(id), tumbleStart(w_id) AS window_start FROM data GROUP BY tumble(timestamp, INTERVAL '10' SECOND) AS w_id
```


Additional examples can be found among stateful tests of ClickHouse (they are named `*window_view*` there).

### Window View Usage {#window-view-usage}

The window view is useful in the following scenarios:

* **Monitoring**: Aggregate and calculate metrics from logs by time, and output the results to a target table. The dashboard can use the target table as a source table; a minimal sketch follows this list.
* **Analyzing**: Automatically aggregate and preprocess data in the time window. This can be useful when analyzing a large number of logs. The preprocessing eliminates repeated calculations in multiple queries and reduces query latency.
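
A minimal monitoring sketch under assumed names (a `logs` source table with `id` and `timestamp` columns and an `error_rates` target table; none of these names come from this page):

``` sql
-- Sketch: pre-aggregate per-minute counts into a MergeTree table that a dashboard can query directly.
CREATE TABLE error_rates (`w_start` DateTime, `errors` UInt64) ENGINE = MergeTree ORDER BY w_start;

CREATE WINDOW VIEW error_rates_wv TO error_rates
AS SELECT tumbleStart(w_id) AS w_start, count(id) AS errors
FROM logs
GROUP BY tumble(timestamp, INTERVAL '1' MINUTE) AS w_id;
```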
99
docs/ru/interfaces/grpc.md
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 18
|
||||||
|
toc_title: gRPC интерфейс
|
||||||
|
---
|
||||||
|
|
||||||
|
# Интерфейс gRPC {#grpc-interface}
|
||||||
|
|
||||||
|
## Введение {#grpc-interface-introduction}
|
||||||
|
|
||||||
|
ClickHouse поддерживает интерфейс [gRPC](https://grpc.io/). Это система удаленного вызова процедур с открытым исходным кодом, которая использует HTTP/2 и [Protocol Buffers](https://ru.wikipedia.org/wiki/Protocol_Buffers). В реализации gRPC в ClickHouse поддерживаются:
|
||||||
|
|
||||||
|
- SSL;
|
||||||
|
- аутентификация;
|
||||||
|
- сессии;
|
||||||
|
- сжатие;
|
||||||
|
- параллельные запросы, выполняемые через один канал;
|
||||||
|
- отмена запросов;
|
||||||
|
- получение прогресса операций и логов;
|
||||||
|
- внешние таблицы.
|
||||||
|
|
||||||
|
Спецификация интерфейса содержится в [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||||
|
|
||||||
|
## Конфигурация gRPC {#grpc-interface-configuration}
|
||||||
|
|
||||||
|
Чтобы сделать доступным интерфейс gRPC, нужно задать порт с помощью настройки `grpc_port` в [конфигурации сервера](../operations/configuration-files.md). Другие настройки приведены в примере:
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<grpc_port>9100</grpc_port>
|
||||||
|
<grpc>
|
||||||
|
<enable_ssl>false</enable_ssl>
|
||||||
|
|
||||||
|
<!-- Пути к файлам сертификатов и ключей. Используются при включенном SSL -->
|
||||||
|
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
|
||||||
|
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
|
||||||
|
|
||||||
|
<!-- Запрашивает ли сервер сертификат клиента -->
|
||||||
|
<ssl_require_client_auth>false</ssl_require_client_auth>
|
||||||
|
|
||||||
|
<!-- Используется, если необходимо запрашивать сертификат -->
|
||||||
|
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
|
||||||
|
|
||||||
|
<!-- Алгоритм сжатия по умолчанию (применяется, если клиент не указывает алгоритм, см. result_compression в QueryInfo).
|
||||||
|
Поддерживаются алгоритмы: none, deflate, gzip, stream_gzip -->
|
||||||
|
<compression>deflate</compression>
|
||||||
|
|
||||||
|
<!-- Уровень сжатия по умолчанию (применяется, если клиент не указывает уровень сжатия, см. result_compression в QueryInfo).
|
||||||
|
Поддерживаемые уровни: none, low, medium, high -->
|
||||||
|
<compression_level>medium</compression_level>
|
||||||
|
|
||||||
|
<!-- Ограничение в байтах на размер отправляемых и принимаемых сообщений. -1 означает отсутствие ограничения -->
|
||||||
|
<max_send_message_size>-1</max_send_message_size>
|
||||||
|
<max_receive_message_size>-1</max_receive_message_size>
|
||||||
|
|
||||||
|
<!-- Выводить ли детализированные логи -->
|
||||||
|
<verbose_logs>false</verbose_logs>
|
||||||
|
</grpc>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Встроенный клиент {#grpc-client}
|
||||||
|
|
||||||
|
Можно написать клиент на любом языке программирования, который поддерживается gRPC, с использованием [спецификации](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||||
|
Также можно воспользоваться встроенным Python клиентом. Он расположен в [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) в репозитории. Для работы встроенного клиента требуются Python модули [grpcio и grpcio-tools](https://grpc.io/docs/languages/python/quickstart).
|
||||||
|
|
||||||
|
Клиент поддерживает аргументы:
|
||||||
|
|
||||||
|
- `--help` – вывести справку и завершить работу.
|
||||||
|
- `--host HOST, -h HOST` – имя сервера. Значение по умолчанию: `localhost`. Можно задать адрес IPv4 или IPv6.
|
||||||
|
- `--port PORT` – номер порта. Этот порт должен быть задан в конфигурации сервера ClickHouse настройкой `grpc_port`. Значение по умолчанию: `9100`.
|
||||||
|
- `--user USER_NAME, -u USER_NAME` – имя пользователя. Значение по умолчанию: `default`.
|
||||||
|
- `--password PASSWORD` – пароль. Значение по умолчанию: пустая строка.
|
||||||
|
- `--query QUERY, -q QUERY` – запрос, который выполнится, когда используется неинтерактивный режим работы.
|
||||||
|
- `--database DATABASE, -d DATABASE` – база данных по умолчанию. Если не указана, то будет использована база данных, заданная в настройках сервера (по умолчанию `default`).
|
||||||
|
- `--format OUTPUT_FORMAT, -f OUTPUT_FORMAT` – [формат](formats.md) вывода результата. Значение по умолчанию для интерактивного режима: `PrettyCompact`.
|
||||||
|
- `--debug` – вывод отладочной информации.
|
||||||
|
|
||||||
|
Чтобы запустить клиент в интерактивном режиме, не указывайте аргумент `--query`.
|
||||||
|
|
||||||
|
В неинтерактивном режиме данные запроса можно передать через `stdin`.
|
||||||
|
|
||||||
|
**Пример использования клиента**
|
||||||
|
|
||||||
|
В примере создается таблица, и в нее загружаются данные из CSV файла. Затем выводится содержимое таблицы.
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
./clickhouse-grpc-client.py -q "CREATE TABLE grpc_example_table (id UInt32, text String) ENGINE = MergeTree() ORDER BY id;"
|
||||||
|
echo "0,Input data for" > a.txt ; echo "1,gRPC protocol example" >> a.txt
|
||||||
|
cat a.txt | ./clickhouse-grpc-client.py -q "INSERT INTO grpc_example_table FORMAT CSV"
|
||||||
|
|
||||||
|
./clickhouse-grpc-client.py --format PrettyCompact -q "SELECT * FROM grpc_example_table;"
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─id─┬─text──────────────────┐
|
||||||
|
│ 0 │ Input data for │
|
||||||
|
│ 1 │ gRPC protocol example │
|
||||||
|
└────┴───────────────────────┘
|
||||||
|
```
|
@ -6,12 +6,13 @@ toc_title: "Введение"
|
|||||||
|
|
||||||
# Интерфейсы {#interfaces}
|
# Интерфейсы {#interfaces}
|
||||||
|
|
||||||
ClickHouse предоставляет два сетевых интерфейса (оба могут быть дополнительно обернуты в TLS для дополнительной безопасности):
|
ClickHouse предоставляет три сетевых интерфейса (они могут быть обернуты в TLS для дополнительной безопасности):
|
||||||
|
|
||||||
- [HTTP](http.md), который задокументирован и прост для использования напрямую;
|
- [HTTP](http.md), который задокументирован и прост для использования напрямую;
|
||||||
- [Native TCP](tcp.md), который имеет меньше накладных расходов.
|
- [Native TCP](tcp.md), который имеет меньше накладных расходов;
|
||||||
|
- [gRPC](grpc.md).
|
||||||
|
|
||||||
В большинстве случаев рекомендуется использовать подходящий инструмент или библиотеку, а не напрямую взаимодействовать с ClickHouse по сути. Официально поддерживаемые Яндексом:
|
В большинстве случаев рекомендуется использовать подходящий инструмент или библиотеку, а не напрямую взаимодействовать с ClickHouse. Официально поддерживаемые Яндексом:
|
||||||
|
|
||||||
- [Консольный клиент](cli.md);
|
- [Консольный клиент](cli.md);
|
||||||
- [JDBC-драйвер](jdbc.md);
|
- [JDBC-драйвер](jdbc.md);
|
||||||
|
@ -999,14 +999,14 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
|
|||||||
|
|
||||||
Настройки логирования информации о зависимых представлениях (materialized, live и т.п.) в запросах принятых с настройкой [log_query_views=1](../../operations/settings/settings.md#settings-log-query-views).
|
Настройки логирования информации о зависимых представлениях (materialized, live и т.п.) в запросах принятых с настройкой [log_query_views=1](../../operations/settings/settings.md#settings-log-query-views).
|
||||||
|
|
||||||
Запросы сохраняются в таблицу system.query_views_log. Вы можете изменить название этой таблицы в параметре `table` (см. ниже).
|
Запросы логируются в таблице [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log). Вы можете изменить название этой таблицы в параметре `table` (см. ниже).
|
||||||
|
|
||||||
При настройке логирования используются следующие параметры:
|
При настройке логирования используются следующие параметры:
|
||||||
|
|
||||||
- `database` – имя базы данных.
|
- `database` – имя базы данных.
|
||||||
- `table` – имя таблицы куда будут записываться использованные представления.
|
- `table` – имя системной таблицы, где будут логироваться запросы.
|
||||||
- `partition_by` — устанавливает [произвольный ключ партиционирования](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Нельзя использовать если используется `engine`
|
- `partition_by` — устанавливает [произвольный ключ партиционирования](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Нельзя использовать, если задан параметр `engine`.
|
||||||
- `engine` - устанавливает [настройки MergeTree Engine](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) для системной таблицы. Нельзя использовать если используется `partition_by`.
|
- `engine` — устанавливает [настройки MergeTree Engine](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) для системной таблицы. Нельзя использовать, если задан параметр `partition_by`.
|
||||||
- `flush_interval_milliseconds` — период сброса данных из буфера в памяти в таблицу.
|
- `flush_interval_milliseconds` — период сброса данных из буфера в памяти в таблицу.
|
||||||
|
|
||||||
Если таблица не существует, то ClickHouse создаст её. Если структура журнала запросов изменилась при обновлении сервера ClickHouse, то таблица со старой структурой переименовывается, а новая таблица создается автоматически.
|
Если таблица не существует, то ClickHouse создаст её. Если структура журнала запросов изменилась при обновлении сервера ClickHouse, то таблица со старой структурой переименовывается, а новая таблица создается автоматически.
|
||||||
|
@ -355,3 +355,23 @@ Eсли суммарное число активных кусков во все
|
|||||||
- 1 — куски данных открепляются.
|
- 1 — куски данных открепляются.
|
||||||
|
|
||||||
Значение по умолчанию: `0`.
|
Значение по умолчанию: `0`.
|
||||||
|
|
||||||
|
## merge_tree_clear_old_temporary_directories_interval_seconds {#setting-merge-tree-clear-old-temporary-directories-interval-seconds}
|
||||||
|
|
||||||
|
Задает интервал в секундах для удаления старых временных каталогов на сервере ClickHouse.
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- Положительное целое число.
|
||||||
|
|
||||||
|
Значение по умолчанию: `60` секунд.
|
||||||
|
|
||||||
|
## merge_tree_clear_old_parts_interval_seconds {#setting-merge-tree-clear-old-parts-interval-seconds}
|
||||||
|
|
||||||
|
Задает интервал в секундах для удаления старых кусков данных, журналов предзаписи (WAL) и мутаций на сервере ClickHouse.
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- Положительное целое число.
|
||||||
|
|
||||||
|
Значение по умолчанию: `1` секунда.
|
||||||
|
@ -807,26 +807,6 @@ ClickHouse может парсить только базовый формат `Y
|
|||||||
|
|
||||||
Значение по умолчанию: 2013265920.
|
Значение по умолчанию: 2013265920.
|
||||||
|
|
||||||
## merge_tree_clear_old_temporary_directories_interval_seconds {#setting-merge-tree-clear-old-temporary-directories-interval-seconds}
|
|
||||||
|
|
||||||
Задает интервал в секундах для удаления старых временных каталогов на сервере ClickHouse.
|
|
||||||
|
|
||||||
Возможные значения:
|
|
||||||
|
|
||||||
- Положительное целое число.
|
|
||||||
|
|
||||||
Значение по умолчанию: `60` секунд.
|
|
||||||
|
|
||||||
## merge_tree_clear_old_parts_interval_seconds {#setting-merge-tree-clear-old-parts-interval-seconds}
|
|
||||||
|
|
||||||
Задает интервал в секундах для удаления старых кусков данных, журналов предзаписи (WAL) и мутаций на сервере ClickHouse .
|
|
||||||
|
|
||||||
Возможные значения:
|
|
||||||
|
|
||||||
- Положительное целое число.
|
|
||||||
|
|
||||||
Значение по умолчанию: `1` секунда.
|
|
||||||
|
|
||||||
## min_bytes_to_use_direct_io {#settings-min-bytes-to-use-direct-io}
|
## min_bytes_to_use_direct_io {#settings-min-bytes-to-use-direct-io}
|
||||||
|
|
||||||
Минимальный объём данных, необходимый для прямого (небуферизованного) чтения/записи (direct I/O) на диск.
|
Минимальный объём данных, необходимый для прямого (небуферизованного) чтения/записи (direct I/O) на диск.
|
||||||
@ -912,11 +892,18 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
|
|||||||
|
|
||||||
## log_query_threads {#settings-log-query-threads}
|
## log_query_threads {#settings-log-query-threads}
|
||||||
|
|
||||||
Установка логирования информации о потоках выполнения запроса.
|
Управляет логированием информации о потоках выполнения запросов.
|
||||||
|
|
||||||
Лог информации о потоках выполнения запросов, переданных в ClickHouse с этой установкой, записывается согласно правилам конфигурационного параметра сервера [query_thread_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log).
|
Информация о потоках выполнения запросов сохраняется в системной таблице [system.query_thread_log](../../operations/system-tables/query_thread_log.md). Работает только в том случае, если включена настройка [log_queries](#settings-log-queries). Лог информации о потоках выполнения запросов, переданных в ClickHouse с этой установкой, записывается согласно правилам конфигурационного параметра сервера [query_thread_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log).
|
||||||
|
|
||||||
Пример:
|
Возможные значения:
|
||||||
|
|
||||||
|
- 0 — отключено.
|
||||||
|
- 1 — включено.
|
||||||
|
|
||||||
|
Значение по умолчанию: `1`.
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
log_query_threads=1
|
log_query_threads=1
|
||||||
@ -3808,6 +3795,40 @@ SELECT * FROM positional_arguments ORDER BY 2,3;
|
|||||||
|
|
||||||
Значение по умолчанию: `0`.
|
Значение по умолчанию: `0`.
|
||||||
|
|
||||||
|
## alter_partition_verbose_result {#alter-partition-verbose-result}
|
||||||
|
|
||||||
|
Включает или отключает вывод информации о кусках, к которым были успешно применены операции манипуляции с партициями и кусками. Применимо к [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md#alter_attach-partition) и к [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition)
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- 0 — отображение отключено.
|
||||||
|
- 1 — отображение включено.
|
||||||
|
|
||||||
|
Значение по умолчанию: `0`.
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE test(a Int64, d Date, s String) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY a;
|
||||||
|
INSERT INTO test VALUES(1, '2021-01-01', '');
|
||||||
|
INSERT INTO test VALUES(1, '2021-01-01', '');
|
||||||
|
ALTER TABLE test DETACH PARTITION ID '202101';
|
||||||
|
|
||||||
|
ALTER TABLE test ATTACH PARTITION ID '202101' SETTINGS alter_partition_verbose_result = 1;
|
||||||
|
|
||||||
|
┌─command_type─────┬─partition_id─┬─part_name────┬─old_part_name─┐
|
||||||
|
│ ATTACH PARTITION │ 202101 │ 202101_7_7_0 │ 202101_5_5_0 │
|
||||||
|
│ ATTACH PARTITION │ 202101 │ 202101_8_8_0 │ 202101_6_6_0 │
|
||||||
|
└──────────────────┴──────────────┴──────────────┴───────────────┘
|
||||||
|
|
||||||
|
ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1;
|
||||||
|
|
||||||
|
┌─command_type─┬─partition_id─┬─part_name────┬─backup_name─┬─backup_path───────────────────┬─part_backup_path────────────────────────────────────────────┐
|
||||||
|
│ FREEZE ALL │ 202101 │ 202101_7_7_0 │ 8 │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_7_7_0 │
|
||||||
|
│ FREEZE ALL │ 202101 │ 202101_8_8_0 │ 8 │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_8_8_0 │
|
||||||
|
└──────────────┴──────────────┴──────────────┴─────────────┴───────────────────────────────┴─────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}
|
## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}
|
||||||
|
|
||||||
Определяет, как сопоставить тип данных ClickHouse `Enum` и тип данных `Enum` формата [CapnProto](../../interfaces/formats.md#capnproto) из схемы.
|
Определяет, как сопоставить тип данных ClickHouse `Enum` и тип данных `Enum` формата [CapnProto](../../interfaces/formats.md#capnproto) из схемы.
|
||||||
|
@ -55,6 +55,7 @@ ClickHouse не удаляет данные из таблица автомати
|
|||||||
- `query_kind` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — тип запроса.
|
- `query_kind` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — тип запроса.
|
||||||
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена баз данных, присутствующих в запросе.
|
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена баз данных, присутствующих в запросе.
|
||||||
- `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена таблиц, присутствующих в запросе.
|
- `tables` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена таблиц, присутствующих в запросе.
|
||||||
|
- `views` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена представлений (материализованные или live), которые представленны в запросе.
|
||||||
- `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена столбцов, присутствующих в запросе.
|
- `columns` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — имена столбцов, присутствующих в запросе.
|
||||||
- `projections` ([String](../../sql-reference/data-types/string.md)) — имена проекций, использованных при выполнении запроса.
|
- `projections` ([String](../../sql-reference/data-types/string.md)) — имена проекций, использованных при выполнении запроса.
|
||||||
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — код исключения.
|
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — код исключения.
|
||||||
|
@ -113,4 +113,4 @@ ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'Compr
|
|||||||
**Смотрите также**
|
**Смотрите также**
|
||||||
|
|
||||||
- [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — описание системной таблицы `query_log`, которая содержит общую информацию о выполненных запросах.
|
- [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — описание системной таблицы `query_log`, которая содержит общую информацию о выполненных запросах.
|
||||||
|
- [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) — описание системной таблицы `query_views_log`, которая содержит информацию о всех представлениях, участвующих в выполненных запросах.
|
||||||
|
@ -1 +0,0 @@
|
|||||||
../../../en/operations/system-tables/query_views_log.md
|
|
84
docs/ru/operations/system-tables/query_views_log.md
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
# system.query_views_log {#system_tables-query_views_log}
|
||||||
|
|
||||||
|
Содержит информацию о зависимых представлениях, выполняемых при выполнении запроса, например, тип представления или время выполнения.
|
||||||
|
|
||||||
|
Чтобы начать ведение журнала:
|
||||||
|
|
||||||
|
1. Настройте параметры в разделе [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log).
|
||||||
|
2. Включите настройку [log_query_views=1](../../operations/settings/settings.md#settings-log-query-views).
|
||||||
|
|
||||||
|
Период сброса данных из буфера в памяти задается в параметре `flush_interval_milliseconds` в разделе настроек сервера [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log ). Для принудительного сброса используйте запрос [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs).
|
||||||
|
|
||||||
|
ClickHouse не удаляет данные из таблицы автоматически. Подробнее смотрите раздел [Системные таблицы](../../operations/system-tables/index.md#system-tables-introduction).
|
||||||
|
|
||||||
|
Чтобы уменьшить количество запросов, регистрируемых в таблице `query_views_log`, вы можете включить настройку [log_queries_probability](../../operations/settings/settings.md#log-queries-probability).
|
||||||
|
|
||||||
|
Столбцы:
|
||||||
|
|
||||||
|
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата, когда произошло последнее событие с представлением.
|
||||||
|
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время завершения выполнения представления.
|
||||||
|
- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время завершения выполнения представления с точностью до микросекунд.
|
||||||
|
- `view_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — продолжительность выполнения представления (сумма его этапов) в миллисекундах.
|
||||||
|
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — идентификатор начального запроса (при распределённом выполнении запроса).
|
||||||
|
- `view_name` ([String](../../sql-reference/data-types/string.md)) — имя представления.
|
||||||
|
- `view_uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — UUID представления.
|
||||||
|
- `view_type` ([Enum8](../../sql-reference/data-types/enum.md)) — тип представления. Возможные значения:
|
||||||
|
- `'Default' = 1` — [обычные представления](../../sql-reference/statements/create/view.md#normal). Не должно появляться в этом журнале.
|
||||||
|
- `'Materialized' = 2` — [материализованные представления](../../sql-reference/statements/create/view.md#materialized).
|
||||||
|
- `'Live' = 3` — [live представления](../../sql-reference/statements/create/view.md#live-view).
|
||||||
|
- `view_query` ([String](../../sql-reference/data-types/string.md)) — запрос, выполняемый представлением.
|
||||||
|
- `view_target` ([String](../../sql-reference/data-types/string.md)) — имя целевой таблицы представления.
|
||||||
|
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество прочитанных строк.
|
||||||
|
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество прочитанных байт.
|
||||||
|
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество записанных строк.
|
||||||
|
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество записанных байт.
|
||||||
|
- `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — максимальная разница между объемом выделенной и освобожденной памяти в контексте этого представления.
|
||||||
|
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — события профиля, которые измеряют различные показатели. Их описание можно найти в таблице [system.events](../../operations/system-tables/events.md#system_tables-events).
|
||||||
|
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — статус представления. Возможные значения:
|
||||||
|
- `'QueryStart' = 1` — успешное начало выполнения представления. Не должно отображаться.
|
||||||
|
- `'QueryFinish' = 2` — успешное завершение выполнения представления.
|
||||||
|
- `'ExceptionBeforeStart' = 3` — исключение до начала выполнения представления.
|
||||||
|
- `'ExceptionWhileProcessing' = 4` — исключение во время выполнения представления.
|
||||||
|
- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — код исключения.
|
||||||
|
- `exception` ([String](../../sql-reference/data-types/string.md)) — сообщение исключения.
|
||||||
|
- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [трассировка стека](https://ru.wikipedia.org/wiki/Трассировка_стека). Пустая строка, если запрос был успешно выполнен.
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
|
Запрос:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT * FROM system.query_views_log LIMIT 1 \G;
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
Row 1:
|
||||||
|
──────
|
||||||
|
event_date: 2021-06-22
|
||||||
|
event_time: 2021-06-22 13:23:07
|
||||||
|
event_time_microseconds: 2021-06-22 13:23:07.738221
|
||||||
|
view_duration_ms: 0
|
||||||
|
initial_query_id: c3a1ac02-9cad-479b-af54-9e9c0a7afd70
|
||||||
|
view_name: default.matview_inner
|
||||||
|
view_uuid: 00000000-0000-0000-0000-000000000000
|
||||||
|
view_type: Materialized
|
||||||
|
view_query: SELECT * FROM default.table_b
|
||||||
|
view_target: default.`.inner.matview_inner`
|
||||||
|
read_rows: 4
|
||||||
|
read_bytes: 64
|
||||||
|
written_rows: 2
|
||||||
|
written_bytes: 32
|
||||||
|
peak_memory_usage: 4196188
|
||||||
|
ProfileEvents: {'FileOpen':2,'WriteBufferFromFileDescriptorWrite':2,'WriteBufferFromFileDescriptorWriteBytes':187,'IOBufferAllocs':3,'IOBufferAllocBytes':3145773,'FunctionExecute':3,'DiskWriteElapsedMicroseconds':13,'InsertedRows':2,'InsertedBytes':16,'SelectedRows':4,'SelectedBytes':48,'ContextLock':16,'RWLockAcquiredReadLocks':1,'RealTimeMicroseconds':698,'SoftPageFaults':4,'OSReadChars':463}
|
||||||
|
status: QueryFinish
|
||||||
|
exception_code: 0
|
||||||
|
exception:
|
||||||
|
stack_trace:
|
||||||
|
```
|
||||||
|
|
||||||
|
**См. также**
|
||||||
|
|
||||||
|
- [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — описание системной таблицы `query_log`, которая содержит общую информацию о выполненных запросах.
|
||||||
|
- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — описание системной таблицы `query_thread_log`, которая содержит информацию о каждом потоке выполнения запроса.
|
@ -0,0 +1,66 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 311
|
||||||
|
toc_title: sparkbar
|
||||||
|
---
|
||||||
|
|
||||||
|
# sparkbar {#sparkbar}
|
||||||
|
|
||||||
|
Функция строит гистограмму частот по заданным значениям `x` и частоте повторения этих значений `y` на интервале `[min_x, max_x]`.
|
||||||
|
|
||||||
|
Если интервал для построения не указан, то в качестве нижней границы интервала будет взято минимальное значение `x`, а в качестве верхней границы — максимальное значение `x`.
|
||||||
|
|
||||||
|
|
||||||
|
**Синтаксис**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
sparkbar(width[, min_x, max_x])(x, y)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Параметры**
|
||||||
|
|
||||||
|
- `width` — Количество столбцов гистограммы. Тип: [Integer](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
- `min_x` — Начало интервала. Необязательный параметр.
|
||||||
|
- `max_x` — Конец интервала. Необязательный параметр.
|
||||||
|
|
||||||
|
**Аргументы**
|
||||||
|
|
||||||
|
- `x` — Поле со значениями.
|
||||||
|
- `y` — Поле с частотой повторения значений.
|
||||||
|
|
||||||
|
|
||||||
|
**Возвращаемые значения**
|
||||||
|
|
||||||
|
- Гистограмма частот.
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
|
Запрос:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE TABLE spark_bar_data (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192;
|
||||||
|
|
||||||
|
INSERT INTO spark_bar_data VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11');
|
||||||
|
|
||||||
|
SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_data;
|
||||||
|
|
||||||
|
SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_data;
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
|
||||||
|
┌─sparkbar(9)(event_date, cnt)─┐
|
||||||
|
│ │
|
||||||
|
│ ▁▅▄▃██▅ ▁ │
|
||||||
|
│ │
|
||||||
|
└──────────────────────────────┘
|
||||||
|
|
||||||
|
┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐
|
||||||
|
│ │
|
||||||
|
│▁▄▄▂▅▇█▁ │
|
||||||
|
│ │
|
||||||
|
└──────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
@ -357,7 +357,7 @@ Result:
|
|||||||
|
|
||||||
## multiFuzzyMatchAny(haystack, distance, \[pattern<sub>1</sub>, pattern<sub>2</sub>, …, pattern<sub>n</sub>\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn}
|
## multiFuzzyMatchAny(haystack, distance, \[pattern<sub>1</sub>, pattern<sub>2</sub>, …, pattern<sub>n</sub>\]) {#multifuzzymatchanyhaystack-distance-pattern1-pattern2-patternn}
|
||||||
|
|
||||||
То же, что и `multiMatchAny`, но возвращает 1 если любой pattern соответствует haystack в пределах константного [редакционного расстояния](https://en.wikipedia.org/wiki/Edit_distance). Эта функция также находится в экспериментальном режиме и может быть очень медленной. За подробностями обращайтесь к [документации hyperscan](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching).
|
То же, что и `multiMatchAny`, но возвращает 1 если любой шаблон соответствует haystack в пределах константного [редакционного расстояния](https://en.wikipedia.org/wiki/Edit_distance). Эта функция основана на экспериментальной библиотеке [hyperscan](https://intel.github.io/hyperscan/dev-reference/compilation.html#approximate-matching) и может быть медленной для некоторых частных случаев. Производительность зависит от значения редакционного расстояния и используемых шаблонов, но всегда медленнее по сравнению с non-fuzzy вариантами.
|
||||||
|
|
||||||
## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern<sub>1</sub>, pattern<sub>2</sub>, …, pattern<sub>n</sub>\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn}
|
## multiFuzzyMatchAnyIndex(haystack, distance, \[pattern<sub>1</sub>, pattern<sub>2</sub>, …, pattern<sub>n</sub>\]) {#multifuzzymatchanyindexhaystack-distance-pattern1-pattern2-patternn}
|
||||||
|
|
||||||
|
112
docs/zh/sql-reference/functions/time-window-functions.md
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
---
|
||||||
|
toc_priority: 68
|
||||||
|
toc_title: 时间窗口
|
||||||
|
---
|
||||||
|
|
||||||
|
# 时间窗口函数 {#time-window-han-shu}
|
||||||
|
|
||||||
|
时间窗口函数用于获取窗口的起始(包含边界)和结束时间(不包含边界)。系统支持的时间窗口函数如下:
|
||||||
|
|
||||||
|
## tumble {#time-window-functions-tumble}
|
||||||
|
|
||||||
|
tumble窗口是连续的、不重叠的固定大小(`interval`)时间窗口。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
tumble(time_attr, interval [, timezone])
|
||||||
|
```
|
||||||
|
|
||||||
|
**参数**
|
||||||
|
- `time_attr` - [DateTime](../../sql-reference/data-types/datetime.md)类型的时间数据。
|
||||||
|
- `interval` - [Interval](../../sql-reference/data-types/special-data-types/interval.md)类型的窗口大小。
|
||||||
|
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) 类型的时区(可选参数).
|
||||||
|
|
||||||
|
**返回值**
|
||||||
|
|
||||||
|
- tumble窗口的开始(包含边界)和结束时间(不包含边界)
|
||||||
|
|
||||||
|
类型: `Tuple(DateTime, DateTime)`
|
||||||
|
|
||||||
|
**示例**
|
||||||
|
|
||||||
|
查询:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT tumble(now(), toIntervalDay('1'))
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─tumble(now(), toIntervalDay('1'))─────────────┐
|
||||||
|
│ ['2020-01-01 00:00:00','2020-01-02 00:00:00'] │
|
||||||
|
└───────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## hop {#time-window-functions-hop}
|
||||||
|
|
||||||
|
hop窗口是一个固定大小(`window_interval`)的时间窗口,并按照一个固定的滑动间隔(`hop_interval`)滑动。当滑动间隔小于窗口大小时,滑动窗口间存在重叠,此时一个数据可能存在于多个窗口。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
hop(time_attr, hop_interval, window_interval [, timezone])
|
||||||
|
```
|
||||||
|
|
||||||
|
**参数**
|
||||||
|
|
||||||
|
- `time_attr` - [DateTime](../../sql-reference/data-types/datetime.md)类型的时间数据。
|
||||||
|
- `hop_interval` - [Interval](../../sql-reference/data-types/special-data-types/interval.md)类型的滑动间隔,需要大于0。
|
||||||
|
- `window_interval` - [Interval](../../sql-reference/data-types/special-data-types/interval.md)类型的窗口大小,需要大于0。
|
||||||
|
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) 类型的时区(可选参数)。
|
||||||
|
|
||||||
|
**返回值**
|
||||||
|
|
||||||
|
- hop窗口的开始(包含边界)和结束时间(不包含边界)。由于一个数据可能存在于多个窗口,脱离window view单独调用该函数时只返回第一个窗口数据。
|
||||||
|
|
||||||
|
类型: `Tuple(DateTime, DateTime)`
|
||||||
|
|
||||||
|
**示例**
|
||||||
|
|
||||||
|
查询:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT hop(now(), INTERVAL '1' SECOND, INTERVAL '2' SECOND)
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─hop(now(), toIntervalSecond('1'), toIntervalSecond('2'))──┐
|
||||||
|
│ ('2020-01-14 16:58:22','2020-01-14 16:58:24') │
|
||||||
|
└───────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## tumbleStart {#time-window-functions-tumblestart}
|
||||||
|
|
||||||
|
返回tumble窗口的开始时间(包含边界)。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
tumbleStart(time_attr, interval [, timezone]);
|
||||||
|
```
|
||||||
|
|
||||||
|
## tumbleEnd {#time-window-functions-tumbleend}
|
||||||
|
|
||||||
|
返回tumble窗口的结束时间(不包含边界)。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
tumbleEnd(time_attr, interval [, timezone]);
|
||||||
|
```
|
||||||
|
|
||||||
|
## hopStart {#time-window-functions-hopstart}
|
||||||
|
|
||||||
|
返回hop窗口的开始时间(包含边界)。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
hopStart(time_attr, hop_interval, window_interval [, timezone]);
|
||||||
|
```
|
||||||
|
|
||||||
|
## hopEnd {#time-window-functions-hopend}
|
||||||
|
|
||||||
|
返回hop窗口的结束时间(不包含边界)。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
|
||||||
|
```
|
@ -5,7 +5,7 @@ toc_title: VIEW
|
|||||||
|
|
||||||
# CREATE VIEW {#create-view}
|
# CREATE VIEW {#create-view}
|
||||||
|
|
||||||
创建一个新视图。 有两种类型的视图:普通视图和物化视图。
|
创建一个新视图。 有两种类型的视图:普通视图,物化视图,Live视图和Window视图。
|
||||||
|
|
||||||
## Normal {#normal}
|
## Normal {#normal}
|
||||||
|
|
||||||
@ -241,3 +241,120 @@ Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table defa
|
|||||||
- 使用定期刷新从系统表中查看指标。
|
- 使用定期刷新从系统表中查看指标。
|
||||||
|
|
||||||
[原始文章](https://clickhouse.com/docs/en/sql-reference/statements/create/view/) <!--hide-->
|
[原始文章](https://clickhouse.com/docs/en/sql-reference/statements/create/view/) <!--hide-->
|
||||||
|
|
||||||
|
## Window View [Experimental] {#window-view}
|
||||||
|
|
||||||
|
!!! important "重要"
|
||||||
|
这是一项试验性功能,可能会在未来版本中以向后不兼容的方式进行更改。
|
||||||
|
通过[allow_experimental_window_view](../../../operations/settings/settings.md#allow-experimental-window-view)启用window view以及`WATCH`语句。输入命令
|
||||||
|
`set allow_experimental_window_view = 1`。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [ENGINE = engine] [WATERMARK = strategy] [ALLOWED_LATENESS = interval_function] AS SELECT ... GROUP BY time_window_function
|
||||||
|
```
|
||||||
|
|
||||||
|
Window view可以通过时间窗口聚合数据,并在满足窗口触发条件时自动触发对应窗口计算。其通过将计算状态保存降低处理延迟,支持将处理结果输出至目标表或通过`WATCH`语句输出至终端。
|
||||||
|
|
||||||
|
创建window view的方式和创建物化视图类似。Window view使用默认为`AggregatingMergeTree`的内部存储引擎存储计算中间状态。
|
||||||
|
|
||||||
|
### 时间窗口函数 {#window-view-shi-jian-chuang-kou-han-shu}
|
||||||
|
|
||||||
|
[时间窗口函数](../../functions/time-window-functions.md)用于获取窗口的起始和结束时间。Window view需要和时间窗口函数配合使用。
|
||||||
|
|
||||||
|
### 时间属性 {#window-view-shi-jian-shu-xing}
|
||||||
|
|
||||||
|
Window view 支持**处理时间**和**事件时间**两种时间类型。
|
||||||
|
|
||||||
|
**处理时间**为默认时间类型,该模式下window view使用本地机器时间计算窗口数据。“处理时间”时间类型计算简单,但具有不确定性。该模式下时间可以为时间窗口函数的第一个参数`time_attr`,或通过函数`now()`使用当前机器时间。下面的例子展示了使用“处理时间”创建window view的例子。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW wv AS SELECT count(number), tumbleStart(w_id) as w_start from date GROUP BY tumble(now(), INTERVAL '5' SECOND) as w_id
|
||||||
|
```
|
||||||
|
|
||||||
|
**事件时间** 是事件真实发生的时间,该时间往往在事件发生时便嵌入数据记录。事件时间处理提供较高的确定性,可以处理乱序数据以及迟到数据。Window view通过水位线(`WATERMARK`)启用事件时间处理。
|
||||||
|
|
||||||
|
Window view提供如下三种水位线策略:
|
||||||
|
|
||||||
|
* `STRICTLY_ASCENDING`: 提交观测到的最大时间作为水位线,小于最大观测时间的数据不算迟到。
|
||||||
|
* `ASCENDING`: 提交观测到的最大时间减1作为水位线。小于或等于最大观测时间的数据不算迟到。
|
||||||
|
* `BOUNDED`: WATERMARK=INTERVAL. 提交最大观测时间减去固定间隔(`INTERVAL`)做为水位线。
|
||||||
|
|
||||||
|
以下为使用`WATERMARK`创建window view的示例:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW wv WATERMARK=STRICTLY_ASCENDING AS SELECT count(number) FROM date GROUP BY tumble(timestamp, INTERVAL '5' SECOND);
|
||||||
|
CREATE WINDOW VIEW wv WATERMARK=ASCENDING AS SELECT count(number) FROM date GROUP BY tumble(timestamp, INTERVAL '5' SECOND);
|
||||||
|
CREATE WINDOW VIEW wv WATERMARK=INTERVAL '3' SECOND AS SELECT count(number) FROM date GROUP BY tumble(timestamp, INTERVAL '5' SECOND);
|
||||||
|
```
|
||||||
|
|
||||||
|
通常,窗口会在水位线到达时触发,水位线到达之后的数据会被丢弃。Window view可以通过设置`ALLOWED_LATENESS=INTERVAL`来开启迟到消息处理。示例如下:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW test.wv TO test.dst WATERMARK=ASCENDING ALLOWED_LATENESS=INTERVAL '2' SECOND AS SELECT count(a) AS count, tumbleEnd(wid) AS w_end FROM test.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid;
|
||||||
|
```
|
||||||
|
|
||||||
|
需要注意的是,迟到消息需要更新之前的处理结果。与在窗口结束时触发不同,迟到消息到达时window view会立即触发计算。因此,会导致同一个窗口输出多次计算结果。用户需要注意这种情况,并消除重复结果。
|
||||||
|
|
||||||
|
### 新窗口监控 {#window-view-xin-chuang-kou-jian-kong}
|
||||||
|
|
||||||
|
Window view可以通过`WATCH`语句将处理结果推送至终端,或通过`TO`语句将结果推送至数据表。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WATCH [db.]name [LIMIT n]
|
||||||
|
```
|
||||||
|
|
||||||
|
`WATCH`语句和`LIVE VIEW`中的类似。支持设置`LIMIT`参数,输出消息数目达到`LIMIT`限制时结束查询。
|
||||||
|
|
||||||
|
### 设置 {#window-view-she-zhi}
|
||||||
|
|
||||||
|
- `window_view_clean_interval`: window view清除过期数据间隔(单位为秒)。系统会定期清除过期数据,尚未触发的窗口数据不会被清除。
|
||||||
|
- `window_view_heartbeat_interval`: 用于判断watch查询活跃的心跳时间间隔。
|
||||||
|
|
||||||
|
### 示例 {#window-view-shi-li}
|
||||||
|
|
||||||
|
假设我们需要每10秒统计一次`data`表中的点击日志,且`data`表的结构如下:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE TABLE data ( `id` UInt64, `timestamp` DateTime) ENGINE = Memory;
|
||||||
|
```
|
||||||
|
|
||||||
|
首先,使用10秒大小的tumble函数创建window view。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW wv as select count(id), tumbleStart(w_id) as window_start from data group by tumble(timestamp, INTERVAL '10' SECOND) as w_id
|
||||||
|
```
|
||||||
|
|
||||||
|
随后,我们使用`WATCH`语句获取计算结果。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
WATCH wv
|
||||||
|
```
|
||||||
|
|
||||||
|
当日志插入表`data`时,
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
INSERT INTO data VALUES(1,now())
|
||||||
|
```
|
||||||
|
|
||||||
|
`WATCH`语句会输出如下结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─count(id)─┬────────window_start─┐
|
||||||
|
│ 1 │ 2020-01-14 16:56:40 │
|
||||||
|
└───────────┴─────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
或者,我们可以通过`TO`关键字将处理结果输出至另一张表。
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE WINDOW VIEW wv TO dst AS SELECT count(id), tumbleStart(w_id) as window_start FROM data GROUP BY tumble(timestamp, INTERVAL '10' SECOND) as w_id
|
||||||
|
```
|
||||||
|
|
||||||
|
ClickHouse测试中提供了更多的示例(以`*window_view*`命名)。
|
||||||
|
|
||||||
|
### Window View 使用场景 {#window-view-shi-yong-chang-jing}
|
||||||
|
|
||||||
|
Window view 在以下场景有用:
|
||||||
|
|
||||||
|
* **监控**: 以时间维度聚合及处理数据,并将处理结果输出至目标表。用户可通过目标表获取并操作计算结果。
|
||||||
|
* **分析**: 以时间维度进行数据分析. 当数据源非常庞大时,window view可以减少重复全表查询的计算量。
|
||||||
|
@ -473,3 +473,7 @@ if (ENABLE_TESTS AND USE_GTEST)
|
|||||||
add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_UNIT_TESTS_TARGETS})
|
add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_UNIT_TESTS_TARGETS})
|
||||||
add_dependencies(clickhouse-bundle clickhouse-tests)
|
add_dependencies(clickhouse-bundle clickhouse-tests)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
if (ENABLE_FUZZING)
|
||||||
|
add_compile_definitions(FUZZING_MODE=1)
|
||||||
|
endif ()
|
||||||
|
@ -20,9 +20,7 @@
|
|||||||
#include <base/argsToConfig.h>
|
#include <base/argsToConfig.h>
|
||||||
#include <base/find_symbols.h>
|
#include <base/find_symbols.h>
|
||||||
|
|
||||||
#if !defined(ARCADIA_BUILD)
|
|
||||||
#include <Common/config_version.h>
|
#include <Common/config_version.h>
|
||||||
#endif
|
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Common/formatReadable.h>
|
#include <Common/formatReadable.h>
|
||||||
#include <Common/TerminalSize.h>
|
#include <Common/TerminalSize.h>
|
||||||
@ -705,6 +703,12 @@ bool Client::processWithFuzzing(const String & full_query)
|
|||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!orig_ast)
|
||||||
|
{
|
||||||
|
// Can't continue after a parsing error
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
// `USE db` should not be executed
|
// `USE db` should not be executed
|
||||||
// since this will break every query after `DROP db`
|
// since this will break every query after `DROP db`
|
||||||
if (orig_ast->as<ASTUseQuery>())
|
if (orig_ast->as<ASTUseQuery>())
|
||||||
@ -712,12 +716,6 @@ bool Client::processWithFuzzing(const String & full_query)
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!orig_ast)
|
|
||||||
{
|
|
||||||
// Can't continue after a parsing error
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't repeat:
|
// Don't repeat:
|
||||||
// - INSERT -- Because the tables may grow too big.
|
// - INSERT -- Because the tables may grow too big.
|
||||||
// - CREATE -- Because first we run the unmodified query, it will succeed,
|
// - CREATE -- Because first we run the unmodified query, it will succeed,
|
||||||
|
@ -22,10 +22,8 @@
|
|||||||
#include <pwd.h>
|
#include <pwd.h>
|
||||||
#include <Coordination/FourLetterCommand.h>
|
#include <Coordination/FourLetterCommand.h>
|
||||||
|
|
||||||
#if !defined(ARCADIA_BUILD)
|
|
||||||
#include "config_core.h"
|
#include "config_core.h"
|
||||||
#include "Common/config_version.h"
|
#include "Common/config_version.h"
|
||||||
#endif
|
|
||||||
|
|
||||||
#if USE_SSL
|
#if USE_SSL
|
||||||
# include <Poco/Net/Context.h>
|
# include <Poco/Net/Context.h>
|
||||||
|
@ -17,3 +17,9 @@ clickhouse_program_add(local)
|
|||||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||||
target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
|
target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
if (ENABLE_FUZZING)
|
||||||
|
add_compile_definitions(FUZZING_MODE=1)
|
||||||
|
set (WITH_COVERAGE ON)
|
||||||
|
target_link_libraries(clickhouse-local-lib PRIVATE ${LIB_FUZZING_ENGINE})
|
||||||
|
endif ()
|
||||||
|
@ -41,6 +41,10 @@
|
|||||||
#include <base/argsToConfig.h>
|
#include <base/argsToConfig.h>
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
|
|
||||||
|
#if defined(FUZZING_MODE)
|
||||||
|
#include <Functions/getFuzzerData.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace fs = std::filesystem;
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
|
|
||||||
@ -384,12 +388,6 @@ void LocalServer::setupUsers()
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
String LocalServer::getQueryTextPrefix()
|
|
||||||
{
|
|
||||||
return getInitialCreateTableQuery();
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void LocalServer::connect()
|
void LocalServer::connect()
|
||||||
{
|
{
|
||||||
connection_parameters = ConnectionParameters(config());
|
connection_parameters = ConnectionParameters(config());
|
||||||
@ -407,10 +405,25 @@ try
|
|||||||
std::cout << std::fixed << std::setprecision(3);
|
std::cout << std::fixed << std::setprecision(3);
|
||||||
std::cerr << std::fixed << std::setprecision(3);
|
std::cerr << std::fixed << std::setprecision(3);
|
||||||
|
|
||||||
|
#if defined(FUZZING_MODE)
|
||||||
|
static bool first_time = true;
|
||||||
|
if (first_time)
|
||||||
|
{
|
||||||
|
|
||||||
|
if (queries_files.empty() && !config().has("query"))
|
||||||
|
{
|
||||||
|
std::cerr << "\033[31m" << "ClickHouse compiled in fuzzing mode." << "\033[0m" << std::endl;
|
||||||
|
std::cerr << "\033[31m" << "You have to provide a query with --query or --queries-file option." << "\033[0m" << std::endl;
|
||||||
|
std::cerr << "\033[31m" << "The query have to use function getFuzzerData() inside." << "\033[0m" << std::endl;
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
is_interactive = false;
|
||||||
|
#else
|
||||||
is_interactive = stdin_is_a_tty
|
is_interactive = stdin_is_a_tty
|
||||||
&& (config().hasOption("interactive")
|
&& (config().hasOption("interactive")
|
||||||
|| (!config().has("query") && !config().has("table-structure") && queries_files.empty()));
|
|| (!config().has("query") && !config().has("table-structure") && queries_files.empty()));
|
||||||
|
#endif
|
||||||
if (!is_interactive)
|
if (!is_interactive)
|
||||||
{
|
{
|
||||||
/// We will terminate process on error
|
/// We will terminate process on error
|
||||||
@ -439,6 +452,15 @@ try
|
|||||||
|
|
||||||
connect();
|
connect();
|
||||||
|
|
||||||
|
#ifdef FUZZING_MODE
|
||||||
|
first_time = false;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
String initial_query = getInitialCreateTableQuery();
|
||||||
|
if (!initial_query.empty())
|
||||||
|
processQueryText(initial_query);
|
||||||
|
|
||||||
if (is_interactive && !delayed_interactive)
|
if (is_interactive && !delayed_interactive)
|
||||||
{
|
{
|
||||||
runInteractive();
|
runInteractive();
|
||||||
@ -451,7 +473,9 @@ try
|
|||||||
runInteractive();
|
runInteractive();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef FUZZING_MODE
|
||||||
cleanup();
|
cleanup();
|
||||||
|
#endif
|
||||||
return Application::EXIT_OK;
|
return Application::EXIT_OK;
|
||||||
}
|
}
|
||||||
catch (const DB::Exception & e)
|
catch (const DB::Exception & e)
|
||||||
@ -653,7 +677,7 @@ void LocalServer::processConfig()
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static std::string getHelpHeader()
|
[[ maybe_unused ]] static std::string getHelpHeader()
|
||||||
{
|
{
|
||||||
return
|
return
|
||||||
"usage: clickhouse-local [initial table definition] [--query <query>]\n"
|
"usage: clickhouse-local [initial table definition] [--query <query>]\n"
|
||||||
@ -669,7 +693,7 @@ static std::string getHelpHeader()
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static std::string getHelpFooter()
|
[[ maybe_unused ]] static std::string getHelpFooter()
|
||||||
{
|
{
|
||||||
return
|
return
|
||||||
"Example printing memory used by each Unix user:\n"
|
"Example printing memory used by each Unix user:\n"
|
||||||
@ -680,11 +704,23 @@ static std::string getHelpFooter()
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void LocalServer::printHelpMessage(const OptionsDescription & options_description)
|
void LocalServer::printHelpMessage([[maybe_unused]] const OptionsDescription & options_description)
|
||||||
{
|
{
|
||||||
|
#if defined(FUZZING_MODE)
|
||||||
|
std::cout <<
|
||||||
|
"usage: clickhouse <clickhouse-local arguments> -- <libfuzzer arguments>\n"
|
||||||
|
"Note: It is important not to use only one letter keys with single dash for \n"
|
||||||
|
"for clickhouse-local arguments. It may work incorrectly.\n"
|
||||||
|
|
||||||
|
"ClickHouse is build with coverage guided fuzzer (libfuzzer) inside it.\n"
|
||||||
|
"You have to provide a query which contains getFuzzerData function.\n"
|
||||||
|
"This will take the data from fuzzing engine, pass it to getFuzzerData function and execute a query.\n"
|
||||||
|
"Each time the data will be different, and it will last until some segfault or sanitizer assertion is found. \n";
|
||||||
|
#else
|
||||||
std::cout << getHelpHeader() << "\n";
|
std::cout << getHelpHeader() << "\n";
|
||||||
std::cout << options_description.main_description.value() << "\n";
|
std::cout << options_description.main_description.value() << "\n";
|
||||||
std::cout << getHelpFooter() << "\n";
|
std::cout << getHelpFooter() << "\n";
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -781,3 +817,51 @@ int mainEntryClickHouseLocal(int argc, char ** argv)
|
|||||||
return code ? code : 1;
|
return code ? code : 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(FUZZING_MODE)
|
||||||
|
|
||||||
|
std::optional<DB::LocalServer> fuzz_app;
|
||||||
|
|
||||||
|
extern "C" int LLVMFuzzerInitialize(int * pargc, char *** pargv)
|
||||||
|
{
|
||||||
|
int & argc = *pargc;
|
||||||
|
char ** argv = *pargv;
|
||||||
|
|
||||||
|
/// As a user you can add flags to clickhouse binary in fuzzing mode as follows
|
||||||
|
/// clickhouse <set of clickhouse-local specific flag> -- <set of libfuzzer flags>
|
||||||
|
|
||||||
|
/// Calculate the position of delimiter "--" that separates arguments
|
||||||
|
/// of clickhouse-local and libfuzzer
|
||||||
|
int pos_delim = argc;
|
||||||
|
for (int i = 0; i < argc; ++i)
|
||||||
|
{
|
||||||
|
if (strcmp(argv[i], "--") == 0)
|
||||||
|
{
|
||||||
|
pos_delim = i;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initialize clickhouse-local app
|
||||||
|
fuzz_app.emplace();
|
||||||
|
fuzz_app->init(pos_delim, argv);
|
||||||
|
|
||||||
|
/// We will leave clickhouse-local specific arguments as is, because libfuzzer will ignore
|
||||||
|
/// all keys starting with --
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
|
||||||
|
try
|
||||||
|
{
|
||||||
|
auto input = String(reinterpret_cast<const char *>(data), size);
|
||||||
|
DB::FunctionGetFuzzerData::update(input);
|
||||||
|
fuzz_app->run();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
@@ -37,7 +37,6 @@ protected:
     void processError(const String & query) const override;
     String getName() const override { return "local"; }

-    String getQueryTextPrefix() override;
     void printHelpMessage(const OptionsDescription & options_description) override;
     void addOptions(OptionsDescription & options_description) override;

@@ -13,9 +13,7 @@
 #include <tuple>
 #include <utility> /// pair

-#if !defined(ARCADIA_BUILD)
 #include "config_tools.h"
-#endif

 #include <Common/StringUtils/StringUtils.h>
 #include <Common/getHashOfLoadedBinary.h>
@@ -88,6 +86,7 @@ namespace

 using MainFunc = int (*)(int, char**);

+#if !defined(FUZZING_MODE)

 /// Add an item here to register new application
 std::pair<const char *, MainFunc> clickhouse_applications[] =
@@ -141,7 +140,6 @@ std::pair<const char *, MainFunc> clickhouse_applications[] =
     {"hash-binary", mainEntryClickHouseHashBinary},
 };


 int printHelp(int, char **)
 {
     std::cerr << "Use one of the following commands:" << std::endl;
@@ -150,7 +148,6 @@ int printHelp(int, char **)
     return -1;
 }


 bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)
 {
     /// Use app if the first arg 'app' is passed (the arg should be quietly removed)
@@ -170,6 +167,7 @@ bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)
     std::string app_name = "clickhouse-" + app_suffix;
     return !argv.empty() && (app_name == argv[0] || endsWith(argv[0], "/" + app_name));
 }
+#endif


 enum class InstructionFail
@@ -342,9 +340,13 @@ struct Checker
 ///
 /// extern bool inside_main;
 /// class C { C() { assert(inside_main); } };
+#ifndef FUZZING_MODE
 bool inside_main = false;
+#else
+bool inside_main = true;
+#endif

+#if !defined(FUZZING_MODE)
 int main(int argc_, char ** argv_)
 {
     inside_main = true;
@@ -375,3 +377,4 @@ int main(int argc_, char ** argv_)

     return main_func(static_cast<int>(argv.size()), argv.data());
 }
+#endif
@@ -82,10 +82,8 @@
 #include <Compression/CompressionCodecEncrypted.h>
 #include <filesystem>

-#if !defined(ARCADIA_BUILD)
 #include "config_core.h"
 #include "Common/config_version.h"
-#endif

 #if defined(OS_LINUX)
 #   include <sys/mman.h>
@@ -96,7 +94,7 @@
 #endif

 #if USE_SSL
-#   if USE_INTERNAL_SSL_LIBRARY && !defined(ARCADIA_BUILD)
+#   if USE_INTERNAL_SSL_LIBRARY
 #       include <Compression/CompressionCodecEncrypted.h>
 #   endif
 #   include <Poco/Net/Context.h>
@@ -650,6 +650,38 @@
             </replica>
         </shard>
     </test_shard_localhost>
+    <test_cluster_one_shard_three_replicas_localhost>
+        <shard>
+            <internal_replication>false</internal_replication>
+            <replica>
+                <host>127.0.0.1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>127.0.0.2</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>127.0.0.3</host>
+                <port>9000</port>
+            </replica>
+        </shard>
+        <!--shard>
+            <internal_replication>false</internal_replication>
+            <replica>
+                <host>127.0.0.1</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>127.0.0.2</host>
+                <port>9000</port>
+            </replica>
+            <replica>
+                <host>127.0.0.3</host>
+                <port>9000</port>
+            </replica>
+        </shard-->
+    </test_cluster_one_shard_three_replicas_localhost>
     <test_cluster_two_shards_localhost>
         <shard>
             <replica>
@@ -2,6 +2,7 @@
 #include <Access/QuotaUsage.h>
 #include <Common/Exception.h>
 #include <Common/quoteString.h>
+#include <Common/thread_local_rng.h>
 #include <base/chrono_io.h>
 #include <base/range.h>
 #include <boost/smart_ptr/make_shared.hpp>
@@ -15,6 +16,7 @@ namespace ErrorCodes
     extern const int QUOTA_EXPIRED;
 }


 struct EnabledQuota::Impl
 {
     [[noreturn]] static void throwQuotaExceed(
@@ -35,54 +37,6 @@ struct EnabledQuota::Impl
     }

-    /// Returns the end of the current interval. If the passed `current_time` is greater than that end,
-    /// the function automatically recalculates the interval's end by adding the interval's duration
-    /// one or more times until the interval's end is greater than `current_time`.
-    /// If that recalculation occurs the function also resets amounts of resources used and sets the variable
-    /// `counters_were_reset`.
-    static std::chrono::system_clock::time_point getEndOfInterval(
-        const Interval & interval, std::chrono::system_clock::time_point current_time, bool & counters_were_reset)
-    {
-        auto & end_of_interval = interval.end_of_interval;
-        auto end_loaded = end_of_interval.load();
-        auto end = std::chrono::system_clock::time_point{end_loaded};
-        if (current_time < end)
-        {
-            counters_were_reset = false;
-            return end;
-        }
-
-        bool need_reset_counters = false;
-
-        do
-        {
-            /// Calculate the end of the next interval:
-            ///  |                 X                  |
-            /// end          current_time        next_end = end + duration * n
-            /// where n is an integer number, n >= 1.
-            const auto duration = interval.duration;
-            UInt64 n = static_cast<UInt64>((current_time - end + duration) / duration);
-            end = end + duration * n;
-            if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch()))
-            {
-                /// We reset counters only if the interval's end has been calculated before.
-                /// If it hasn't we just calculate the interval's end for the first time and don't reset counters yet.
-                need_reset_counters = (end_loaded.count() != 0);
-                break;
-            }
-            end = std::chrono::system_clock::time_point{end_loaded};
-        }
-        while (current_time >= end);
-
-        if (need_reset_counters)
-        {
-            boost::range::fill(interval.used, 0);
-            counters_were_reset = true;
-        }
-        return end;
-    }
-
     static void used(
         const String & user_name,
         const Intervals & intervals,
@@ -91,24 +45,22 @@ struct EnabledQuota::Impl
         std::chrono::system_clock::time_point current_time,
         bool check_exceeded)
     {
+        auto quota_type_i = static_cast<size_t>(quota_type);
         for (const auto & interval : intervals.intervals)
         {
-            auto quota_type_i = static_cast<size_t>(quota_type);
             QuotaValue used = (interval.used[quota_type_i] += value);
             QuotaValue max = interval.max[quota_type_i];
             if (!max)
                 continue;

             if (used > max)
             {
                 bool counters_were_reset = false;
-                auto end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
+                auto end_of_interval = interval.getEndOfInterval(current_time, counters_were_reset);
                 if (counters_were_reset)
-                {
                     used = (interval.used[quota_type_i] += value);
-                    if ((used > max) && check_exceeded)
-                        throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
-                }
-                else if (check_exceeded)
+                if (check_exceeded && (used > max))
                     throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
             }
         }
@@ -127,10 +79,11 @@ struct EnabledQuota::Impl
             QuotaValue max = interval.max[quota_type_i];
             if (!max)
                 continue;

             if (used > max)
             {
                 bool counters_were_reset = false;
-                std::chrono::system_clock::time_point end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
+                auto end_of_interval = interval.getEndOfInterval(current_time, counters_were_reset);
                 if (!counters_were_reset)
                     throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
             }
@@ -145,17 +98,32 @@ struct EnabledQuota::Impl
         for (auto quota_type : collections::range(QuotaType::MAX))
             checkExceeded(user_name, intervals, quota_type, current_time);
     }

+    static std::chrono::system_clock::duration randomDuration(std::chrono::seconds max)
+    {
+        auto count = std::chrono::duration_cast<std::chrono::system_clock::duration>(max).count();
+        std::uniform_int_distribution<Int64> distribution{0, count - 1};
+        return std::chrono::system_clock::duration(distribution(thread_local_rng));
+    }
 };


-EnabledQuota::Interval::Interval()
+EnabledQuota::Interval::Interval(std::chrono::seconds duration_, bool randomize_interval_, std::chrono::system_clock::time_point current_time_)
+    : duration(duration_) , randomize_interval(randomize_interval_)
 {
+    std::chrono::system_clock::time_point initial_end{};
+    if (randomize_interval_)
+        initial_end += Impl::randomDuration(duration_);
+    end_of_interval = initial_end.time_since_epoch();
+
     for (auto quota_type : collections::range(QuotaType::MAX))
     {
         auto quota_type_i = static_cast<size_t>(quota_type);
         used[quota_type_i].store(0);
         max[quota_type_i] = 0;
     }
+
+    getEndOfInterval(current_time_); /// Force updating the end of the interval for the first time.
 }


@@ -177,6 +145,55 @@ EnabledQuota::Interval & EnabledQuota::Interval::operator =(const Interval & src
 }


+/// Returns the end of the current interval. If the passed `current_time` is greater than that end,
+/// the function automatically recalculates the interval's end by adding the interval's duration
+/// one or more times until the interval's end is greater than `current_time`.
+/// If that recalculation occurs the function also resets amounts of resources used and sets the variable
+/// `counters_were_reset`.
+std::chrono::system_clock::time_point EnabledQuota::Interval::getEndOfInterval(std::chrono::system_clock::time_point current_time) const
+{
+    bool counters_were_reset;
+    return getEndOfInterval(current_time, counters_were_reset);
+}
+
+std::chrono::system_clock::time_point EnabledQuota::Interval::getEndOfInterval(std::chrono::system_clock::time_point current_time, bool & counters_were_reset) const
+{
+    auto end_loaded = end_of_interval.load();
+    auto end = std::chrono::system_clock::time_point{end_loaded};
+    if (current_time < end)
+    {
+        counters_were_reset = false;
+        return end;
+    }
+
+    bool need_reset_counters = false;
+
+    do
+    {
+        /// Calculate the end of the next interval:
+        ///  |                 X                  |
+        /// end          current_time        next_end = end + duration * n
+        /// where n is an integer number, n >= 1.
+        UInt64 n = static_cast<UInt64>((current_time - end + duration) / duration);
+        end = end + duration * n;
+        if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch()))
+        {
+            need_reset_counters = true;
+            break;
+        }
+        end = std::chrono::system_clock::time_point{end_loaded};
+    }
+    while (current_time >= end);
+
+    if (need_reset_counters)
+    {
+        boost::range::fill(used, 0);
+        counters_were_reset = true;
+    }
+    return end;
+}
+
+
 std::optional<QuotaUsage> EnabledQuota::Intervals::getUsage(std::chrono::system_clock::time_point current_time) const
 {
     if (!quota_id)
@@ -192,8 +209,7 @@ std::optional<QuotaUsage> EnabledQuota::Intervals::getUsage(std::chrono::system_
         auto & out = usage.intervals.back();
         out.duration = in.duration;
         out.randomize_interval = in.randomize_interval;
-        bool counters_were_reset = false;
-        out.end_of_interval = Impl::getEndOfInterval(in, current_time, counters_were_reset);
+        out.end_of_interval = in.getEndOfInterval(current_time);
         for (auto quota_type : collections::range(QuotaType::MAX))
         {
             auto quota_type_i = static_cast<size_t>(quota_type);
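A standalone sketch (not part of the patch) of the interval-end arithmetic used above: pick the smallest n >= 1 such that end + duration * n exceeds current_time. All names below are illustrative only.

#include <cassert>
#include <chrono>
#include <cstdint>

/// Illustration only: mirrors the formula in Interval::getEndOfInterval() above.
static std::chrono::system_clock::time_point nextIntervalEnd(
    std::chrono::system_clock::time_point end,
    std::chrono::system_clock::duration duration,
    std::chrono::system_clock::time_point current_time)
{
    if (current_time < end)
        return end;
    auto n = static_cast<uint64_t>((current_time - end + duration) / duration);
    return end + duration * n;
}

int main()
{
    using namespace std::chrono;
    auto start = system_clock::time_point{};            /// previous interval end
    auto now = system_clock::time_point{} + hours(25);  /// 25 hours later
    auto next = nextIntervalEnd(start, hours(10), now);
    assert(next == start + hours(30)); /// 25 h falls into the third 10 h window
}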
@@ -73,9 +73,13 @@ private:
         bool randomize_interval = false;
         mutable std::atomic<std::chrono::system_clock::duration> end_of_interval;

-        Interval();
+        Interval(std::chrono::seconds duration_, bool randomize_interval_, std::chrono::system_clock::time_point current_time_);

         Interval(const Interval & src) { *this = src; }
         Interval & operator =(const Interval & src);
+
+        std::chrono::system_clock::time_point getEndOfInterval(std::chrono::system_clock::time_point current_time) const;
+        std::chrono::system_clock::time_point getEndOfInterval(std::chrono::system_clock::time_point current_time, bool & counters_were_reset) const;
     };

     struct Intervals
@@ -4,7 +4,6 @@
 #include <Access/QuotaUsage.h>
 #include <Access/AccessControl.h>
 #include <Common/Exception.h>
-#include <Common/thread_local_rng.h>
 #include <base/range.h>
 #include <boost/range/adaptor/map.hpp>
 #include <boost/range/algorithm/copy.hpp>
@@ -22,17 +21,6 @@ namespace ErrorCodes
 }


-namespace
-{
-    std::chrono::system_clock::duration randomDuration(std::chrono::seconds max)
-    {
-        auto count = std::chrono::duration_cast<std::chrono::system_clock::duration>(max).count();
-        std::uniform_int_distribution<Int64> distribution{0, count - 1};
-        return std::chrono::system_clock::duration(distribution(thread_local_rng));
-    }
-}
-
-
 void QuotaCache::QuotaInfo::setQuota(const QuotaPtr & quota_, const UUID & quota_id_)
 {
     quota = quota_;
@@ -94,18 +82,21 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::getOrBui
     auto it = key_to_intervals.find(key);
     if (it != key_to_intervals.end())
         return it->second;
-    return rebuildIntervals(key);
+    return rebuildIntervals(key, std::chrono::system_clock::now());
 }


 void QuotaCache::QuotaInfo::rebuildAllIntervals()
 {
+    if (key_to_intervals.empty())
+        return;
+    auto current_time = std::chrono::system_clock::now();
     for (const String & key : key_to_intervals | boost::adaptors::map_keys)
-        rebuildIntervals(key);
+        rebuildIntervals(key, current_time);
 }


-boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildIntervals(const String & key)
+boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildIntervals(const String & key, std::chrono::system_clock::time_point current_time)
 {
     auto new_intervals = boost::make_shared<Intervals>();
     new_intervals->quota_name = quota->getName();
@@ -115,14 +106,8 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildI
     intervals.reserve(quota->all_limits.size());
     for (const auto & limits : quota->all_limits)
     {
-        intervals.emplace_back();
+        intervals.emplace_back(limits.duration, limits.randomize_interval, current_time);
         auto & interval = intervals.back();
-        interval.duration = limits.duration;
-        std::chrono::system_clock::time_point end_of_interval{};
-        interval.randomize_interval = limits.randomize_interval;
-        if (limits.randomize_interval)
-            end_of_interval += randomDuration(limits.duration);
-        interval.end_of_interval = end_of_interval.time_since_epoch();
         for (auto quota_type : collections::range(QuotaType::MAX))
         {
             auto quota_type_i = static_cast<size_t>(quota_type);
@@ -43,7 +43,7 @@ private:

     String calculateKey(const EnabledQuota & enabled_quota) const;
     boost::shared_ptr<const Intervals> getOrBuildIntervals(const String & key);
-    boost::shared_ptr<const Intervals> rebuildIntervals(const String & key);
+    boost::shared_ptr<const Intervals> rebuildIntervals(const String & key, std::chrono::system_clock::time_point current_time);
     void rebuildAllIntervals();

     QuotaPtr quota;
@@ -20,6 +20,7 @@

 namespace DB
 {

 struct Settings;

 template <typename T> constexpr bool DecimalOrExtendedInt =
@@ -42,39 +43,19 @@ struct AvgFraction
     /// Invoked only is either Numerator or Denominator are Decimal.
     Float64 NO_SANITIZE_UNDEFINED divideIfAnyDecimal(UInt32 num_scale, UInt32 denom_scale [[maybe_unused]]) const
     {
-        if constexpr (is_decimal<Numerator> && is_decimal<Denominator>)
-        {
-            // According to the docs, num(S1) / denom(S2) would have scale S1
-
-            if constexpr (std::is_same_v<Numerator, Decimal256> && std::is_same_v<Denominator, Decimal128>)
-                ///Special case as Decimal256 / Decimal128 = compile error (as Decimal128 is not parametrized by a wide
-                ///int), but an __int128 instead
-                return DecimalUtils::convertTo<Float64>(
-                    numerator / (denominator.template convertTo<Decimal256>()), num_scale);
-            else
-                return DecimalUtils::convertTo<Float64>(numerator / denominator, num_scale);
-        }
-
-        /// Numerator is always casted to Float64 to divide correctly if the denominator is not Float64.
-        Float64 num_converted;
-
+        Float64 numerator_float;
         if constexpr (is_decimal<Numerator>)
-            num_converted = DecimalUtils::convertTo<Float64>(numerator, num_scale);
+            numerator_float = DecimalUtils::convertTo<Float64>(numerator, num_scale);
         else
-            num_converted = static_cast<Float64>(numerator); /// all other types, including extended integral.
-
-        std::conditional_t<DecimalOrExtendedInt<Denominator>,
-            Float64, Denominator> denom_converted;
+            numerator_float = numerator;

+        Float64 denominator_float;
         if constexpr (is_decimal<Denominator>)
-            denom_converted = DecimalUtils::convertTo<Float64>(denominator, denom_scale);
-        else if constexpr (DecimalOrExtendedInt<Denominator>)
-            /// no way to divide Float64 and extended integral type without an explicit cast.
-            denom_converted = static_cast<Float64>(denominator);
+            denominator_float = DecimalUtils::convertTo<Float64>(denominator, denom_scale);
         else
-            denom_converted = denominator; /// can divide on float, no cast required.
+            denominator_float = denominator;

-        return num_converted / denom_converted;
+        return numerator_float / denominator_float;
     }

     Float64 NO_SANITIZE_UNDEFINED divide() const
@@ -237,9 +218,9 @@ public:
     using ColVecType = ColumnVectorOrDecimal<T>;


-    void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final
+    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final
     {
-        this->data(place).numerator += static_cast<const ColVecType &>(*columns[0]).getData()[row_num];
+        increment(place, static_cast<const ColVecType &>(*columns[0]).getData()[row_num]);
         ++this->data(place).denominator;
     }

@@ -259,7 +240,7 @@ public:
             sum_data.addMany(column.getData().data(), batch_size);
             this->data(place).denominator += batch_size;
         }
-        this->data(place).numerator += sum_data.sum;
+        increment(place, sum_data.sum);
     }

     void addBatchSinglePlaceNotNull(
@@ -289,7 +270,7 @@ public:
             sum_data.addManyNotNull(column.getData().data(), null_map, batch_size);
             this->data(place).denominator += batch_size - countBytesInFilter(null_map, batch_size);
         }
-        this->data(place).numerator += sum_data.sum;
+        increment(place, sum_data.sum);
     }

     String getName() const override { return "avg"; }
@@ -317,5 +298,10 @@ public:

 #endif

+private:
+    void NO_SANITIZE_UNDEFINED increment(AggregateDataPtr __restrict place, Numerator inc) const
+    {
+        this->data(place).numerator += inc;
+    }
 };
 }
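Background for the conversion above, as a hedged standalone sketch: a Decimal with scale S stores the logical value multiplied by 10^S as an integer, so converting to Float64 divides the raw value by 10^S. decimalToFloat64 below is a toy stand-in for DecimalUtils::convertTo<Float64>, not the real API.

#include <cassert>
#include <cmath>
#include <cstdint>

/// Toy stand-in: a decimal with scale S stores value * 10^S as an integer.
static double decimalToFloat64(int64_t raw, uint32_t scale)
{
    return static_cast<double>(raw) / std::pow(10.0, scale);
}

int main()
{
    /// 123.45 stored as a scale-2 decimal -> raw value 12345
    /// 6.789  stored as a scale-3 decimal -> raw value 6789
    double numerator = decimalToFloat64(12345, 2);    /// 123.45
    double denominator = decimalToFloat64(6789, 3);   /// 6.789
    double result = numerator / denominator;          /// same shape as divideIfAnyDecimal()
    assert(std::fabs(result - 123.45 / 6.789) < 1e-9);
}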
@@ -82,17 +82,17 @@ createAggregateFunctionAvgWeighted(const std::string & name, const DataTypes & a
     const bool left_decimal = isDecimal(data_type);
     const bool right_decimal = isDecimal(data_type_weight);

+    /// We multiply value by weight, so actual scale of numerator is <scale of value> + <scale of weight>
     if (left_decimal && right_decimal)
         ptr.reset(create(*data_type, *data_type_weight,
             argument_types,
-            getDecimalScale(*data_type), getDecimalScale(*data_type_weight)));
+            getDecimalScale(*data_type) + getDecimalScale(*data_type_weight), getDecimalScale(*data_type_weight)));
     else if (left_decimal)
         ptr.reset(create(*data_type, *data_type_weight, argument_types,
             getDecimalScale(*data_type)));
     else if (right_decimal)
         ptr.reset(create(*data_type, *data_type_weight, argument_types,
-            // numerator is not decimal, so its scale is 0
-            0, getDecimalScale(*data_type_weight)));
+            getDecimalScale(*data_type_weight), getDecimalScale(*data_type_weight)));
     else
         ptr.reset(create(*data_type, *data_type_weight, argument_types));
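The scale arithmetic behind the changed numerator scale, shown as a small self-contained check (example values are assumptions, not from the patch): multiplying two decimals adds their scales.

#include <cassert>
#include <cstdint>

int main()
{
    /// value  = 1.25  as a scale-2 decimal -> raw 125
    /// weight = 0.500 as a scale-3 decimal -> raw 500
    int64_t raw_value = 125, raw_weight = 500;
    uint32_t value_scale = 2, weight_scale = 3;

    /// The raw product represents the logical product scaled by 10^(2+3).
    int64_t raw_product = raw_value * raw_weight;        /// 62500
    uint32_t product_scale = value_scale + weight_scale; /// 5
    assert(raw_product == 62500 && product_scale == 5);  /// 0.62500 == 1.25 * 0.500
}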
@@ -517,6 +517,8 @@ if (USE_BZIP2)
     target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BZIP2_INCLUDE_DIR})
 endif()

+dbms_target_link_libraries(PUBLIC consistent-hashing)
+
 include ("${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake")

 if (ENABLE_TESTS AND USE_GTEST)
@@ -1414,17 +1414,11 @@ void ClientBase::runInteractive()
     highlight_callback = highlight;

     ReplxxLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters, highlight_callback);

-#elif defined(USE_READLINE) && USE_READLINE
-    ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters);
 #else
     LineReader lr(history_file, config().has("multiline"), query_extenders, query_delimiters);
 #endif

-    /// Enable bracketed-paste-mode only when multiquery is enabled and multiline is
-    /// disabled, so that we are able to paste and execute multiline queries in a whole
-    /// instead of erroring out, while be less intrusive.
-    if (config().has("multiquery") && !config().has("multiline"))
+    /// Enable bracketed-paste-mode so that we are able to paste multiline queries as a whole.
     lr.enableBracketedPaste();

     do
@@ -1497,17 +1491,14 @@ void ClientBase::runNonInteractive()
 {
     auto process_multi_query_from_file = [&](const String & file)
     {
-        auto text = getQueryTextPrefix();
         String queries_from_file;

         ReadBufferFromFile in(file);
         readStringUntilEOF(queries_from_file, in);

-        text += queries_from_file;
-        return executeMultiQuery(text);
+        return executeMultiQuery(queries_from_file);
     };

-    /// Read all queries into `text`.
     for (const auto & queries_file : queries_files)
     {
         for (const auto & interleave_file : interleave_queries_files)
@@ -1522,9 +1513,6 @@ void ClientBase::runNonInteractive()
     }

     String text;
-    if (is_multiquery)
-        text = getQueryTextPrefix();
-
     if (config().has("query"))
     {
         text += config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
@@ -78,9 +78,6 @@ protected:
         String & query_to_execute, ASTPtr & parsed_query, const String & all_queries_text,
         std::optional<Exception> & current_exception);

-    /// For non-interactive multi-query mode get queries text prefix.
-    virtual String getQueryTextPrefix() { return ""; }
-
     static void clearTerminal();
     void showClientVersion();

@@ -100,9 +97,10 @@ protected:
         const std::vector<Arguments> & external_tables_arguments) = 0;
     virtual void processConfig() = 0;

-private:
+protected:
     bool processQueryText(const String & text);

+private:
     void receiveResult(ASTPtr parsed_query);
     bool receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled);
     void receiveLogs(ASTPtr parsed_query);
@@ -603,6 +603,14 @@ void Connection::sendReadTaskResponse(const String & response)
     out->next();
 }


+void Connection::sendMergeTreeReadTaskResponse(const PartitionReadResponse & response)
+{
+    writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out);
+    response.serialize(*out);
+    out->next();
+}
+
 void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String & name)
 {
     /// NOTE 'Throttler' is not used in this method (could use, but it's not important right now).
@@ -872,6 +880,10 @@ Packet Connection::receivePacket()
         case Protocol::Server::ReadTaskRequest:
             return res;

+        case Protocol::Server::MergeTreeReadTaskRequest:
+            res.request = receivePartitionReadRequest();
+            return res;
+
         case Protocol::Server::ProfileEvents:
             res.block = receiveProfileEvents();
             return res;
@@ -1023,6 +1035,13 @@ ProfileInfo Connection::receiveProfileInfo() const
     return profile_info;
 }

+
+PartitionReadRequest Connection::receivePartitionReadRequest() const
+{
+    PartitionReadRequest request;
+    request.deserialize(*in);
+    return request;
+}
+

 void Connection::throwUnexpectedPacket(UInt64 packet_type, const char * expected) const
 {
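The new response packet follows the usual framing of this protocol: a varint packet-type tag, then the serialized payload written to the same buffer. A rough standalone sketch of that framing with a plain byte vector; writeVarUInt here is a local re-implementation and the tag value is hypothetical, not the real Protocol constant.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

/// LEB128-style varint: 7 payload bits per byte, high bit set while more bytes follow.
static void writeVarUInt(uint64_t value, std::vector<uint8_t> & out)
{
    do
    {
        uint8_t byte = value & 0x7F;
        value >>= 7;
        if (value)
            byte |= 0x80;
        out.push_back(byte);
    } while (value);
}

int main()
{
    const uint64_t merge_tree_read_task_response_tag = 5; /// hypothetical tag value, for illustration only
    std::vector<uint8_t> wire;
    writeVarUInt(merge_tree_read_task_response_tag, wire); /// packet type first
    std::string payload = "serialized response";           /// then the serialized body
    wire.insert(wire.end(), payload.begin(), payload.end());
    assert(wire.size() == 1 + payload.size());
}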
@@ -16,6 +16,8 @@

 #include <Compression/ICompressionCodec.h>

+#include <Storages/MergeTree/RequestResponse.h>
+
 #include <atomic>
 #include <optional>

@@ -104,6 +106,8 @@ public:

     void sendData(const Block & block, const String & name/* = "" */, bool scalar/* = false */) override;

+    void sendMergeTreeReadTaskResponse(const PartitionReadResponse & response) override;
+
     void sendExternalTablesData(ExternalTablesData & data) override;

     bool poll(size_t timeout_microseconds/* = 0 */) override;
@@ -255,6 +259,7 @@ private:
     std::vector<String> receiveMultistringMessage(UInt64 msg_type) const;
     std::unique_ptr<Exception> receiveException() const;
     Progress receiveProgress() const;
+    PartitionReadRequest receivePartitionReadRequest() const;
     ProfileInfo receiveProfileInfo() const;

     void initInputBuffers();
@@ -132,7 +132,7 @@ void HedgedConnections::sendQuery(
     const String & query,
     const String & query_id,
     UInt64 stage,
-    const ClientInfo & client_info,
+    ClientInfo & client_info,
     bool with_pending_data)
 {
     std::lock_guard lock(cancel_mutex);
@@ -171,7 +171,9 @@ void HedgedConnections::sendQuery(
             modified_settings.group_by_two_level_threshold_bytes = 0;
         }

-        if (offset_states.size() > 1)
+        const bool enable_sample_offset_parallel_processing = settings.max_parallel_replicas > 1 && !settings.allow_experimental_parallel_reading_from_replicas;
+
+        if (offset_states.size() > 1 && enable_sample_offset_parallel_processing)
         {
             modified_settings.parallel_replicas_count = offset_states.size();
             modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
@@ -86,7 +86,7 @@ public:
         const String & query,
         const String & query_id,
         UInt64 stage,
-        const ClientInfo & client_info,
+        ClientInfo & client_info,
         bool with_pending_data) override;

     void sendReadTaskResponse(const String &) override
@@ -94,6 +94,11 @@ public:
         throw Exception("sendReadTaskResponse in not supported with HedgedConnections", ErrorCodes::LOGICAL_ERROR);
     }

+    void sendMergeTreeReadTaskResponse(PartitionReadResponse) override
+    {
+        throw Exception("sendMergeTreeReadTaskResponse in not supported with HedgedConnections", ErrorCodes::LOGICAL_ERROR);
+    }
+
     Packet receivePacket() override;

     Packet receivePacketUnlocked(AsyncCallback async_callback, bool is_draining) override;
@@ -112,6 +117,8 @@ public:

     bool hasActiveConnections() const override { return active_connection_count > 0; }

+    void setReplicaInfo(ReplicaInfo value) override { replica_info = value; }
+
 private:
     /// If we don't receive data from replica and there is no progress in query
     /// execution for receive_data_timeout, we are trying to get new
@@ -199,6 +206,8 @@ private:
     bool sent_query = false;
     bool cancelled = false;

+    ReplicaInfo replica_info;
+
     mutable std::mutex cancel_mutex;
 };

@@ -1,6 +1,9 @@
 #pragma once

+#include <compare>
+
 #include <Client/Connection.h>
+#include <Storages/MergeTree/RequestResponse.h>

 namespace DB
 {
@@ -27,10 +30,11 @@ public:
         const String & query,
         const String & query_id,
         UInt64 stage,
-        const ClientInfo & client_info,
+        ClientInfo & client_info,
         bool with_pending_data) = 0;

     virtual void sendReadTaskResponse(const String &) = 0;
+    virtual void sendMergeTreeReadTaskResponse(PartitionReadResponse response) = 0;

     /// Get packet from any replica.
     virtual Packet receivePacket() = 0;
@@ -56,6 +60,17 @@ public:
     /// Get the replica addresses as a string.
     virtual std::string dumpAddresses() const = 0;

+
+    struct ReplicaInfo
+    {
+        size_t all_replicas_count{0};
+        size_t number_of_current_replica{0};
+    };
+
+    /// This is needed in max_parallel_replicas case.
+    /// We create a RemoteQueryExecutor for each replica
+    virtual void setReplicaInfo(ReplicaInfo value) = 0;
+
     /// Returns the number of replicas.
     virtual size_t size() const = 0;

@@ -12,6 +12,8 @@
 #include <IO/ConnectionTimeouts.h>
 #include <IO/Progress.h>

+#include <Storages/MergeTree/RequestResponse.h>
+

 #include <boost/noncopyable.hpp>

@@ -32,10 +34,13 @@ struct Packet
     Progress progress;
     ProfileInfo profile_info;
     std::vector<UUID> part_uuids;
+    PartitionReadRequest request;
+    PartitionReadResponse response;

     Packet() : type(Protocol::Server::Hello) {}
 };


 /// Struct which represents data we are going to send for external table.
 struct ExternalTableData
 {
@@ -96,6 +101,8 @@ public:
     /// Send all contents of external (temporary) tables.
     virtual void sendExternalTablesData(ExternalTablesData & data) = 0;

+    virtual void sendMergeTreeReadTaskResponse(const PartitionReadResponse & response) = 0;
+
     /// Check, if has data to read.
     virtual bool poll(size_t timeout_microseconds) = 0;

@@ -424,6 +424,11 @@ void LocalConnection::sendExternalTablesData(ExternalTablesData &)
     throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented");
 }

+void LocalConnection::sendMergeTreeReadTaskResponse(const PartitionReadResponse &)
+{
+    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented");
+}
+
 ServerConnectionPtr LocalConnection::createConnection(const ConnectionParameters &, ContextPtr current_context, bool send_progress)
 {
     return std::make_unique<LocalConnection>(current_context, send_progress);
@@ -92,6 +92,8 @@ public:

     void sendExternalTablesData(ExternalTablesData &) override;

+    void sendMergeTreeReadTaskResponse(const PartitionReadResponse & response) override;
+
     bool poll(size_t timeout_microseconds/* = 0 */) override;

     bool hasReadPendingData() const override;
@@ -1,9 +1,10 @@
 #include <Client/MultiplexedConnections.h>

+#include <Common/thread_local_rng.h>
+#include <Core/Protocol.h>
 #include <IO/ConnectionTimeouts.h>
 #include <IO/Operators.h>
-#include <Common/thread_local_rng.h>
-#include "Core/Protocol.h"
+#include <Interpreters/ClientInfo.h>


 namespace DB
 {
@@ -110,7 +111,7 @@ void MultiplexedConnections::sendQuery(
     const String & query,
     const String & query_id,
     UInt64 stage,
-    const ClientInfo & client_info,
+    ClientInfo & client_info,
     bool with_pending_data)
 {
     std::lock_guard lock(cancel_mutex);
@@ -131,16 +132,29 @@ void MultiplexedConnections::sendQuery(
             modified_settings.group_by_two_level_threshold = 0;
             modified_settings.group_by_two_level_threshold_bytes = 0;
         }
+
+        if (settings.allow_experimental_parallel_reading_from_replicas)
+        {
+            client_info.collaborate_with_initiator = true;
+            client_info.count_participating_replicas = replica_info.all_replicas_count;
+            client_info.number_of_current_replica = replica_info.number_of_current_replica;
+        }
     }

+    const bool enable_sample_offset_parallel_processing = settings.max_parallel_replicas > 1 && !settings.allow_experimental_parallel_reading_from_replicas;
+
     size_t num_replicas = replica_states.size();
     if (num_replicas > 1)
     {
+        if (enable_sample_offset_parallel_processing)
             /// Use multiple replicas for parallel query processing.
             modified_settings.parallel_replicas_count = num_replicas;

         for (size_t i = 0; i < num_replicas; ++i)
         {
+            if (enable_sample_offset_parallel_processing)
                 modified_settings.parallel_replica_offset = i;

             replica_states[i].connection->sendQuery(timeouts, query, query_id,
                 stage, &modified_settings, &client_info, with_pending_data);
         }
@@ -179,6 +193,16 @@ void MultiplexedConnections::sendReadTaskResponse(const String & response)
     current_connection->sendReadTaskResponse(response);
 }


+void MultiplexedConnections::sendMergeTreeReadTaskResponse(PartitionReadResponse response)
+{
+    std::lock_guard lock(cancel_mutex);
+    if (cancelled)
+        return;
+    current_connection->sendMergeTreeReadTaskResponse(response);
+}
+

 Packet MultiplexedConnections::receivePacket()
 {
     std::lock_guard lock(cancel_mutex);
@@ -234,6 +258,7 @@ Packet MultiplexedConnections::drain()

     switch (packet.type)
     {
+        case Protocol::Server::MergeTreeReadTaskRequest:
         case Protocol::Server::ReadTaskRequest:
         case Protocol::Server::PartUUIDs:
         case Protocol::Server::Data:
@@ -313,6 +338,7 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac

     switch (packet.type)
     {
+        case Protocol::Server::MergeTreeReadTaskRequest:
         case Protocol::Server::ReadTaskRequest:
         case Protocol::Server::PartUUIDs:
         case Protocol::Server::Data:
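To make the branching above concrete: under the legacy sampling-based mode every replica receives the same query plus a distinct parallel_replica_offset, while parallel_replicas_count holds the total. A minimal standalone sketch with stand-in types (illustrative only, not the real Settings or Connection classes):

#include <cassert>
#include <cstddef>
#include <vector>

struct FakeSettings   /// stand-in for the real per-query settings object
{
    size_t parallel_replicas_count = 0;
    size_t parallel_replica_offset = 0;
};

int main()
{
    const size_t num_replicas = 3;
    const bool enable_sample_offset_parallel_processing = true; /// i.e. max_parallel_replicas > 1 and the new protocol is off

    std::vector<FakeSettings> per_replica(num_replicas);
    for (size_t i = 0; i < num_replicas; ++i)
    {
        if (enable_sample_offset_parallel_processing)
        {
            per_replica[i].parallel_replicas_count = num_replicas;
            per_replica[i].parallel_replica_offset = i; /// each replica reads its own sample slice
        }
    }
    assert(per_replica[2].parallel_replica_offset == 2);
}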
@@ -38,10 +38,11 @@ public:
         const String & query,
         const String & query_id,
         UInt64 stage,
-        const ClientInfo & client_info,
+        ClientInfo & client_info,
         bool with_pending_data) override;

     void sendReadTaskResponse(const String &) override;
+    void sendMergeTreeReadTaskResponse(PartitionReadResponse response) override;

     Packet receivePacket() override;

@@ -62,6 +63,7 @@ public:
     /// Without locking, because sendCancel() does not change the state of the replicas.
     bool hasActiveConnections() const override { return active_connection_count > 0; }

+    void setReplicaInfo(ReplicaInfo value) override { replica_info = value; }
 private:
     Packet receivePacketUnlocked(AsyncCallback async_callback, bool is_draining) override;

@@ -102,6 +104,8 @@ private:
     bool sent_query = false;
     bool cancelled = false;

+    ReplicaInfo replica_info;
+
     /// A mutex for the sendCancel function to execute safely
     /// in separate thread.
     mutable std::mutex cancel_mutex;
@@ -28,13 +28,20 @@ namespace ErrorCodes
 }


-static String getTypeString(const AggregateFunctionPtr & func)
+static String getTypeString(const AggregateFunctionPtr & func, std::optional<size_t> version = std::nullopt)
 {
     WriteBufferFromOwnString stream;
-    stream << "AggregateFunction(" << func->getName();
+
+    stream << "AggregateFunction(";
+
+    /// If aggregate function does not support versioning its version is 0 and is not printed.
+    if (version && *version)
+        stream << *version << ", ";
+
+    stream << func->getName();
+
     const auto & parameters = func->getParameters();
     const auto & argument_types = func->getArgumentTypes();

     if (!parameters.empty())
     {
         stream << '(';
@@ -56,7 +63,7 @@ static String getTypeString(const AggregateFunctionPtr & func)


 ColumnAggregateFunction::ColumnAggregateFunction(const AggregateFunctionPtr & func_, std::optional<size_t> version_)
-    : func(func_), type_string(getTypeString(func)), version(version_)
+    : func(func_), type_string(getTypeString(func, version_)), version(version_)
 {
 }

@@ -66,10 +73,11 @@ ColumnAggregateFunction::ColumnAggregateFunction(const AggregateFunctionPtr & fu

 }

-void ColumnAggregateFunction::set(const AggregateFunctionPtr & func_)
+void ColumnAggregateFunction::set(const AggregateFunctionPtr & func_, size_t version_)
 {
     func = func_;
-    type_string = getTypeString(func);
+    version = version_;
+    type_string = getTypeString(func, version);
 }


@@ -403,7 +411,7 @@ void ColumnAggregateFunction::protect()

 MutableColumnPtr ColumnAggregateFunction::cloneEmpty() const
 {
-    return create(func);
+    return create(func, version);
 }

 Field ColumnAggregateFunction::operator[](size_t n) const
@@ -103,7 +103,7 @@ private:
 public:
     ~ColumnAggregateFunction() override;

-    void set(const AggregateFunctionPtr & func_);
+    void set(const AggregateFunctionPtr & func_, size_t version_);

     AggregateFunctionPtr getAggregateFunction() { return func; }
     AggregateFunctionPtr getAggregateFunction() const { return func; }
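Judging from the stream-building code above, a versioned aggregate-function state type gains a leading version number in its type name. A simplified standalone imitation of getTypeString (hypothetical helper, function parameters omitted, not the real interface):

#include <cassert>
#include <optional>
#include <sstream>
#include <string>
#include <vector>

/// Simplified imitation of getTypeString(): only the version handling shown in the diff.
static std::string typeString(const std::string & name,
                              const std::vector<std::string> & argument_types,
                              std::optional<size_t> version = std::nullopt)
{
    std::ostringstream stream;
    stream << "AggregateFunction(";
    /// A function that does not support versioning reports version 0, which is not printed.
    if (version && *version)
        stream << *version << ", ";
    stream << name;
    for (const auto & arg : argument_types)
        stream << ", " << arg;
    stream << ")";
    return stream.str();
}

int main()
{
    assert(typeString("avg", {"UInt64"}) == "AggregateFunction(avg, UInt64)");
    assert(typeString("avg", {"UInt64"}, 1) == "AggregateFunction(1, avg, UInt64)");
}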
|
@ -601,6 +601,7 @@
|
|||||||
M(631, UNKNOWN_FILE_SIZE) \
|
M(631, UNKNOWN_FILE_SIZE) \
|
||||||
M(632, UNEXPECTED_DATA_AFTER_PARSED_VALUE) \
|
M(632, UNEXPECTED_DATA_AFTER_PARSED_VALUE) \
|
||||||
M(633, QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW) \
|
M(633, QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW) \
|
||||||
|
M(634, MONGODB_ERROR) \
|
||||||
\
|
\
|
||||||
M(999, KEEPER_EXCEPTION) \
|
M(999, KEEPER_EXCEPTION) \
|
||||||
M(1000, POCO_EXCEPTION) \
|
M(1000, POCO_EXCEPTION) \
|
||||||
|
@ -163,4 +163,3 @@ protected:
|
|||||||
/** Creates a new object to put into the pool. */
|
/** Creates a new object to put into the pool. */
|
||||||
virtual ObjectPtr allocObject() = 0;
|
virtual ObjectPtr allocObject() = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -25,13 +25,13 @@ namespace
|
|||||||
{
|
{
|
||||||
#if defined(OS_LINUX)
|
#if defined(OS_LINUX)
|
||||||
thread_local size_t write_trace_iteration = 0;
|
thread_local size_t write_trace_iteration = 0;
|
||||||
|
#endif
|
||||||
/// Even after timer_delete() the signal can be delivered,
|
/// Even after timer_delete() the signal can be delivered,
|
||||||
/// since it does not do anything with pending signals.
|
/// since it does not do anything with pending signals.
|
||||||
///
|
///
|
||||||
/// And so to overcome this flag is exists,
|
/// And so to overcome this flag is exists,
|
||||||
/// to ignore delivered signals after timer_delete().
|
/// to ignore delivered signals after timer_delete().
|
||||||
thread_local bool signal_handler_disarmed = true;
|
thread_local bool signal_handler_disarmed = true;
|
||||||
#endif
|
|
||||||
|
|
||||||
void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context)
|
void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context)
|
||||||
{
|
{
|
||||||
|
@@ -1,6 +1,44 @@
 #include <Common/memory.h>
 #include <new>

+#if defined(OS_DARWIN) && (USE_JEMALLOC)
+/// In case of OSX jemalloc register itself as a default zone allocator.
+///
+/// Sure jemalloc will register itself, since zone_register() declared with
+/// constructor attribute (since zone_register is also forbidden from
+/// optimizing out), however those constructors will be called before
+/// constructors for global variable initializers (__cxx_global_var_init()).
+///
+/// So to make jemalloc under OSX more stable, we will call it explicitly from
+/// global variable initializers so that each allocation will use it.
+/// (NOTE: It is ok to call it twice, since zone_register() is a no-op if the
+/// default zone is already replaced with something.)
+///
+/// Refs: https://github.com/jemalloc/jemalloc/issues/708
+
+extern "C"
+{
+    extern void zone_register();
+}
+
+static struct InitializeJemallocZoneAllocatorForOSX
+{
+    InitializeJemallocZoneAllocatorForOSX()
+    {
+        zone_register();
+        /// jemalloc() initializes itself only on malloc()
+        /// and so if some global initializer will have free(nullptr)
+        /// jemalloc may trigger some internal assertion.
+        ///
+        /// To prevent this, we explicitly call malloc(free()) here.
+        if (void * ptr = malloc(0))
+        {
+            free(ptr);
+        }
+    }
+} initializeJemallocZoneAllocatorForOSX;
+#endif
+
+
 /// Replace default new/delete with memory tracking versions.
 /// @sa https://en.cppreference.com/w/cpp/memory/new/operator_new
@@ -1,6 +1,5 @@
 #include <gtest/gtest.h>

-#include <Common/config.h>
 #include "config_core.h"

 #if USE_NURAFT
@@ -15,7 +14,6 @@
 #include <Coordination/WriteBufferFromNuraftBuffer.h>
 #include <Coordination/ReadBufferFromNuraftBuffer.h>
 #include <IO/ReadBufferFromString.h>
-#include <IO/WriteBufferFromString.h>
 #include <Common/ZooKeeper/ZooKeeperCommon.h>
 #include <Common/ZooKeeper/ZooKeeperIO.h>
 #include <Common/Exception.h>
@@ -81,7 +81,9 @@ namespace Protocol
             /// This is such an inverted logic, where server sends requests
             /// And client returns back response
             ProfileEvents = 14,           /// Packet with profile events from server.
-            MAX = ProfileEvents,
+            MergeTreeReadTaskRequest = 15, /// Request from a MergeTree replica to a coordinator
+            MAX = MergeTreeReadTaskRequest,
+
         };

         /// NOTE: If the type of packet argument would be Enum, the comparison packet >= 0 && packet < 10
@@ -106,6 +108,7 @@ namespace Protocol
                 "PartUUIDs",
                 "ReadTaskRequest",
                 "ProfileEvents",
+                "MergeTreeReadTaskRequest",
             };
             return packet <= MAX
                 ? data[packet]
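When a new packet type is introduced, three places have to move together: the enumerator, the MAX alias, and the name table used by toString(); otherwise the `packet <= MAX` guard could index past the end of the data array. A stripped-down sketch of that lookup pattern, with hypothetical enumerator values rather than the full protocol:

#include <cstdint>
#include <iostream>

namespace Server
{
    enum Packets : uint64_t
    {
        Hello = 0,
        Data = 1,
        ProfileEvents = 2,
        MergeTreeReadTaskRequest = 3,   // newly added packet
        MAX = MergeTreeReadTaskRequest, // must track the last enumerator
    };

    inline const char * toString(uint64_t packet)
    {
        // Must stay in sync with the enum above: one name per value.
        static const char * data[] =
        {
            "Hello",
            "Data",
            "ProfileEvents",
            "MergeTreeReadTaskRequest",
        };
        return packet <= MAX
            ? data[packet]
            : "Unknown packet";
    }
}

int main()
{
    std::cout << Server::toString(Server::MergeTreeReadTaskRequest) << '\n';
    std::cout << Server::toString(42) << '\n';  // out of range -> "Unknown packet"
}

The client-side enum below follows the exact same enum/MAX/name-table discipline.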
@@ -141,9 +144,9 @@ namespace Protocol
             KeepAlive = 6,              /// Keep the connection alive
             Scalar = 7,                 /// A block of data (compressed or not).
             IgnoredPartUUIDs = 8,       /// List of unique parts ids to exclude from query processing
-            ReadTaskResponse = 9,       /// TODO:
-            MAX = ReadTaskResponse,
+            ReadTaskResponse = 9,       /// A filename to read from s3 (used in s3Cluster)
+            MergeTreeReadTaskResponse = 10, /// Coordinator's decision with a modified set of mark ranges allowed to read
+            MAX = MergeTreeReadTaskResponse,
         };

         inline const char * toString(UInt64 packet)
@@ -159,6 +162,7 @@ namespace Protocol
                 "Scalar",
                 "IgnoredPartUUIDs",
                 "ReadTaskResponse",
+                "MergeTreeReadTaskResponse"
             };
             return packet <= MAX
                 ? data[packet]
@@ -31,6 +31,9 @@

 #define DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION 1

+#define DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION 1
+#define DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS 54453
+
 /// Minimum revision supporting interserver secret.
 #define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441

@@ -48,6 +51,7 @@
 /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION,
 /// later is just a number for server version (one number instead of commit SHA)
 /// for simplicity (sometimes it may be more convenient in some use cases).
-#define DBMS_TCP_PROTOCOL_VERSION 54452
+#define DBMS_TCP_PROTOCOL_VERSION 54453
+
 #define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449
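Bumping DBMS_TCP_PROTOCOL_VERSION and pairing the feature with a dedicated DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS constant lets both sides gate the new packets on the revision negotiated in the handshake: an older peer simply never sees them. A hedged sketch of such a revision check; the helper function and handshake value are illustrative only, not the actual connection code.

#include <cstdint>
#include <iostream>

#define DBMS_TCP_PROTOCOL_VERSION 54453
#define DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS 54453

// Hypothetical helper: decide whether the parallel-replicas packets may be
// used on a connection whose peer reported `peer_revision` in the handshake.
bool canUseParallelReplicas(uint64_t peer_revision)
{
    return peer_revision >= DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS;
}

int main()
{
    std::cout << std::boolalpha;
    std::cout << canUseParallelReplicas(54452) << '\n';                    // false: peer is too old
    std::cout << canUseParallelReplicas(DBMS_TCP_PROTOCOL_VERSION) << '\n'; // true
}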
@@ -75,6 +75,7 @@ class IColumn;
     M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
     M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \
    M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \
+    M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \
     M(UInt64, hsts_max_age, 0, "Expired time for hsts. 0 means disable HSTS.", 0) \
     M(Bool, extremes, false, "Calculate minimums and maximums of the result columns. They can be output in JSON-formats.", IMPORTANT) \
     M(Bool, use_uncompressed_cache, false, "Whether to use the cache of uncompressed blocks.", 0) \
@@ -125,6 +126,8 @@ class IColumn;
     M(UInt64, parallel_replicas_count, 0, "", 0) \
     M(UInt64, parallel_replica_offset, 0, "", 0) \
     \
+    M(Bool, allow_experimental_parallel_reading_from_replicas, false, "If true, ClickHouse will send a SELECT query to all replicas of a table. It will work for any kind on MergeTree table.", 0) \
+    \
     M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. Shard is marked as unavailable when none of the replicas can be reached.", 0) \
     \
     M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard, if 1 SELECT is executed on each shard, if 2 SELECT and INSERT is executed on each shard", 0) \
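Like the error-code list above, this settings list is an X-macro: each M(type, name, default, description, flags) entry expands into a typed member with its default value plus metadata such as the description. A compact, illustrative expansion under assumed names; the real Settings machinery is considerably more involved.

#include <cstdint>
#include <iostream>

// Abbreviated, hypothetical subset of the settings list.
#define APPLY_FOR_SETTINGS(M) \
    M(uint64_t, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.") \
    M(bool, allow_experimental_parallel_reading_from_replicas, false, \
      "If true, ClickHouse will send a SELECT query to all replicas of a table.")

struct Settings
{
    // Expand each entry into a member initialized with its default value.
#define M(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;
    APPLY_FOR_SETTINGS(M)
#undef M
};

int main()
{
    Settings settings;
    settings.allow_experimental_parallel_reading_from_replicas = true;  // opt in explicitly
    std::cout << settings.s3_max_redirects << '\n';
    std::cout << std::boolalpha
              << settings.allow_experimental_parallel_reading_from_replicas << '\n';
}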
Some files were not shown because too many files have changed in this diff.