Merge branch 'master' into check_dependencies_on_drop

Commit 416a82b99a
.github/workflows/backport.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
+name: CherryPick
+concurrency:
+  group: cherry-pick
+on: # yamllint disable-line rule:truthy
+  schedule:
+    - cron: '0 */3 * * *'
+jobs:
+  CherryPick:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
+          fetch-depth: 0
+      - name: Cherry pick
+        env:
+          TEMP_PATH: ${{runner.temp}}/cherry_pick
+          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
+          REPO_OWNER: "ClickHouse"
+          REPO_NAME: "ClickHouse"
+          REPO_TEAM: "core"
+        run: |
+          sudo pip install GitPython
+          cd $GITHUB_WORKSPACE/tests/ci
+          python3 cherry_pick.py
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
.github/workflows/main.yml (vendored, 324 changes)
@@ -121,8 +121,125 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
+  BuilderDebAsan:
+    needs: DockerHubPush
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Download changed images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images
+          path: ${{ runner.temp }}/images_path
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          submodules: 'recursive'
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        env:
+          TEMP_PATH: ${{runner.temp}}/build_check
+          IMAGES_PATH: ${{runner.temp}}/images_path
+          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH: ${{runner.temp}}/../ccaches
+          CHECK_NAME: 'ClickHouse build check (actions)'
+          BUILD_NUMBER: 3
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
+      - name: Upload build URLs to artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ env.BUILD_NAME }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  BuilderDebRelease:
+    needs: DockerHubPush
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Download changed images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images
+          path: ${{ runner.temp }}/images_path
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          submodules: 'recursive'
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        env:
+          TEMP_PATH: ${{runner.temp}}/build_check
+          IMAGES_PATH: ${{runner.temp}}/images_path
+          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH: ${{runner.temp}}/../ccaches
+          CHECK_NAME: 'ClickHouse build check (actions)'
+          BUILD_NUMBER: 0
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
+      - name: Upload build URLs to artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ env.BUILD_NAME }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  BuilderDebSplitted:
+    needs: DockerHubPush
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Download changed images
+        uses: actions/download-artifact@v2
+        with:
+          name: changed_images
+          path: ${{ runner.temp }}/images_path
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          submodules: 'recursive'
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        env:
+          TEMP_PATH: ${{runner.temp}}/build_check
+          IMAGES_PATH: ${{runner.temp}}/images_path
+          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH: ${{runner.temp}}/../ccaches
+          CHECK_NAME: 'ClickHouse special build check (actions)'
+          BUILD_NUMBER: 1
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
+      - name: Upload build URLs to artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ env.BUILD_NAME }}
+          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
   BuilderReport:
-    needs: [BuilderDebDebug]
+    needs: [BuilderDebDebug, BuilderDebAsan, BuilderDebRelease, BuilderDebSplitted]
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Download json reports
@@ -163,14 +280,13 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Stateless tests (debug, actions)'
           REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
-          REQUIRED_BUILD_NUMBER: 7
           KILL_TIMEOUT: 10800
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
           cp -r $GITHUB_WORKSPACE $TEMP_PATH
           cd $REPO_COPY/tests/ci
-          python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
       - name: Cleanup
         if: always()
         run: |
@@ -193,14 +309,42 @@ jobs:
           REPORTS_PATH: ${{runner.temp}}/reports_dir
           CHECK_NAME: 'Stateful tests (debug, actions)'
           REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
-          REQUIRED_BUILD_NUMBER: 7
           KILL_TIMEOUT: 3600
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
           cp -r $GITHUB_WORKSPACE $TEMP_PATH
           cd $REPO_COPY/tests/ci
-          python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
       - name: Cleanup
         if: always()
         run: |
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
+  FunctionalStatelessTestFlakyCheck:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        env:
+          TEMP_PATH: ${{runner.temp}}/stateless_flaky_asan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Stateless tests flaky check (address, actions)'
+          REPO_COPY: ${{runner.temp}}/stateless_flaky_asan/ClickHouse
+          KILL_TIMEOUT: 3600
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
+      - name: Cleanup
+        if: always()
+        run: |
@@ -221,15 +365,98 @@ jobs:
         env:
           TEMP_PATH: ${{runner.temp}}/stress_debug
           REPORTS_PATH: ${{runner.temp}}/reports_dir
-          CHECK_NAME: 'Stress tests (debug, actions)'
+          CHECK_NAME: 'Stress test (debug, actions)'
           REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse
-          REQUIRED_BUILD_NUMBER: 7
         run: |
           sudo rm -fr $TEMP_PATH
           mkdir -p $TEMP_PATH
           cp -r $GITHUB_WORKSPACE $TEMP_PATH
           cd $REPO_COPY/tests/ci
-          python3 stress_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER
+          python3 stress_check.py "$CHECK_NAME"
       - name: Cleanup
         if: always()
         run: |
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
+  ASTFuzzerTestDebug:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Fuzzer
+        env:
+          TEMP_PATH: ${{runner.temp}}/ast_fuzzer_debug
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'AST fuzzer (debug, actions)'
+          REPO_COPY: ${{runner.temp}}/ast_fuzzer_debug/ClickHouse
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 ast_fuzzer_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  # IntegrationTestsAsan:
+  #   needs: [BuilderDebAsan]
+  #   runs-on: [self-hosted, stress-tester]
+  #   steps:
+  #     - name: Download json reports
+  #       uses: actions/download-artifact@v2
+  #       with:
+  #         path: ${{runner.temp}}/reports_dir
+  #     - name: Check out repository code
+  #       uses: actions/checkout@v2
+  #     - name: Integration test
+  #       env:
+  #         TEMP_PATH: ${{runner.temp}}/integration_tests_asan
+  #         REPORTS_PATH: ${{runner.temp}}/reports_dir
+  #         CHECK_NAME: 'Integration tests (asan, actions)'
+  #         REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
+  #       run: |
+  #         sudo rm -fr $TEMP_PATH
+  #         mkdir -p $TEMP_PATH
+  #         cp -r $GITHUB_WORKSPACE $TEMP_PATH
+  #         cd $REPO_COPY/tests/ci
+  #         python3 integration_test_check.py "$CHECK_NAME"
+  #     - name: Cleanup
+  #       if: always()
+  #       run: |
+  #         docker kill $(docker ps -q) ||:
+  #         docker rm -f $(docker ps -a -q) ||:
+  #         sudo rm -fr $TEMP_PATH
+  UnitTestsAsan:
+    needs: [BuilderDebAsan]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Unit test
+        env:
+          TEMP_PATH: ${{runner.temp}}/unit_tests_asan
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+          CHECK_NAME: 'Unit tests (asan, actions)'
+          REPO_COPY: ${{runner.temp}}/unit_tests_asan/ClickHouse
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci
+          python3 unit_tests_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
@@ -259,8 +486,87 @@ jobs:
           docker kill $(docker ps -q) ||:
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr $TEMP_PATH
+  PVSCheck:
+    needs: DockerHubPush
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          submodules: 'recursive'
+      - name: PVS Check
+        env:
+          TEMP_PATH: ${{runner.temp}}/pvs_check
+          REPO_COPY: ${{runner.temp}}/pvs_check/ClickHouse
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci && python3 pvs_check.py
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  CompatibilityCheck:
+    needs: [BuilderDebRelease]
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: CompatibilityCheck
+        env:
+          TEMP_PATH: ${{runner.temp}}/compatibility_check
+          REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
+  SplitBuildSmokeTest:
+    needs: [BuilderDebSplitted]
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{runner.temp}}/reports_dir
+      - name: Split build check
+        env:
+          TEMP_PATH: ${{runner.temp}}/split_build_check
+          REPO_COPY: ${{runner.temp}}/split_build_check/ClickHouse
+          REPORTS_PATH: ${{runner.temp}}/reports_dir
+        run: |
+          sudo rm -fr $TEMP_PATH
+          mkdir -p $TEMP_PATH
+          cp -r $GITHUB_WORKSPACE $TEMP_PATH
+          cd $REPO_COPY/tests/ci && python3 split_build_smoke_check.py
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill $(docker ps -q) ||:
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr $TEMP_PATH
   FinishCheck:
-    needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest, FunctionalStatelessTestDebug, FunctionalStatefulTestDebug, DocsCheck, StressTestDebug]
+    needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest, FunctionalStatelessTestDebug, FunctionalStatefulTestDebug, DocsCheck, StressTestDebug, ASTFuzzerTestDebug, PVSCheck, UnitTestsAsan, SplitBuildSmokeTest, CompatibilityCheck]
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Check out repository code
CHANGELOG.md (266 changes)

@@ -1,3 +1,269 @@

### ClickHouse release v21.11, 2021-11-09

#### Backward Incompatible Change

* Change order of json_path and json arguments in SQL/JSON functions (to be consistent with the standard). Closes [#30449](https://github.com/ClickHouse/ClickHouse/issues/30449). [#30474](https://github.com/ClickHouse/ClickHouse/pull/30474) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove `MergeTree` table setting `write_final_mark`. It will always be `true`. No actions required, all tables are compatible with the new version. [#30455](https://github.com/ClickHouse/ClickHouse/pull/30455) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Function `bayesAB` is removed. Please help to return this function back, refreshed. This closes [#26233](https://github.com/ClickHouse/ClickHouse/issues/26233). [#29934](https://github.com/ClickHouse/ClickHouse/pull/29934) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* This is relevant only if you have already started using the experimental `clickhouse-keeper` support. ClickHouse Keeper snapshots are now compressed with the `ZSTD` codec by default instead of custom ClickHouse LZ4 block compression. This behavior can be turned off with the `compress_snapshots_with_zstd_format` coordination setting (must be equal on all quorum replicas). Backward incompatibility is quite rare and may happen only when a new node sends a snapshot (happens in case of recovery) to an old node which is unable to read snapshots in ZSTD format. [#29417](https://github.com/ClickHouse/ClickHouse/pull/29417) ([alesapin](https://github.com/alesapin)).

#### New Feature

* New asynchronous INSERT mode allows accumulating inserted data and storing it in a single batch in the background. On the client it can be enabled by the setting `async_insert` for `INSERT` queries with data inlined in the query or in a separate buffer (e.g. for `INSERT` queries via HTTP protocol). If `wait_for_async_insert` is true (the default), the client will wait until the data is flushed to the table. On the server side it is controlled by the settings `async_insert_threads`, `async_insert_max_data_size` and `async_insert_busy_timeout_ms`. Implements [#18282](https://github.com/ClickHouse/ClickHouse/issues/18282). [#27537](https://github.com/ClickHouse/ClickHouse/pull/27537) ([Anton Popov](https://github.com/CurtizJ)). [#20557](https://github.com/ClickHouse/ClickHouse/pull/20557) ([Ivan](https://github.com/abyss7)). Notes on performance: with asynchronous inserts you can do up to around 10 000 individual INSERT queries per second, so it is still recommended to insert in batches if you want to achieve performance of up to millions of inserted rows per second.
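
  A minimal sketch of how a client might enable it (the `events` table here is hypothetical):

  ```sql
  -- Hypothetical table used only for illustration.
  CREATE TABLE events (id UInt64, msg String) ENGINE = MergeTree ORDER BY id;

  -- Accumulate small inserts on the server and flush them as a single batch;
  -- with wait_for_async_insert = 1 the INSERT returns only after the flush.
  SET async_insert = 1, wait_for_async_insert = 1;
  INSERT INTO events VALUES (1, 'a'), (2, 'b');
  ```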
* Add interactive mode for `clickhouse-local`, so you can just run `clickhouse-local` to get a command-line ClickHouse interface without connecting to a server, and process data from files and external data sources. Also merge the code of `clickhouse-client` and `clickhouse-local` together. Closes [#7203](https://github.com/ClickHouse/ClickHouse/issues/7203). Closes [#25516](https://github.com/ClickHouse/ClickHouse/issues/25516). Closes [#22401](https://github.com/ClickHouse/ClickHouse/issues/22401). [#26231](https://github.com/ClickHouse/ClickHouse/pull/26231) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added support for executable (scriptable) user defined functions. These are UDFs that can be written in any programming language. [#28803](https://github.com/ClickHouse/ClickHouse/pull/28803) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow predefined connections to external data sources. This avoids specifying credentials or addresses while using external data sources; they can be referenced by names instead. Closes [#28367](https://github.com/ClickHouse/ClickHouse/issues/28367). [#28577](https://github.com/ClickHouse/ClickHouse/pull/28577) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added `INFORMATION_SCHEMA` database with `SCHEMATA`, `TABLES`, `VIEWS` and `COLUMNS` views to the corresponding tables in `system` database. Closes [#9770](https://github.com/ClickHouse/ClickHouse/issues/9770). [#28691](https://github.com/ClickHouse/ClickHouse/pull/28691) ([tavplubix](https://github.com/tavplubix)).
* Support `EXISTS (subquery)`. Closes [#6852](https://github.com/ClickHouse/ClickHouse/issues/6852). [#29731](https://github.com/ClickHouse/ClickHouse/pull/29731) ([Kseniia Sumarokova](https://github.com/kssenii)).
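
  A quick illustration of the uncorrelated form, using the built-in `numbers` table function:

  ```sql
  -- Returns 1 because the subquery produces at least one row.
  SELECT EXISTS (SELECT * FROM numbers(10) WHERE number > 8);
  ```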
* Session logging for audit. Logging all successful and failed login and logout events to a new `system.session_log` table. [#22415](https://github.com/ClickHouse/ClickHouse/pull/22415) ([Vasily Nemkov](https://github.com/Enmk)) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support multidimensional cosine distance and euclidean distance functions; L1, L2, Lp, Linf distances and norms; scalar product on tuples and various arithmetic operators on tuples. This fully closes [#4509](https://github.com/ClickHouse/ClickHouse/issues/4509) and even more. [#27933](https://github.com/ClickHouse/ClickHouse/pull/27933) ([Alexey Boykov](https://github.com/mathalex)).
* Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetection or with an additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
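
  For example, in `clickhouse-client` (the file name is arbitrary; the codec is autodetected from the `.gz` extension):

  ```sql
  -- Write a gzip-compressed CSV file.
  SELECT * FROM system.numbers LIMIT 10
  INTO OUTFILE 'numbers.csv.gz'
  FORMAT CSV;
  ```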
* Add CORS (Cross Origin Resource Sharing) support with HTTP `OPTIONS` request. This means Grafana now works with serverless requests without kludges. Closes [#18693](https://github.com/ClickHouse/ClickHouse/issues/18693). [#29155](https://github.com/ClickHouse/ClickHouse/pull/29155) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Queries with JOIN ON now support disjunctions (OR). [#21320](https://github.com/ClickHouse/ClickHouse/pull/21320) ([Ilya Golshtein](https://github.com/ilejn)).
* Added function `tokens` that allows splitting a string into tokens using non-alphanumeric ASCII characters as separators. [#29981](https://github.com/ClickHouse/ClickHouse/pull/29981) ([Maksim Kita](https://github.com/kitaisreal)). Added function `ngrams` to extract ngrams from text. Closes [#29699](https://github.com/ClickHouse/ClickHouse/issues/29699). [#29738](https://github.com/ClickHouse/ClickHouse/pull/29738) ([Maksim Kita](https://github.com/kitaisreal)).
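
  For instance (the commented results show the expected splitting):

  ```sql
  SELECT tokens('ClickHouse 21.11 release notes');
  -- ['ClickHouse', '21', '11', 'release', 'notes']

  SELECT ngrams('ClickHouse', 4);
  -- ['Clic', 'lick', 'ickH', 'ckHo', 'kHou', 'Hous', 'ouse']
  ```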
* Add functions for Unicode normalization: `normalizeUTF8NFC`, `normalizeUTF8NFD`, `normalizeUTF8NFKC`, `normalizeUTF8NFKD`. [#28633](https://github.com/ClickHouse/ClickHouse/pull/28633) ([darkkeks](https://github.com/darkkeks)).
* Streaming consumption of application log files in ClickHouse with the `FileLog` table engine. It's like the `Kafka` or `RabbitMQ` engine but for append-only and rotated logs in the local filesystem. Closes [#6953](https://github.com/ClickHouse/ClickHouse/issues/6953). [#25969](https://github.com/ClickHouse/ClickHouse/pull/25969) ([flynn](https://github.com/ucasfl)) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `CapnProto` output format, refactor `CapnProto` input format. [#29291](https://github.com/ClickHouse/ClickHouse/pull/29291) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow writing a number in a query as a binary literal. Example: `SELECT 0b001;`. [#29304](https://github.com/ClickHouse/ClickHouse/pull/29304) ([Maksim Kita](https://github.com/kitaisreal)).
* Added `hashed_array` dictionary type. It saves memory when using dictionaries with multiple attributes. Closes [#30236](https://github.com/ClickHouse/ClickHouse/issues/30236). [#30242](https://github.com/ClickHouse/ClickHouse/pull/30242) ([Maksim Kita](https://github.com/kitaisreal)).
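
  A minimal sketch (the dictionary name and its source table `users` are hypothetical):

  ```sql
  CREATE DICTIONARY user_dict
  (
      id UInt64,
      name String,
      country String
  )
  PRIMARY KEY id
  SOURCE(CLICKHOUSE(TABLE 'users'))
  LAYOUT(HASHED_ARRAY())  -- shares one key index across all attributes
  LIFETIME(MIN 300 MAX 600);
  ```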
* Added `JSONExtractKeys` function. [#30056](https://github.com/ClickHouse/ClickHouse/pull/30056) ([Vitaly](https://github.com/orloffv)).
* Add a function `getOSKernelVersion` - it returns a string with the OS kernel version. [#29755](https://github.com/ClickHouse/ClickHouse/pull/29755) ([Memo](https://github.com/Joeywzr)).
* Added `MD4` and `SHA384` functions. MD4 is an obsolete and insecure hash function; it can be used only in rare cases when MD4 is already being used in some legacy system and you need to get exactly the same result. [#29602](https://github.com/ClickHouse/ClickHouse/pull/29602) ([Nikita Tikhomirov](https://github.com/NSTikhomirov)).
* HSTS can be enabled for the ClickHouse HTTP server by setting `hsts_max_age` to a positive number in the configuration file. [#29516](https://github.com/ClickHouse/ClickHouse/pull/29516) ([凌涛](https://github.com/lingtaolf)).
* Huawei OBS Storage support. Closes [#24294](https://github.com/ClickHouse/ClickHouse/issues/24294). [#29511](https://github.com/ClickHouse/ClickHouse/pull/29511) ([kevin wan](https://github.com/MaxWk)).
* New function `mapContainsKeyLike` to check whether a map contains a key matching a simple regular expression. [#29471](https://github.com/ClickHouse/ClickHouse/pull/29471) ([凌涛](https://github.com/lingtaolf)). New function `mapExtractKeyLike` to get a map keeping only the elements whose key matches the specified pattern. [#30793](https://github.com/ClickHouse/ClickHouse/pull/30793) ([凌涛](https://github.com/lingtaolf)).
* Implemented `ALTER TABLE x MODIFY COMMENT`. [#29264](https://github.com/ClickHouse/ClickHouse/pull/29264) ([Vasily Nemkov](https://github.com/Enmk)).
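
  For example (the table name is hypothetical):

  ```sql
  ALTER TABLE events MODIFY COMMENT 'Raw web analytics events';
  ```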
* Adds H3 inspection functions that are missing from ClickHouse but are available via the H3 api: https://h3geo.org/docs/api/inspection. [#29209](https://github.com/ClickHouse/ClickHouse/pull/29209) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow non-replicated ALTER TABLE FETCH and ATTACH in Replicated databases. [#29202](https://github.com/ClickHouse/ClickHouse/pull/29202) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Added a setting `output_format_csv_null_representation`: this is the same as `output_format_tsv_null_representation` but is for CSV output. [#29123](https://github.com/ClickHouse/ClickHouse/pull/29123) ([PHO](https://github.com/depressed-pho)).
* Added function `zookeeperSessionUptime()` which returns the uptime of the current ZooKeeper session in seconds. [#28983](https://github.com/ClickHouse/ClickHouse/pull/28983) ([tavplubix](https://github.com/tavplubix)).
* Implements the `h3ToGeoBoundary` function. [#28952](https://github.com/ClickHouse/ClickHouse/pull/28952) ([Ivan Veselov](https://github.com/fuzzERot)).
* Add aggregate function `exponentialMovingAverage` that can be used as a window function. This closes [#27511](https://github.com/ClickHouse/ClickHouse/issues/27511). [#28914](https://github.com/ClickHouse/ClickHouse/pull/28914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
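
  A sketch of the window-function form (synthetic rows via the `values` table function; the half-decay window of 5 time units is arbitrary):

  ```sql
  SELECT
      t,
      v,
      exponentialMovingAverage(5)(v, t)
          OVER (ORDER BY t ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS ema
  FROM values('t UInt32, v Float64', (1, 1.0), (2, 2.0), (3, 4.0), (4, 8.0));
  ```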
* Allow including subcolumns of table columns into the `DESCRIBE` query result (can be enabled by the setting `describe_include_subcolumns`). [#28905](https://github.com/ClickHouse/ClickHouse/pull/28905) ([Anton Popov](https://github.com/CurtizJ)).
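
  For example (the table `t` is hypothetical):

  ```sql
  SET describe_include_subcolumns = 1;
  -- For a column `n Nullable(String)` this now also lists the subcolumn `n.null`,
  -- and for `arr Array(UInt64)` the subcolumn `arr.size0`.
  DESCRIBE TABLE t;
  ```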
* `Executable` and `ExecutablePool` added option `send_chunk_header`. If this option is true, the chunk row count followed by a line break is sent to the client before each chunk. [#28833](https://github.com/ClickHouse/ClickHouse/pull/28833) ([Maksim Kita](https://github.com/kitaisreal)).
* `tokenbf_v1` and `ngram` skip indexes support `Map` with keys of `String` or `FixedString` type. This enhances data skipping in queries with a map-key filter. ```sql CREATE TABLE map_tokenbf ( row_id UInt32, map Map(String, String), INDEX map_tokenbf map TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 ) ENGINE = MergeTree() ORDER BY row_id ``` With the table above, the query `SELECT * FROM map_tokenbf WHERE map['K']='V'` will skip granules that don't contain the key `K`. How many rows are skipped depends on the `granularity` and `index_granularity` you set. [#28511](https://github.com/ClickHouse/ClickHouse/pull/28511) ([凌涛](https://github.com/lingtaolf)).
* Send profile events from server to client. New packet type `ProfileEvents` was introduced. Closes [#26177](https://github.com/ClickHouse/ClickHouse/issues/26177). [#28364](https://github.com/ClickHouse/ClickHouse/pull/28364) ([Dmitry Novik](https://github.com/novikd)).
* Bit shift operations for `FixedString` and `String` data types. This closes [#27763](https://github.com/ClickHouse/ClickHouse/issues/27763). [#28325](https://github.com/ClickHouse/ClickHouse/pull/28325) ([小路](https://github.com/nicelulu)).
* Support adding / deleting tables to replication from PostgreSQL dynamically in database engine MaterializedPostgreSQL. Support alter for database settings. Closes [#27573](https://github.com/ClickHouse/ClickHouse/issues/27573). [#28301](https://github.com/ClickHouse/ClickHouse/pull/28301) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added function accurateCastOrDefault(x, T). Closes [#21330](https://github.com/ClickHouse/ClickHouse/issues/21330). Authors @taiyang-li. [#23028](https://github.com/ClickHouse/ClickHouse/pull/23028) ([Maksim Kita](https://github.com/kitaisreal)).
* Add functions `toUUIDOrDefault`, `toUInt8/16/32/64/256OrDefault`, `toInt8/16/32/64/128/256OrDefault`, which let the user define a default (non-NULL) value to return when string parsing fails. [#21330](https://github.com/ClickHouse/ClickHouse/pull/21330) ([taiyang-li](https://github.com/taiyang-li)).
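
  For instance (a sketch; the second argument of the `OrDefault` form supplies the fallback):

  ```sql
  SELECT
      accurateCastOrDefault('not a number', 'UInt32'),  -- 0, the default value of UInt32
      toUInt32OrDefault('not a number', toUInt32(42));  -- 42, the user-supplied default
  ```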

#### Performance Improvement

* Background merges can be preempted by each other and they are scheduled with appropriate priorities. Now long-running merges won't prevent short merges from proceeding. This is needed for better scheduling and control of merge execution. It reduces the chances of getting a "too many parts" error. [#22381](https://github.com/ClickHouse/ClickHouse/issues/22381). [#25165](https://github.com/ClickHouse/ClickHouse/pull/25165) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Added an ability to execute more merges and mutations than the number of threads in the background pool. Merges and mutations will be executed step by step according to their sizes (lower is more prioritized). The ratio of the number of tasks to threads to execute is controlled by the setting `background_merges_mutations_concurrency_ratio`, 2 by default. [#29140](https://github.com/ClickHouse/ClickHouse/pull/29140) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow using asynchronous reads for remote filesystems. Lower the number of seeks while reading from remote filesystems. It improves performance tremendously and makes the experimental `web` and `s3` disks work faster than EBS under certain conditions. [#29205](https://github.com/ClickHouse/ClickHouse/pull/29205) ([Kseniia Sumarokova](https://github.com/kssenii)). In the meantime, the `web` disk type (static dataset hosted on a web server) has graduated from experimental to production-ready.
* Queries with `INTO OUTFILE` in `clickhouse-client` will use multiple threads. Fix the issue with a flickering progress bar when using `INTO OUTFILE`. This closes [#30873](https://github.com/ClickHouse/ClickHouse/issues/30873). This closes [#30872](https://github.com/ClickHouse/ClickHouse/issues/30872). [#30886](https://github.com/ClickHouse/ClickHouse/pull/30886) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Reduce the amount of redundant compressed data read from disk for some types of `SELECT` queries (only for the `MergeTree` engine family). [#30111](https://github.com/ClickHouse/ClickHouse/pull/30111) ([alesapin](https://github.com/alesapin)).
* Remove some redundant `seek` calls while reading compressed blocks in the MergeTree table engine family. [#29766](https://github.com/ClickHouse/ClickHouse/pull/29766) ([alesapin](https://github.com/alesapin)).
* Make the `url` table function process multiple URLs in parallel. This closes [#29670](https://github.com/ClickHouse/ClickHouse/issues/29670) and closes [#29671](https://github.com/ClickHouse/ClickHouse/issues/29671). [#29673](https://github.com/ClickHouse/ClickHouse/pull/29673) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of aggregation in order of primary key (with the setting `optimize_aggregation_in_order` enabled). [#30266](https://github.com/ClickHouse/ClickHouse/pull/30266) ([Anton Popov](https://github.com/CurtizJ)).
* ClickHouse now uses a DNS cache while communicating with external S3. [#29999](https://github.com/ClickHouse/ClickHouse/pull/29999) ([alesapin](https://github.com/alesapin)).
* Add support for pushdown of `IS NULL`/`IS NOT NULL` to external databases (i.e. MySQL). [#29463](https://github.com/ClickHouse/ClickHouse/pull/29463) ([Azat Khuzhin](https://github.com/azat)). Transform `isNull`/`isNotNull` to `IS NULL`/`IS NOT NULL` (for external dbs, i.e. MySQL). [#29446](https://github.com/ClickHouse/ClickHouse/pull/29446) ([Azat Khuzhin](https://github.com/azat)).
* SELECT queries from Dictionary tables will use multiple threads. [#30500](https://github.com/ClickHouse/ClickHouse/pull/30500) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance for filtering (WHERE operation) of `Decimal` columns. [#30431](https://github.com/ClickHouse/ClickHouse/pull/30431) ([Jun Jin](https://github.com/vesslanjin)).
* Remove branchy code in the filter operation with a better implementation using popcnt/ctz, which has better performance. [#29881](https://github.com/ClickHouse/ClickHouse/pull/29881) ([Jun Jin](https://github.com/vesslanjin)).
* Improve the filter bytemask generator function (used for the WHERE operator) with SSE/AVX2/AVX512 instructions. Note that by default ClickHouse only uses SSE, so this is only relevant for custom builds. [#30014](https://github.com/ClickHouse/ClickHouse/pull/30014) ([jasperzhu](https://github.com/jinjunzh)). [#30670](https://github.com/ClickHouse/ClickHouse/pull/30670) ([jasperzhu](https://github.com/jinjunzh)).
* Improve the performance of the SUM aggregate function for Nullable floating point numbers. [#28906](https://github.com/ClickHouse/ClickHouse/pull/28906) ([Raúl Marín](https://github.com/Algunenano)).
* Speed up the part loading process when multiple disks are in use. The idea is similar to https://github.com/ClickHouse/ClickHouse/pull/16423. A production environment shows an improvement from 24 min to 16 min. [#28363](https://github.com/ClickHouse/ClickHouse/pull/28363) ([Amos Bird](https://github.com/amosbird)).
* Reduce default settings for S3 multipart upload part size to lower memory usage. [#28679](https://github.com/ClickHouse/ClickHouse/pull/28679) ([ianton-ru](https://github.com/ianton-ru)).
* Speed up the `bitmapAnd` function. [#28332](https://github.com/ClickHouse/ClickHouse/pull/28332) ([dddounaiking](https://github.com/OodounaikingoO)).
* Removed sub-optimal mutation notifications in `StorageMergeTree` when merges are still going. [#27552](https://github.com/ClickHouse/ClickHouse/pull/27552) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Attempt to improve performance of string comparison. [#28767](https://github.com/ClickHouse/ClickHouse/pull/28767) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Primary key index and partition filter can now work with tuples. [#29281](https://github.com/ClickHouse/ClickHouse/pull/29281) ([凌涛](https://github.com/lingtaolf)).
* If a query has multiple quantile aggregate functions with the same arguments but different level parameters, they will be fused together and executed in one pass if the setting `optimize_syntax_fuse_functions` is enabled. [#26657](https://github.com/ClickHouse/ClickHouse/pull/26657) ([hexiaoting](https://github.com/hexiaoting)).
* Now min-max aggregation over the first expression of the primary key is optimized by projection. This is for [#329](https://github.com/ClickHouse/ClickHouse/issues/329). [#29918](https://github.com/ClickHouse/ClickHouse/pull/29918) ([Amos Bird](https://github.com/amosbird)).

#### Experimental Feature

* Add ability to change node configuration (in the `.xml` file) for ClickHouse Keeper. [#30372](https://github.com/ClickHouse/ClickHouse/pull/30372) ([alesapin](https://github.com/alesapin)).
* Add `sparkbar` aggregate function. This closes [#26175](https://github.com/ClickHouse/ClickHouse/issues/26175). [#27481](https://github.com/ClickHouse/ClickHouse/pull/27481) ([小路](https://github.com/nicelulu)). Note: there is one flaw in this function; the behaviour will be changed in future releases.

#### Improvement

* Allow the user to change log levels without restart. [#29586](https://github.com/ClickHouse/ClickHouse/pull/29586) ([Nikolay Degterinsky](https://github.com/evillique)).
* Multiple improvements for SQL UDFs. Queries for manipulation of SQL User Defined Functions now support the ON CLUSTER clause. Example: `CREATE FUNCTION test_function ON CLUSTER 'cluster' AS x -> x + 1;`. Closes [#30666](https://github.com/ClickHouse/ClickHouse/issues/30666). [#30734](https://github.com/ClickHouse/ClickHouse/pull/30734) ([Maksim Kita](https://github.com/kitaisreal)). Support `CREATE OR REPLACE` and `CREATE IF NOT EXISTS` syntaxes. [#30454](https://github.com/ClickHouse/ClickHouse/pull/30454) ([Maksim Kita](https://github.com/kitaisreal)). Added DROP IF EXISTS support. Example: `DROP FUNCTION IF EXISTS test_function`. [#30437](https://github.com/ClickHouse/ClickHouse/pull/30437) ([Maksim Kita](https://github.com/kitaisreal)). Support lambdas. Example: `CREATE FUNCTION lambda_function AS x -> arrayMap(element -> element * 2, x);`. [#30435](https://github.com/ClickHouse/ClickHouse/pull/30435) ([Maksim Kita](https://github.com/kitaisreal)). Support SQL user defined functions for `clickhouse-local`. [#30179](https://github.com/ClickHouse/ClickHouse/pull/30179) ([Maksim Kita](https://github.com/kitaisreal)).
* Enable the per-query memory profiler (`memory_profiler_step` = 4 MiB) globally. [#29455](https://github.com/ClickHouse/ClickHouse/pull/29455) ([Azat Khuzhin](https://github.com/azat)).
* Added columns `data_compressed_bytes`, `data_uncompressed_bytes`, `marks_bytes` into `system.data_skipping_indices`. Added columns `secondary_indices_compressed_bytes`, `secondary_indices_uncompressed_bytes`, `secondary_indices_marks_bytes` into `system.parts`. Closes [#29697](https://github.com/ClickHouse/ClickHouse/issues/29697). [#29896](https://github.com/ClickHouse/ClickHouse/pull/29896) ([Maksim Kita](https://github.com/kitaisreal)).
* Add `table` alias to system.tables and `database` alias to system.databases. [#29677](https://github.com/ClickHouse/ClickHouse/issues/29677). [#29882](https://github.com/ClickHouse/ClickHouse/pull/29882) ([kevin wan](https://github.com/MaxWk)).
* Correctly resolve interdependencies between tables on server startup. Closes [#8004](https://github.com/ClickHouse/ClickHouse/issues/8004), closes [#15170](https://github.com/ClickHouse/ClickHouse/issues/15170). [#28373](https://github.com/ClickHouse/ClickHouse/pull/28373) ([tavplubix](https://github.com/tavplubix)).
* Avoid the error "Division by zero" when the denominator is Nullable in the functions `divide`, `intDiv` and `modulo`. Closes [#22621](https://github.com/ClickHouse/ClickHouse/issues/22621). [#28352](https://github.com/ClickHouse/ClickHouse/pull/28352) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow parsing values of the `Date` data type in text formats as `YYYYMMDD` in addition to `YYYY-MM-DD`. This closes [#30870](https://github.com/ClickHouse/ClickHouse/issues/30870). [#30871](https://github.com/ClickHouse/ClickHouse/pull/30871) ([alexey-milovidov](https://github.com/alexey-milovidov)).
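
  For example (assuming `CAST` from `String` goes through the same text parser):

  ```sql
  SELECT CAST('20211109' AS Date);  -- 2021-11-09, same as CAST('2021-11-09' AS Date)
  ```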
* Web UI: render bars in table cells. [#29792](https://github.com/ClickHouse/ClickHouse/pull/29792) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Users can now create dictionaries with comments: `CREATE DICTIONARY ... COMMENT 'value'`. [#29899](https://github.com/ClickHouse/ClickHouse/pull/29899) ([Vasily Nemkov](https://github.com/Enmk)). Users can now also set comments on a database in the `CREATE DATABASE` statement. [#29429](https://github.com/ClickHouse/ClickHouse/pull/29429) ([Vasily Nemkov](https://github.com/Enmk)).
* Introduce the `compiled_expression_cache_elements_size` setting. If you ever want to use this setting, you will already know what it does. [#30667](https://github.com/ClickHouse/ClickHouse/pull/30667) ([Maksim Kita](https://github.com/kitaisreal)).
* clickhouse-format now supports the option `--query`. In previous versions you had to pass the query via stdin. [#29325](https://github.com/ClickHouse/ClickHouse/pull/29325) ([凌涛](https://github.com/lingtaolf)).
* Support `ALTER TABLE` for tables in `Memory` databases. Memory databases are used in `clickhouse-local`. [#30866](https://github.com/ClickHouse/ClickHouse/pull/30866) ([tavplubix](https://github.com/tavplubix)).
* Arrays of all serializable types are now supported by `arrayStringConcat`. [#30840](https://github.com/ClickHouse/ClickHouse/pull/30840) ([Nickita Taranov](https://github.com/nickitat)).
* ClickHouse now takes Docker/cgroups limits into account when determining the amount of system memory. See [#25662](https://github.com/ClickHouse/ClickHouse/issues/25662). [#30574](https://github.com/ClickHouse/ClickHouse/pull/30574) ([Pavel Medvedev](https://github.com/pmed)).
* The fetched table structure for a PostgreSQL database is more reliable now. [#30477](https://github.com/ClickHouse/ClickHouse/pull/30477) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Full support of positional arguments in GROUP BY and ORDER BY. [#30433](https://github.com/ClickHouse/ClickHouse/pull/30433) ([Kseniia Sumarokova](https://github.com/kssenii)).
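
  For instance (the `events` table is hypothetical, and the `enable_positional_arguments` setting may need to be turned on in this version):

  ```sql
  SET enable_positional_arguments = 1;

  -- GROUP BY 1 refers to event_type, ORDER BY 2 to the count.
  SELECT event_type, count() AS c
  FROM events
  GROUP BY 1
  ORDER BY 2 DESC;
  ```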
* Allow extracting a non-string element as a string using JSONExtractString. This is for [pull/25452#issuecomment-927123287](https://github.com/ClickHouse/ClickHouse/pull/25452#issuecomment-927123287). [#30426](https://github.com/ClickHouse/ClickHouse/pull/30426) ([Amos Bird](https://github.com/amosbird)).
* Added the ability to use a FINAL clause in SELECT queries from `GraphiteMergeTree`. [#30360](https://github.com/ClickHouse/ClickHouse/pull/30360) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Minor improvements in replica cloning and enqueuing fetches for broken parts, which should avoid extremely rare hanging of `GET_PART` entries in the replication queue. [#30346](https://github.com/ClickHouse/ClickHouse/pull/30346) ([tavplubix](https://github.com/tavplubix)).
* Allow symlinks to files in the `user_files` directory for the file table function. [#30309](https://github.com/ClickHouse/ClickHouse/pull/30309) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed comparison of `Date32` with `Date`, `DateTime`, `DateTime64` and `String`. [#30219](https://github.com/ClickHouse/ClickHouse/pull/30219) ([liang.huang](https://github.com/lhuang09287750)).
* Allow removing the `SAMPLE BY` expression from `MergeTree` tables (`ALTER TABLE <table> REMOVE SAMPLE BY`). [#30180](https://github.com/ClickHouse/ClickHouse/pull/30180) ([Anton Popov](https://github.com/CurtizJ)).
* Now `Keeper` (as part of `clickhouse-server`) will start asynchronously if it can connect to some other node. [#30170](https://github.com/ClickHouse/ClickHouse/pull/30170) ([alesapin](https://github.com/alesapin)).
* Now `clickhouse-client` supports native multi-line editing. [#30143](https://github.com/ClickHouse/ClickHouse/pull/30143) ([Amos Bird](https://github.com/amosbird)).
* `polygon` dictionaries (reverse geocoding): added support for reading the dictionary content with the SELECT query method if the setting `store_polygon_key_column` = true. Closes [#30090](https://github.com/ClickHouse/ClickHouse/issues/30090). [#30142](https://github.com/ClickHouse/ClickHouse/pull/30142) ([Maksim Kita](https://github.com/kitaisreal)).
* Add ClickHouse logo to Play UI. [#29674](https://github.com/ClickHouse/ClickHouse/pull/29674) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Better exception message while reading a column from Arrow-supported formats like `Arrow`, `ArrowStream`, `Parquet` and `ORC`. This closes [#29926](https://github.com/ClickHouse/ClickHouse/issues/29926). [#29927](https://github.com/ClickHouse/ClickHouse/pull/29927) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix data race between flush and startup in `Buffer` tables. This can appear in tests. [#29930](https://github.com/ClickHouse/ClickHouse/pull/29930) ([Azat Khuzhin](https://github.com/azat)).
* Fix `lock-order-inversion` between `DROP TABLE` for `DatabaseMemory` and `LiveView`. Live View is an experimental feature. The Memory database is used in clickhouse-local. [#29929](https://github.com/ClickHouse/ClickHouse/pull/29929) ([Azat Khuzhin](https://github.com/azat)).
* Fix lock-order-inversion between periodic dictionary reload and config reload. [#29928](https://github.com/ClickHouse/ClickHouse/pull/29928) ([Azat Khuzhin](https://github.com/azat)).
* Update zoneinfo files to 2021c. [#29925](https://github.com/ClickHouse/ClickHouse/pull/29925) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add ability to configure retries and delays between them for `clickhouse-copier`. [#29921](https://github.com/ClickHouse/ClickHouse/pull/29921) ([Azat Khuzhin](https://github.com/azat)).
* Add `shutdown_wait_unfinished_queries` server setting to allow waiting for running queries up to `shutdown_wait_unfinished` time. This is for [#24451](https://github.com/ClickHouse/ClickHouse/issues/24451). [#29914](https://github.com/ClickHouse/ClickHouse/pull/29914) ([Amos Bird](https://github.com/amosbird)).
* Add ability to trace peak memory usage (with a new trace_type in `system.trace_log` - `MemoryPeak`). [#29858](https://github.com/ClickHouse/ClickHouse/pull/29858) ([Azat Khuzhin](https://github.com/azat)).
* PostgreSQL foreign tables: added the partitioned table prefix 'p' to the query for fetching the replica identity index. [#29828](https://github.com/ClickHouse/ClickHouse/pull/29828) ([Shoh Jahon](https://github.com/Shohjahon)).
* Apply `max_untracked_memory`/`memory_profiler_step`/`memory_profiler_sample_probability` during mutate/merge to profile memory usage during merges. [#29681](https://github.com/ClickHouse/ClickHouse/pull/29681) ([Azat Khuzhin](https://github.com/azat)).
* Query obfuscator: `clickhouse-format --obfuscate` now works with more types of queries. [#29672](https://github.com/ClickHouse/ClickHouse/pull/29672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the issue: `clickhouse-format --obfuscate` cannot process queries with embedded dictionaries (functions `regionTo...`). [#29667](https://github.com/ClickHouse/ClickHouse/pull/29667) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect Nullable processing of JSON functions. This fixes [#29615](https://github.com/ClickHouse/ClickHouse/issues/29615). Marked as an improvement because https://github.com/ClickHouse/ClickHouse/pull/28012 has not been released yet. [#29659](https://github.com/ClickHouse/ClickHouse/pull/29659) ([Amos Bird](https://github.com/amosbird)).
* Increase `listen_backlog` by default (to match the default in newer Linux kernels). [#29643](https://github.com/ClickHouse/ClickHouse/pull/29643) ([Azat Khuzhin](https://github.com/azat)).
* Reload dictionaries, models and user defined executable functions if the server configs `dictionaries_config`, `models_config`, `user_defined_executable_functions_config` change. Closes [#28142](https://github.com/ClickHouse/ClickHouse/issues/28142). [#29529](https://github.com/ClickHouse/ClickHouse/pull/29529) ([Maksim Kita](https://github.com/kitaisreal)).
* Get rid of the pointless restriction on projection names. Now projection names can start with `tmp_`. [#29520](https://github.com/ClickHouse/ClickHouse/pull/29520) ([Amos Bird](https://github.com/amosbird)).
* Fixed the `There is no query or query context has expired` error in mutations with nested subqueries. Do not allow subqueries in mutations if the table is replicated and the `allow_nondeterministic_mutations` setting is disabled. [#29495](https://github.com/ClickHouse/ClickHouse/pull/29495) ([tavplubix](https://github.com/tavplubix)).
* Apply config changes to `max_concurrent_queries` during runtime (no need to restart). [#29414](https://github.com/ClickHouse/ClickHouse/pull/29414) ([Raúl Marín](https://github.com/Algunenano)).
* Added setting `use_skip_indexes`. [#29405](https://github.com/ClickHouse/ClickHouse/pull/29405) ([Maksim Kita](https://github.com/kitaisreal)).
* Add support for `FREEZE`ing in-memory parts (for backups). [#29376](https://github.com/ClickHouse/ClickHouse/pull/29376) ([Mo Xuan](https://github.com/mo-avatar)).
* Pass through the initial query_id for `clickhouse-benchmark` (previously, if you ran a remote query via `clickhouse-benchmark`, queries on shards were not linked to the initial query via `initial_query_id`). [#29364](https://github.com/ClickHouse/ClickHouse/pull/29364) ([Azat Khuzhin](https://github.com/azat)).
* Skip indexes `tokenbf_v1` and `ngrambf_v1`: added support for the `Array` data type with keys of `String` or `FixedString` type. [#29280](https://github.com/ClickHouse/ClickHouse/pull/29280) ([Maksim Kita](https://github.com/kitaisreal)). Skip indexes `tokenbf_v1` and `ngrambf_v1` added support for the `Map` data type with keys of `String` or `FixedString` type. Author @lingtaolf. [#29220](https://github.com/ClickHouse/ClickHouse/pull/29220) ([Maksim Kita](https://github.com/kitaisreal)).
* Function `has`: added support for the `Map` data type. [#29267](https://github.com/ClickHouse/ClickHouse/pull/29267) ([Maksim Kita](https://github.com/kitaisreal)).
* Add a `compress_logs` setting for clickhouse-keeper which allows compressing clickhouse-keeper logs (for the replicated state machine) with `ZSTD`. Implements: [#26977](https://github.com/ClickHouse/ClickHouse/issues/26977). [#29223](https://github.com/ClickHouse/ClickHouse/pull/29223) ([alesapin](https://github.com/alesapin)).
* Add a setting `external_table_strict_query` - it will force passing the whole WHERE expression in queries to foreign databases even if it is incompatible. [#29206](https://github.com/ClickHouse/ClickHouse/pull/29206) ([Azat Khuzhin](https://github.com/azat)).
* Disable projections when `ARRAY JOIN` is used. In previous versions projection analysis might break aliases in array join. [#29139](https://github.com/ClickHouse/ClickHouse/pull/29139) ([Amos Bird](https://github.com/amosbird)).
* Support more types in the `MsgPack` input/output format. [#29077](https://github.com/ClickHouse/ClickHouse/pull/29077) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow input and output of `LowCardinality` columns in the `ORC` input/output format. [#29062](https://github.com/ClickHouse/ClickHouse/pull/29062) ([Kruglov Pavel](https://github.com/Avogar)).
* Select from `system.distributed_ddl_queue` might show incorrect values; it's fixed. [#29061](https://github.com/ClickHouse/ClickHouse/pull/29061) ([tavplubix](https://github.com/tavplubix)).
* Correct behaviour with unknown methods for HTTP connection. Solves [#29050](https://github.com/ClickHouse/ClickHouse/issues/29050). [#29057](https://github.com/ClickHouse/ClickHouse/pull/29057) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* `clickhouse-keeper`: Fix a bug in `clickhouse-keeper-converter` which could lead to some data loss while restoring from ZooKeeper logs (not snapshot). [#29030](https://github.com/ClickHouse/ClickHouse/pull/29030) ([小路](https://github.com/nicelulu)). Fix a bug in `clickhouse-keeper-converter` which could lead to incorrect ZooKeeper log deserialization. [#29071](https://github.com/ClickHouse/ClickHouse/pull/29071) ([小路](https://github.com/nicelulu)).
* Apply settings from `CREATE ... AS SELECT` queries (fixes: [#28810](https://github.com/ClickHouse/ClickHouse/issues/28810)). [#28962](https://github.com/ClickHouse/ClickHouse/pull/28962) ([Azat Khuzhin](https://github.com/azat)).
* Respect the default database setting for ALTER TABLE ... ON CLUSTER ... REPLACE/MOVE PARTITION FROM/TO ... [#28955](https://github.com/ClickHouse/ClickHouse/pull/28955) ([anneji-dev](https://github.com/anneji-dev)).
* gRPC protocol: Allow changing server-side compression from the client. [#28953](https://github.com/ClickHouse/ClickHouse/pull/28953) ([Vitaly Baranov](https://github.com/vitlibar)).
* Skip the "no data" exception when reading thermal sensors for asynchronous metrics. This closes [#28852](https://github.com/ClickHouse/ClickHouse/issues/28852). [#28882](https://github.com/ClickHouse/ClickHouse/pull/28882) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a logical race condition that might cause a `Dictionary not found` error for an existing dictionary in rare cases. [#28853](https://github.com/ClickHouse/ClickHouse/pull/28853) ([tavplubix](https://github.com/tavplubix)).
* Relax the nested function check for the If-combinator (but forbid nested identical combinators). [#28828](https://github.com/ClickHouse/ClickHouse/pull/28828) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible uncaught exception during server termination. [#28761](https://github.com/ClickHouse/ClickHouse/pull/28761) ([Azat Khuzhin](https://github.com/azat)).
* Forbid cleaning of tmp directories that can be used by an active mutation/merge if the mutation/merge is extraordinarily long. [#28760](https://github.com/ClickHouse/ClickHouse/pull/28760) ([Azat Khuzhin](https://github.com/azat)).
* Allow the optimization `optimize_arithmetic_operations_in_aggregate_functions = 1` when an alias is used. [#28746](https://github.com/ClickHouse/ClickHouse/pull/28746) ([Amos Bird](https://github.com/amosbird)).
* Implement the `detach_not_byte_identical_parts` setting for `ReplicatedMergeTree`, which will detach instead of removing not-byte-identical parts (after merge/mutate). [#28708](https://github.com/ClickHouse/ClickHouse/pull/28708) ([Azat Khuzhin](https://github.com/azat)).
* Implement the `max_suspicious_broken_parts_bytes` setting for `MergeTree` (to limit the total size of all broken parts; the default is `1GiB`). [#28707](https://github.com/ClickHouse/ClickHouse/pull/28707) ([Azat Khuzhin](https://github.com/azat)).
* Enable expanding macros in `RabbitMQ` table settings. [#28683](https://github.com/ClickHouse/ClickHouse/pull/28683) ([Vitaly Baranov](https://github.com/vitlibar)).
* Restore the possibility to read data of a table using the `Log` engine in multiple threads. [#28125](https://github.com/ClickHouse/ClickHouse/pull/28125) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix misbehavior of NULL column handling in JSON functions. This fixes [#27930](https://github.com/ClickHouse/ClickHouse/issues/27930). [#28012](https://github.com/ClickHouse/ClickHouse/pull/28012) ([Amos Bird](https://github.com/amosbird)).
* Allow setting the size of the Mark/Uncompressed cache for skip indices separately from columns. [#27961](https://github.com/ClickHouse/ClickHouse/pull/27961) ([Amos Bird](https://github.com/amosbird)).
* Allow mixing JOIN with `USING` with other JOIN types. [#23881](https://github.com/ClickHouse/ClickHouse/pull/23881) ([darkkeks](https://github.com/darkkeks)).
* Update the aws-sdk submodule for throttling in Yandex Cloud S3. [#30646](https://github.com/ClickHouse/ClickHouse/pull/30646) ([ianton-ru](https://github.com/ianton-ru)).
* Fix releasing query ID and session ID at the end of query processing while handling a gRPC call. [#29954](https://github.com/ClickHouse/ClickHouse/pull/29954) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix shutdown of `AccessControlManager` to fix a flaky test. [#29951](https://github.com/ClickHouse/ClickHouse/pull/29951) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a failed assertion in reading from `HDFS`. Update the libhdfs3 library to be able to run in tests in debug. Closes [#29251](https://github.com/ClickHouse/ClickHouse/issues/29251). Closes [#27814](https://github.com/ClickHouse/ClickHouse/issues/27814). [#29276](https://github.com/ClickHouse/ClickHouse/pull/29276) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### Build/Testing/Packaging Improvement
|
||||
|
||||
* Add support for FreeBSD builds for Aarch64 machines. [#29952](https://github.com/ClickHouse/ClickHouse/pull/29952) ([MikaelUrankar](https://github.com/MikaelUrankar)).
* Recursive submodules are no longer needed for ClickHouse. [#30315](https://github.com/ClickHouse/ClickHouse/pull/30315) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* ClickHouse can be statically built with Musl. This is added as an experiment; it does not support building `odbc-bridge`, `library-bridge`, integration with CatBoost and some libraries. [#30248](https://github.com/ClickHouse/ClickHouse/pull/30248) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Enable `Protobuf`, `Arrow`, `ORC`, `Parquet` for `AArch64` and `Darwin` (macOS) builds. This closes [#29248](https://github.com/ClickHouse/ClickHouse/issues/29248). This closes [#28018](https://github.com/ClickHouse/ClickHouse/issues/28018). [#30015](https://github.com/ClickHouse/ClickHouse/pull/30015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add a cross-build for PowerPC (powerpc64le). This closes [#9589](https://github.com/ClickHouse/ClickHouse/issues/9589). Enable support for interaction with MySQL for AArch64 and PowerPC. This closes [#26301](https://github.com/ClickHouse/ClickHouse/issues/26301). [#30010](https://github.com/ClickHouse/ClickHouse/pull/30010) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Leave only the required files in cross-compile toolchains. Include them as submodules (earlier they were downloaded as tarballs). [#29974](https://github.com/ClickHouse/ClickHouse/pull/29974) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Implement a structure-aware fuzzing approach in ClickHouse for the SELECT statement parser. [#30012](https://github.com/ClickHouse/ClickHouse/pull/30012) ([Paul](https://github.com/PaulCher)).
* Turn on the experimental constexpr expression evaluator for clang to speed up template code compilation. [#29668](https://github.com/ClickHouse/ClickHouse/pull/29668) ([myrrc](https://github.com/myrrc)).
* Add the ability to compile using a newer version of glibc without using new symbols. [#29594](https://github.com/ClickHouse/ClickHouse/pull/29594) ([Azat Khuzhin](https://github.com/azat)).
* Reduce the Debug build binary size with a clang optimization option. [#28736](https://github.com/ClickHouse/ClickHouse/pull/28736) ([flynn](https://github.com/ucasfl)).
* Now all images for CI will be placed in a separate DockerHub repo. [#28656](https://github.com/ClickHouse/ClickHouse/pull/28656) ([alesapin](https://github.com/alesapin)).
* Improve support for builds with clang-13. [#28046](https://github.com/ClickHouse/ClickHouse/pull/28046) ([Sergei Semin](https://github.com/syominsergey)).
* Add the ability to print raw profile events to `clickhouse-client` (this can be useful for debugging and for testing). [#30064](https://github.com/ClickHouse/ClickHouse/pull/30064) ([Azat Khuzhin](https://github.com/azat)).
* Add a time dependency for the clickhouse-server unit (systemd and sysvinit init). [#28891](https://github.com/ClickHouse/ClickHouse/pull/28891) ([Azat Khuzhin](https://github.com/azat)).
* Reload the stacktrace cache when a symbol is reloaded. [#28137](https://github.com/ClickHouse/ClickHouse/pull/28137) ([Amos Bird](https://github.com/amosbird)).
#### Bug Fix
* Functions for case-insensitive search in UTF-8 strings like `positionCaseInsensitiveUTF8` and `countSubstringsCaseInsensitiveUTF8` might find substrings that actually do not match in very rare cases; it's fixed. [#30663](https://github.com/ClickHouse/ClickHouse/pull/30663) ([tavplubix](https://github.com/tavplubix)).
* Fix reading from an empty file on an encrypted disk. [#30494](https://github.com/ClickHouse/ClickHouse/pull/30494) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix transformation of disjunction chains to `IN` (controlled by the setting `optimize_min_equality_disjunction_chain_length`) in distributed queries with the setting `legacy_column_name_of_tuple_literal = 0`. [#28658](https://github.com/ClickHouse/ClickHouse/pull/28658) ([Anton Popov](https://github.com/CurtizJ)).
* Allow using a materialized column as the sharding key in a distributed table even if `insert_allow_materialized_columns=0`; see the sketch after this list. [#28637](https://github.com/ClickHouse/ClickHouse/pull/28637) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix `ORDER BY ... WITH FILL` when `TO` and `FROM` are set and there are no rows in the result set; see the sketch after this list. [#30888](https://github.com/ClickHouse/ClickHouse/pull/30888) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a set index not being used in AND/OR expressions when there are more than two operands. This fixes [#30416](https://github.com/ClickHouse/ClickHouse/issues/30416). [#30887](https://github.com/ClickHouse/ClickHouse/pull/30887) ([Amos Bird](https://github.com/amosbird)).
* Fix a crash when a projection with a hashing function is materialized. This fixes [#30861](https://github.com/ClickHouse/ClickHouse/issues/30861). The issue is similar to https://github.com/ClickHouse/ClickHouse/pull/28560, which is a lack of proper understanding of the invariant of the header's emptiness. [#30877](https://github.com/ClickHouse/ClickHouse/pull/30877) ([Amos Bird](https://github.com/amosbird)).
* Fixed ambiguity when extracting an auxiliary ZooKeeper name from a ZooKeeper path in `ReplicatedMergeTree`. Previously the server might fail to start with `Unknown auxiliary ZooKeeper name` if the ZooKeeper path contained a colon. Fixes [#29052](https://github.com/ClickHouse/ClickHouse/issues/29052). It was also allowed to specify a ZooKeeper path that does not start with a slash, but now this is deprecated and creation of new tables with such a path is not allowed. Slashes and colons in auxiliary ZooKeeper names are not allowed either. [#30822](https://github.com/ClickHouse/ClickHouse/pull/30822) ([tavplubix](https://github.com/tavplubix)).
* Clean the temporary directory when `localBackup` fails for some reason. [#30797](https://github.com/ClickHouse/ClickHouse/pull/30797) ([ianton-ru](https://github.com/ianton-ru)).
* Fixed a race condition between `REPLACE/MOVE PARTITION` and a background merge in non-replicated `MergeTree` that might cause a part of the moved/replaced data to remain in the partition. Fixes [#29327](https://github.com/ClickHouse/ClickHouse/issues/29327). [#30717](https://github.com/ClickHouse/ClickHouse/pull/30717) ([tavplubix](https://github.com/tavplubix)).
* Fix PREWHERE with WHERE in case of an always-true PREWHERE. [#30668](https://github.com/ClickHouse/ClickHouse/pull/30668) ([Azat Khuzhin](https://github.com/azat)).
* The limit push-down optimization could cause an error `Cannot find column`. Fixes [#30438](https://github.com/ClickHouse/ClickHouse/issues/30438). [#30562](https://github.com/ClickHouse/ClickHouse/pull/30562) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add missing parentheses for `isNotNull`/`isNull` rewrites to `IS [NOT] NULL` (fixes queries that have something like `isNotNull(1)+isNotNull(2)`); see the sketch after this list. [#30520](https://github.com/ClickHouse/ClickHouse/pull/30520) ([Azat Khuzhin](https://github.com/azat)).
* Fix a deadlock on ALTER with a scalar subquery to the same table; closes [#30461](https://github.com/ClickHouse/ClickHouse/issues/30461). [#30492](https://github.com/ClickHouse/ClickHouse/pull/30492) ([Vladimir C](https://github.com/vdimir)).
* Fixed a segfault which might happen if the session expired during execution of REPLACE PARTITION. [#30432](https://github.com/ClickHouse/ClickHouse/pull/30432) ([tavplubix](https://github.com/tavplubix)).
* Queries with a condition like `IN (subquery)` could return an incorrect result in case an aggregate projection was applied. Fixed creation of sets for projections. [#30310](https://github.com/ClickHouse/ClickHouse/pull/30310) ([Amos Bird](https://github.com/amosbird)).
* Fix column alias resolution of JOIN queries when a projection is enabled. This fixes [#30146](https://github.com/ClickHouse/ClickHouse/issues/30146). [#30293](https://github.com/ClickHouse/ClickHouse/pull/30293) ([Amos Bird](https://github.com/amosbird)).
* Fix a deficiency in the `replaceRegexpAll` function. [#30292](https://github.com/ClickHouse/ClickHouse/pull/30292) ([Memo](https://github.com/Joeywzr)).
* Fix `ComplexKeyHashedDictionary` and `ComplexKeySparseHashedDictionary` parsing the `preallocate` option from the layout config. [#30246](https://github.com/ClickHouse/ClickHouse/pull/30246) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix the `[I]LIKE` function. Closes [#28661](https://github.com/ClickHouse/ClickHouse/issues/28661). [#30244](https://github.com/ClickHouse/ClickHouse/pull/30244) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix a crash with short-circuit evaluation and `LowCardinality` in `multiIf`. [#30243](https://github.com/ClickHouse/ClickHouse/pull/30243) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the `bytes_allocated` calculation for nullable attributes in `FlatDictionary` and `HashedDictionary`. [#30238](https://github.com/ClickHouse/ClickHouse/pull/30238) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow identifiers starting with numbers in multiple joins. [#30230](https://github.com/ClickHouse/ClickHouse/pull/30230) ([Vladimir C](https://github.com/vdimir)).
* Fix reading from `MergeTree` with `max_read_buffer_size = 0` (when the user wants to shoot himself in the foot); this can lead to the exceptions `Can't adjust last granule` and `LOGICAL_ERROR`, or even to data loss. [#30192](https://github.com/ClickHouse/ClickHouse/pull/30192) ([Azat Khuzhin](https://github.com/azat)).
* Fix `pread_fake_async`/`pread_threadpool` with `min_bytes_to_use_direct_io`. [#30191](https://github.com/ClickHouse/ClickHouse/pull/30191) ([Azat Khuzhin](https://github.com/azat)).
* Fix `INSERT SELECT` incorrectly filling a `MATERIALIZED` column based on a `Nullable` column. [#30189](https://github.com/ClickHouse/ClickHouse/pull/30189) ([Azat Khuzhin](https://github.com/azat)).
* Support nullable arguments in the function `initializeAggregation`. [#30177](https://github.com/ClickHouse/ClickHouse/pull/30177) ([Anton Popov](https://github.com/CurtizJ)).
* Fix the error `Port is already connected` for queries with `GLOBAL IN` and `WITH TOTALS`. Only for 21.9 and 21.10. [#30086](https://github.com/ClickHouse/ClickHouse/pull/30086) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a race between MOVE PARTITION and merges/mutations for MergeTree. [#30074](https://github.com/ClickHouse/ClickHouse/pull/30074) ([Azat Khuzhin](https://github.com/azat)).
* A dropped `Memory` database might reappear after server restart; it's fixed ([#29795](https://github.com/ClickHouse/ClickHouse/issues/29795)). Also added the `force_remove_data_recursively_on_drop` setting as a workaround for the `Directory not empty` error when dropping an `Ordinary` database (because it's not possible to remove data leftovers manually in a cloud environment); see the sketch after this list. [#30054](https://github.com/ClickHouse/ClickHouse/pull/30054) ([tavplubix](https://github.com/tavplubix)).
* Fix a crash of sampling by `tuple()`; closes [#30004](https://github.com/ClickHouse/ClickHouse/issues/30004). [#30016](https://github.com/ClickHouse/ClickHouse/pull/30016) ([flynn](https://github.com/ucasfl)).
* Try to close issue [#29965](https://github.com/ClickHouse/ClickHouse/issues/29965). [#29976](https://github.com/ClickHouse/ClickHouse/pull/29976) ([hexiaoting](https://github.com/hexiaoting)).
* Fix a possible data race between `FileChecker` and `StorageLog`/`StorageStripeLog`. [#29959](https://github.com/ClickHouse/ClickHouse/pull/29959) ([Azat Khuzhin](https://github.com/azat)).
* Fix a data race between `LogSink::writeMarks()` and `LogSource` in `StorageLog`. [#29946](https://github.com/ClickHouse/ClickHouse/pull/29946) ([Azat Khuzhin](https://github.com/azat)).
* Fix a potential resource leak in the concurrent query limit of merge tree tables introduced in https://github.com/ClickHouse/ClickHouse/pull/19544. [#29879](https://github.com/ClickHouse/ClickHouse/pull/29879) ([Amos Bird](https://github.com/amosbird)).
* Fix the system tables recreation check (it failed to detect changes in enum values). [#29857](https://github.com/ClickHouse/ClickHouse/pull/29857) ([Azat Khuzhin](https://github.com/azat)).
* MaterializedMySQL: Fix an issue where, if the connection to MySQL was lost, only parts of a transaction could be processed. [#29837](https://github.com/ClickHouse/ClickHouse/pull/29837) ([Håvard Kvålen](https://github.com/havardk)).
* Avoid the `Timeout exceeded: elapsed 18446744073.709553 seconds` error that might happen in extremely rare cases, presumably due to some bug in the kernel. Fixes [#29154](https://github.com/ClickHouse/ClickHouse/issues/29154). [#29811](https://github.com/ClickHouse/ClickHouse/pull/29811) ([tavplubix](https://github.com/tavplubix)).
* Fix a bad cast in the `ATTACH TABLE ... FROM 'path'` query when a non-string literal is used instead of a path. It may lead to reading of uninitialized memory. [#29790](https://github.com/ClickHouse/ClickHouse/pull/29790) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix concurrent access to `LowCardinality` during `GROUP BY` (in combination with `Buffer` tables it may lead to trouble). [#29782](https://github.com/ClickHouse/ClickHouse/pull/29782) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect `GROUP BY` results (multiple rows with the same keys in the result) in a distributed query when shards had mixed versions `<= 21.3` and `>= 21.4`, the `GROUP BY` key had several columns all with fixed size, and two-level aggregation was activated (see `group_by_two_level_threshold` and `group_by_two_level_threshold_bytes`). Fixes [#29580](https://github.com/ClickHouse/ClickHouse/issues/29580). [#29735](https://github.com/ClickHouse/ClickHouse/pull/29735) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed incorrect behaviour of the setting `materialized_postgresql_tables_list` at server restart. Found in [#28529](https://github.com/ClickHouse/ClickHouse/issues/28529). [#29686](https://github.com/ClickHouse/ClickHouse/pull/29686) ([Kseniia Sumarokova](https://github.com/kssenii)).
* A condition in a filter predicate could be lost after the push-down optimisation. [#29625](https://github.com/ClickHouse/ClickHouse/pull/29625) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix JIT expression compilation with aliases and short-circuit expression evaluation. Closes [#29403](https://github.com/ClickHouse/ClickHouse/issues/29403). [#29574](https://github.com/ClickHouse/ClickHouse/pull/29574) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix a rare segfault in an `ALTER MODIFY` query when using an incorrect table identifier in a `DEFAULT` expression like `x.y.z...`. Fixes [#29184](https://github.com/ClickHouse/ClickHouse/issues/29184). [#29573](https://github.com/ClickHouse/ClickHouse/pull/29573) ([alesapin](https://github.com/alesapin)).
* Fix a nullptr dereference for `GROUP BY WITH TOTALS HAVING` (when the column from `HAVING` wasn't selected). [#29553](https://github.com/ClickHouse/ClickHouse/pull/29553) ([Azat Khuzhin](https://github.com/azat)).
* Avoid deadlocks when reading from and writing to Join table engine tables at the same time. [#29544](https://github.com/ClickHouse/ClickHouse/pull/29544) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a bug in the `pathStartsWith` check caused by the usage of `std::mismatch`: "The behavior is undefined if the second range is shorter than the first range." [#29531](https://github.com/ClickHouse/ClickHouse/pull/29531) ([Kseniia Sumarokova](https://github.com/kssenii)).
* In the ODBC bridge, add retries for the error `Invalid cursor state`, which is a retriable error. Closes [#29473](https://github.com/ClickHouse/ClickHouse/issues/29473). [#29518](https://github.com/ClickHouse/ClickHouse/pull/29518) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed incorrect table name parsing on loading of a `Lazy` database. Fixes [#29456](https://github.com/ClickHouse/ClickHouse/issues/29456). [#29476](https://github.com/ClickHouse/ClickHouse/pull/29476) ([tavplubix](https://github.com/tavplubix)).
* Fix a possible `Block structure mismatch` for subqueries with a pushed-down `HAVING` predicate. Fixes [#29010](https://github.com/ClickHouse/ClickHouse/issues/29010). [#29475](https://github.com/ClickHouse/ClickHouse/pull/29475) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix the logical error `Cannot capture columns` in the functions `greatest`/`least`. Closes [#29334](https://github.com/ClickHouse/ClickHouse/issues/29334). [#29454](https://github.com/ClickHouse/ClickHouse/pull/29454) ([Kruglov Pavel](https://github.com/Avogar)).
* RocksDB table engine: fix a race condition during multiple DB opening (and bring back some tests that trigger the problem on CI). [#29393](https://github.com/ClickHouse/ClickHouse/pull/29393) ([Azat Khuzhin](https://github.com/azat)).
* Fix replicated access storage not shutting down cleanly when misconfigured. [#29388](https://github.com/ClickHouse/ClickHouse/pull/29388) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Remove the window function `nth_value` as it is not memory-safe. This closes [#29347](https://github.com/ClickHouse/ClickHouse/issues/29347). [#29348](https://github.com/ClickHouse/ClickHouse/pull/29348) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix vertical merges of projection parts. This fixes [#29253](https://github.com/ClickHouse/ClickHouse/issues/29253). This PR also fixes several projection merge/mutation issues introduced in https://github.com/ClickHouse/ClickHouse/pull/25165. [#29337](https://github.com/ClickHouse/ClickHouse/pull/29337) ([Amos Bird](https://github.com/amosbird)).
* Fix hanging DDL queries on a Replicated database while adding a new replica. [#29328](https://github.com/ClickHouse/ClickHouse/pull/29328) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Fix connection timeouts (`send_timeout`/`receive_timeout`). [#29282](https://github.com/ClickHouse/ClickHouse/pull/29282) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible `Table columns structure in ZooKeeper is different from local table structure` exception while recreating or creating new replicas of `ReplicatedMergeTree`, when one of the table columns has default expressions with case-insensitive functions. [#29266](https://github.com/ClickHouse/ClickHouse/pull/29266) ([Anton Popov](https://github.com/CurtizJ)).
* Send a normal `Database doesn't exist` error (`UNKNOWN_DATABASE`) to the client (via TCP) instead of `Attempt to read after eof` (`ATTEMPT_TO_READ_AFTER_EOF`). [#29229](https://github.com/ClickHouse/ClickHouse/pull/29229) ([Azat Khuzhin](https://github.com/azat)).
* Fix a segfault while inserting into a column with type `LowCardinality(Nullable)` in the Avro input format. [#29132](https://github.com/ClickHouse/ClickHouse/pull/29132) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not allow reusing previous credentials in case of an inter-server secret (before, an INSERT via Buffer/Kafka to a Distributed table, with an interserver secret configured for that cluster, might re-use a previously set user for that connection). [#29060](https://github.com/ClickHouse/ClickHouse/pull/29060) ([Azat Khuzhin](https://github.com/azat)).
* Handle `any_join_distinct_right_table_keys` when joining with a dictionary; closes [#29007](https://github.com/ClickHouse/ClickHouse/issues/29007). [#29014](https://github.com/ClickHouse/ClickHouse/pull/29014) ([Vladimir C](https://github.com/vdimir)).
* Fix the "Not found column ... in block" error when joining on an alias column; closes [#26980](https://github.com/ClickHouse/ClickHouse/issues/26980). [#29008](https://github.com/ClickHouse/ClickHouse/pull/29008) ([Vladimir C](https://github.com/vdimir)).
* Fix the number of threads used in a `GLOBAL IN` subquery (it was executed in a single thread since the [#19414](https://github.com/ClickHouse/ClickHouse/issues/19414) bugfix). [#28997](https://github.com/ClickHouse/ClickHouse/pull/28997) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix bad optimizations of ORDER BY if it contains WITH FILL. This closes [#28908](https://github.com/ClickHouse/ClickHouse/issues/28908). This closes [#26049](https://github.com/ClickHouse/ClickHouse/issues/26049). [#28910](https://github.com/ClickHouse/ClickHouse/pull/28910) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix higher-order array functions (`SIGSEGV` for `arrayCompact`/`ILLEGAL_COLUMN` for `arrayDifference`/`arrayCumSumNonNegative`) with consts. [#28904](https://github.com/ClickHouse/ClickHouse/pull/28904) ([Azat Khuzhin](https://github.com/azat)).
* Fix waiting for mutations with `mutations_sync=2`. [#28889](https://github.com/ClickHouse/ClickHouse/pull/28889) ([Azat Khuzhin](https://github.com/azat)).
* Fix queries to external databases (i.e. MySQL) with multiple columns in IN (i.e. `(k,v) IN ((1, 2))`); see the sketch after this list. [#28888](https://github.com/ClickHouse/ClickHouse/pull/28888) ([Azat Khuzhin](https://github.com/azat)).
* Fix a bug with `LowCardinality` in short-circuit function evaluation. Closes [#28884](https://github.com/ClickHouse/ClickHouse/issues/28884). [#28887](https://github.com/ClickHouse/ClickHouse/pull/28887) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix reading of subcolumns from compact parts. [#28873](https://github.com/ClickHouse/ClickHouse/pull/28873) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition between `DROP PART` and `REPLACE/MOVE PARTITION` that might cause replicas to diverge in rare cases. [#28864](https://github.com/ClickHouse/ClickHouse/pull/28864) ([tavplubix](https://github.com/tavplubix)).
* Fix expression compilation with short-circuit evaluation. [#28821](https://github.com/ClickHouse/ClickHouse/pull/28821) ([Azat Khuzhin](https://github.com/azat)).
* Fix an extremely rare case when ReplicatedMergeTree replicas could diverge after a hard reboot of all replicas. The error looks like `Part ... intersects (previous|next) part ...`. [#28817](https://github.com/ClickHouse/ClickHouse/pull/28817) ([alesapin](https://github.com/alesapin)).
* Better check for connection usability, and also catch any exception in `RabbitMQ` shutdown just in case. [#28797](https://github.com/ClickHouse/ClickHouse/pull/28797) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a benign race condition in ReplicatedMergeTreeQueue. It shouldn't be visible to users, but can lead to subtle bugs. [#28734](https://github.com/ClickHouse/ClickHouse/pull/28734) ([alesapin](https://github.com/alesapin)).
* Fix a possible crash for `SELECT` with a partially created aggregate projection in case of an exception. [#28700](https://github.com/ClickHouse/ClickHouse/pull/28700) ([Amos Bird](https://github.com/amosbird)).
* Fix a coredump in the creation of distributed tables when the parameters passed in are wrong. [#28686](https://github.com/ClickHouse/ClickHouse/pull/28686) ([Zhiyong Wang](https://github.com/ljcui)).
* Add `Settings.Names` and `Settings.Values` aliases for the `system.processes` table; see the sketch after this list. [#28685](https://github.com/ClickHouse/ClickHouse/pull/28685) ([Vitaly](https://github.com/orloffv)).
* Support for the S2 Geometry library: fix the number of arguments required by the `s2RectAdd` and `s2RectContains` functions. [#28663](https://github.com/ClickHouse/ClickHouse/pull/28663) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix an invalid constant type conversion when a `Nullable` or `LowCardinality` primary key is used. [#28636](https://github.com/ClickHouse/ClickHouse/pull/28636) ([Amos Bird](https://github.com/amosbird)).
* Fix "Column is not under aggregate function and not in GROUP BY" with PREWHERE (fixes [#28461](https://github.com/ClickHouse/ClickHouse/issues/28461)). [#28502](https://github.com/ClickHouse/ClickHouse/pull/28502) ([Azat Khuzhin](https://github.com/azat)).
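A few hedged sketches of the fixed behaviors marked above. All table, database, cluster, and connection names are illustrative placeholders, not taken from the PRs.

``` sql
-- Materialized column as the sharding key of a Distributed table
-- (`example_cluster` is hypothetical). After the fix, the INSERT works
-- even with insert_allow_materialized_columns = 0.
CREATE TABLE events_local
(
    id UInt64,
    shard_key UInt64 MATERIALIZED id % 4
)
ENGINE = MergeTree
ORDER BY id;

CREATE TABLE events_dist AS events_local
ENGINE = Distributed(example_cluster, currentDatabase(), events_local, shard_key);

INSERT INTO events_dist (id) VALUES (1);
```

``` sql
-- ORDER BY ... WITH FILL with both FROM and TO set and an empty result:
-- after the fix, rows 0..4 are still generated by the fill.
SELECT number AS n
FROM numbers(10)
WHERE n > 100 -- matches no rows
ORDER BY n WITH FILL FROM 0 TO 5;
```

``` sql
-- The isNotNull rewrite must keep parentheses: this parses as
-- (1 IS NOT NULL) + (2 IS NOT NULL) and returns 2.
SELECT isNotNull(1) + isNotNull(2);
```

``` sql
-- Workaround for the "Directory not empty" error when dropping an
-- Ordinary database (`ordinary_db` is hypothetical).
SET force_remove_data_recursively_on_drop = 1;
DROP DATABASE ordinary_db;
```

``` sql
-- Multi-column tuple IN pushed to an external MySQL table; the connection
-- parameters are placeholders.
SELECT *
FROM mysql('host:3306', 'db', 'table', 'user', 'password')
WHERE (k, v) IN ((1, 2), (3, 4));
```

``` sql
-- The new aliases on system.processes.
SELECT query_id, Settings.Names, Settings.Values
FROM system.processes;
```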
### ClickHouse release v21.10, 2021-10-16
#### Backward Incompatible Change
@ -624,7 +624,7 @@ macro (add_executable target)
# invoke built-in add_executable
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on then provide memcpy symbol explicitly to neutralize thinlto's libcall generation.
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
if (ARCH_AMD64 AND GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
    _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
else ()
    _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
@ -108,6 +108,11 @@ public:
    LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
    LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }

    time_t to_time_t(const DateLUTImpl & time_zone = DateLUT::instance()) const
    {
        return time_zone.makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
    }

    std::string toString() const
    {
        std::string s{"0000-00-00 00:00:00"};
@ -28,8 +28,8 @@
#define NO_INLINE __attribute__((__noinline__))
#define MAY_ALIAS __attribute__((__may_alias__))

#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__)
# error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress)"
#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) && !(defined(__riscv) && (__riscv_xlen == 64))
# error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress) and RISC-V 64 (experimental)"
#endif

/// Check for presence of address sanitizer
@ -16,17 +16,6 @@
 */
uint64_t getMemoryAmountOrZero()
{
#if defined(OS_LINUX)
    // Try to lookup at the Cgroup limit
    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
    if (cgroup_limit.is_open())
    {
        uint64_t amount = 0; // in case of read error
        cgroup_limit >> amount;
        return amount;
    }
#endif

    int64_t num_pages = sysconf(_SC_PHYS_PAGES);
    if (num_pages <= 0)
        return 0;
@ -35,7 +24,22 @@ uint64_t getMemoryAmountOrZero()
    if (page_size <= 0)
        return 0;

    return num_pages * page_size;
    uint64_t memory_amount = num_pages * page_size;

#if defined(OS_LINUX)
    // Try to lookup at the Cgroup limit
    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
    if (cgroup_limit.is_open())
    {
        uint64_t memory_limit = 0; // in case of read error
        cgroup_limit >> memory_limit;
        if (memory_limit > 0 && memory_limit < memory_amount)
            memory_amount = memory_limit;
    }
#endif

    return memory_amount;
}
@ -675,6 +675,34 @@ void BaseDaemon::initialize(Application & self)
    if ((!log_path.empty() && is_daemon) || config().has("logger.stderr"))
    {
        std::string stderr_path = config().getString("logger.stderr", log_path + "/stderr.log");

        /// Check that stderr is writable before freopen(),
        /// since freopen() will make stderr invalid on error,
        /// and logging to stderr will be broken,
        /// so the following code (that is used in every program) will not write anything:
        ///
        /// int main(int argc, char ** argv)
        /// {
        ///     try
        ///     {
        ///         DB::SomeApp app;
        ///         return app.run(argc, argv);
        ///     }
        ///     catch (...)
        ///     {
        ///         std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
        ///         return 1;
        ///     }
        /// }
        if (access(stderr_path.c_str(), W_OK))
        {
            int fd;
            if ((fd = creat(stderr_path.c_str(), 0600)) == -1 && errno != EEXIST)
                throw Poco::OpenFileException("File " + stderr_path + " (logger.stderr) is not writable");
            if (fd != -1)
                ::close(fd);
        }

        if (!freopen(stderr_path.c_str(), "a+", stderr))
            throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
@ -16,3 +16,7 @@ endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
    set (ARCH_PPC64LE 1)
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")
    set (ARCH_RISCV64 1)
endif ()
@ -20,10 +20,10 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")

set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE)
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "" FORCE)

set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld")
set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=bfd")

set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
2
contrib/boost
vendored
@ -1 +1 @@
Subproject commit 79358a3106aab6af464430ed67c7efafebf5cd6f
Subproject commit fcb058e1459ac273ecfe7cdf72791cb1479115af
@ -196,6 +196,12 @@ if (NOT EXTERNAL_BOOST_FOUND)
            "${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S"
            "${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S"
        )
    elseif (ARCH_RISCV64)
        set (SRCS_CONTEXT ${SRCS_CONTEXT}
            "${LIBRARY_DIR}/libs/context/src/asm/jump_riscv64_sysv_elf_gas.S"
            "${LIBRARY_DIR}/libs/context/src/asm/make_riscv64_sysv_elf_gas.S"
            "${LIBRARY_DIR}/libs/context/src/asm/ontop_riscv64_sysv_elf_gas.S"
        )
    elseif(OS_DARWIN)
        set (SRCS_CONTEXT ${SRCS_CONTEXT}
            "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S"
@ -1,5 +1,5 @@
if (SANITIZE OR NOT (
    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE)) OR
    ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
    (OS_DARWIN AND (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" OR CMAKE_BUILD_TYPE STREQUAL "Debug"))
))
if (ENABLE_JEMALLOC)
@ -112,6 +112,8 @@ elseif (ARCH_ARM)
    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64")
elseif (ARCH_PPC64LE)
    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")
elseif (ARCH_RISCV64)
    set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64")
else ()
    message (FATAL_ERROR "internal jemalloc: This arch is not supported")
endif ()
8
contrib/jemalloc-cmake/include_linux_riscv64/README
Normal file
@ -0,0 +1,8 @@
Here are pre-generated files from jemalloc on Linux RISC-V.
You can obtain these files by running ./autogen.sh inside the jemalloc source directory.

Added #define GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non-standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution.
@ -0,0 +1,367 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#define JEMALLOC_PRIVATE_NAMESPACE je_

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0

/*
 * Number of significant bits in virtual addresses. This may be less than the
 * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
 * bits are the same as bit 47.
 */
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
 */
#define JEMALLOC_HAVE_BUILTIN_CLZ

/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
/* #undef JEMALLOC_OS_UNFAIR_LOCK */

/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL

/*
 * Defined if secure_getenv(3) is available.
 */
// #define JEMALLOC_HAVE_SECURE_GETENV

/*
 * Defined if issetugid(2) is available.
 */
/* #undef JEMALLOC_HAVE_ISSETUGID */

/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1

/*
 * Defined if mach_absolute_time() is available.
 */
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
 * Defined if _malloc_thread_cleanup() exists. At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library. Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
#define JEMALLOC_THREADED_INIT

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
/* #undef JEMALLOC_MUTEX_INIT_CB */

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
/* #undef JEMALLOC_DEBUG */

/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS

/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */

/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */

/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */

/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */

/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
 * segment (DSS).
 */
#define JEMALLOC_DSS

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL

/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
/* #undef LG_QUANTUM */

/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/*
 * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
 * explicit huge page support are separately configured.
 */
#define LG_HUGEPAGE 29

/*
 * If defined, adjacent virtual memory mappings with identical attributes
 * automatically coalesce, and they fragment when changes are made to subranges.
 * This is the normal order of things for mmap()/munmap(), but on Windows
 * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
 * mappings do *not* coalesce/fragment.
 */
#define JEMALLOC_MAPS_COALESCE

/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
#define JEMALLOC_RETAIN

/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS

/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

/*
 * ffs*() functions to use for bitmapping. Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs

/*
 * popcount*() functions to use for bitmapping.
 */
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount

/*
 * If defined, explicitly attempt to more uniformly distribute large allocation
 * pointer alignments across all cache indices.
 */
#define JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities. We make this a configure option to
 * avoid taking extra branches everywhere.
 */
/* #undef JEMALLOC_LOG */

/*
 * If defined, use readlinkat() (instead of readlink()) to follow
 * /etc/malloc_conf.
 */
/* #undef JEMALLOC_READLINKAT */

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
/* #undef JEMALLOC_ZONE */

/*
 * Methods for determining whether the OS overcommits.
 * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 *                                         /proc/sys/vm.overcommit_memory file.
 * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
 */
/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */
#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY

/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
#define JEMALLOC_HAVE_MADVISE_HUGE

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
#define JEMALLOC_MADVISE_DONTDUMP

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3

/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3

/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#define JEMALLOC_GLIBC_MALLOC_HOOK

/* glibc memalign hook. */
#define JEMALLOC_GLIBC_MEMALIGN_HOOK

/* pthread support */
#define JEMALLOC_HAVE_PTHREAD

/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM

/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU

/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY

/*
 * If defined, all the features necessary for background threads are present.
 */
#define JEMALLOC_BACKGROUND_THREAD 1

/*
 * If defined, jemalloc symbols are not exported (doesn't work when
 * JEMALLOC_PREFIX is not defined).
 */
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
2
contrib/libhdfs3
vendored
@ -1 +1 @@
Subproject commit 082e55f17d1c58bf124290fb044fea40e985ec11
Subproject commit a8c37ee001af1ae88e5dfa637ae5b31b087c96d3
@ -66,7 +66,7 @@
#cmakedefine WITH_SASL_OAUTHBEARER 1
#cmakedefine WITH_SASL_CYRUS 1
// crc32chw
#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__))
#if !defined(__PPC__) && !defined(__riscv) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__))
#define WITH_CRC32C_HW 1
#endif
// regex
2
contrib/poco
vendored
@ -1 +1 @@
Subproject commit 39fd359765a3a77b46d94ec3c5def3c7802a920f
Subproject commit 173fb31717837d366152c508619b09dcf11786da
2
contrib/rocksdb
vendored
@ -1 +1 @@
Subproject commit 296c1b8b95fd448b8097a1b2cc9f704ff4a73a2c
Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9
2
contrib/sentry-native
vendored
@ -1 +1 @@
Subproject commit 94644e92f0a3ff14bd35ed902a8622a2d15f7be4
Subproject commit f431047ac8da13179c488018dddf1c0d0771a997
2
contrib/sysroot
vendored
@ -1 +1 @@
Subproject commit 6172893931e19b028f9cabb7095a44361be863df
Subproject commit 1a64956aa7c280448be6526251bb2b8e6d380ab1
@ -81,11 +81,11 @@ then
fi

# Also build fuzzers if any sanitizer specified
if [ -n "$SANITIZER" ]
then
    # Currently we are in build/build_docker directory
    ../docker/packager/other/fuzzer.sh
fi
# if [ -n "$SANITIZER" ]
# then
#     # Currently we are in build/build_docker directory
#     ../docker/packager/other/fuzzer.sh
# fi

ccache --show-config ||:
ccache --show-stats ||:
@ -31,15 +31,15 @@ then
fi

# Also build fuzzers if any sanitizer specified
if [ -n "$SANITIZER" ]
then
    # Script is supposed that we are in build directory.
    mkdir -p build/build_docker
    cd build/build_docker
    # Launching build script
    ../docker/packager/other/fuzzer.sh
    cd
fi
# if [ -n "$SANITIZER" ]
# then
#     # Script is supposed that we are in build directory.
#     mkdir -p build/build_docker
#     cd build/build_docker
#     # Launching build script
#     ../docker/packager/other/fuzzer.sh
#     cd
# fi

ccache --show-config ||:
ccache --show-stats ||:
@ -256,6 +256,12 @@ continue
    task_exit_code=0
    echo "success" > status.txt
    echo "OK" > description.txt
elif [ "$fuzzer_exit_code" == "137" ]
then
    # Killed.
    task_exit_code=$fuzzer_exit_code
    echo "failure" > status.txt
    echo "Killed" > description.txt
else
    # The server was alive, but the fuzzer returned some error. This might
    # be some client-side error detected by fuzzing, or a problem in the
@ -60,7 +60,7 @@ RUN dockerd --version; docker --version
RUN python3 -m pip install \
    PyMySQL \
    aerospike==4.0.0 \
    avro \
    avro==1.10.2 \
    cassandra-driver \
    confluent-kafka==1.5.0 \
    dict2xml \
@ -2,7 +2,7 @@ version: '2.3'
services:
    postgres1:
        image: postgres
        command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
        command: ["postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all", "-c", "max_connections=200"]
        restart: always
        expose:
            - ${POSTGRES_PORT}
@ -11,7 +11,6 @@ services:
            interval: 10s
            timeout: 5s
            retries: 5
        command: [ "postgres", "-c", "wal_level=logical", "-c", "max_replication_slots=2"]
    networks:
        default:
            aliases:
@ -308,12 +308,7 @@ function get_profiles_watchdog
function get_profiles
{
    # Collect the profiles
    clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
    clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
    clickhouse-client --port $LEFT_SERVER_PORT --query "system flush logs" &

    clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "system flush logs" &

    wait
@ -5,22 +5,44 @@
<interserver_http_port remove="remove"/>
<tcp_with_proxy_port remove="remove"/>
<keeper_server remove="remove"/>
<zookeeper remove="remove"/>
<listen_host>::</listen_host>

<logger>
    <console>true</console>
</logger>

<text_log remove="remove">
    <table remove="remove"/>
</text_log>
<text_log remove="remove"/>
<crash_log remove="remove"/>
<query_views_log remove="remove"/>
<part_log remove="remove"/>
<opentelemetry_span_log remove="remove"/>
<session_log remove="remove"/>

<metric_log replace="replace">
    <database>system</database>
    <table>metric_log</table>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    <collect_interval_milliseconds>1000</collect_interval_milliseconds>
<!-- performance tests do not use real block devices;
     instead they store everything in memory.

     And so, to avoid extra memory references, switch *_log to the Memory engine. -->
<query_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</query_log>
<query_thread_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</query_thread_log>
<trace_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</trace_log>
<metric_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</metric_log>
<asynchronous_metric_log>
    <engine>ENGINE = Memory</engine>
    <partition_by remove="remove"/>
</asynchronous_metric_log>

<uncompressed_cache_size>1000000000</uncompressed_cache_size>
@ -34,7 +34,7 @@ then
    if [ "${ARCH}" = "x86_64" ]
    then
        DIR="macos"
    elif [ "${ARCH}" = "aarch64" ]
    elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
    then
        DIR="macos-aarch64"
    fi
@ -1,5 +1,5 @@
---
toc_priority: 71
toc_priority: 72
toc_title: Source Code Browser
---
30
docs/en/development/build-cross-riscv.md
Normal file
@ -0,0 +1,30 @@
---
toc_priority: 68
toc_title: Build on Linux for RISC-V 64
---

# How to Build ClickHouse on Linux for RISC-V 64 Architecture {#how-to-build-clickhouse-on-linux-for-risc-v-64-architecture}

As of writing (11.11.2021), building for RISC-V is considered to be highly experimental. Not all features can be enabled.

This is for the case when you have a Linux machine and want to use it to build the `clickhouse` binary that will run on another Linux machine with the RISC-V 64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.

The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.

## Install Clang-13

Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
```
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

## Build ClickHouse {#build-clickhouse}

``` bash
cd ClickHouse
mkdir build-riscv64
CC=clang-13 CXX=clang++-13 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_INTERNAL_PARQUET_LIBRARY=OFF -DENABLE_ORC=OFF -DUSE_INTERNAL_ORC_LIBRARY=OFF -DUSE_UNWIND=OFF -DUSE_INTERNAL_PROTOBUF_LIBRARY=ON -DENABLE_GRPC=OFF -DUSE_INTERNAL_GRPC_LIBRARY=OFF -DENABLE_HDFS=OFF -DUSE_INTERNAL_HDFS3_LIBRARY=OFF -DENABLE_MYSQL=OFF -DUSE_INTERNAL_MYSQL_LIBRARY=OFF
ninja -C build-riscv64
```

The resulting binary will run only on Linux with the RISC-V 64 CPU architecture.
@ -1,5 +1,5 @@
---
toc_priority: 70
toc_priority: 71
toc_title: Third-Party Libraries Used
---
@ -1,5 +1,5 @@
---
toc_priority: 68
toc_priority: 69
toc_title: C++ Guide
---
@ -1,5 +1,5 @@
---
toc_priority: 69
toc_priority: 70
toc_title: Testing
---
@ -23,15 +23,15 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p
- `user` — PostgreSQL user.
- `password` — User password.

## Dynamically adding new tables to replication
## Dynamically adding new tables to replication {#dynamically-adding-table-to-replication}

``` sql
ATTACH TABLE postgres_database.new_table;
```

It will work as well if there is a setting `materialized_postgresql_tables_list`.
When specifying a specific list of tables in the database using the setting [materialized_postgresql_tables_list](../../operations/settings/settings.md#materialized-postgresql-tables-list), it will be updated to the current state, taking into account the tables which were added by the `ATTACH TABLE` query.

## Dynamically removing tables from replication
## Dynamically removing tables from replication {#dynamically-removing-table-from-replication}

``` sql
DETACH TABLE postgres_database.table_to_remove;
@ -58,7 +58,7 @@ SETTINGS materialized_postgresql_max_block_size = 65536,
SELECT * FROM database1.table1;
```

It is also possible to change settings at run time.
The settings can be changed, if necessary, using a DDL query. But it is impossible to change the setting `materialized_postgresql_tables_list`. To update the list of tables in this setting use the `ATTACH TABLE` query.

``` sql
ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = <new_size>;
@ -787,6 +787,8 @@ Moving data does not interfere with data replication. Therefore, different stora
After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`).
During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space.

A user can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](../../../operations/settings/merge-tree-settings.md#min-bytes-to-rebalance-partition-over-jbod) setting.

## Using S3 for Data Storage {#table_engine-mergetree-s3}

`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
@ -9,45 +9,57 @@ The `Merge` engine (not to be confused with `MergeTree`) does not store data its

Reading is automatically parallelized. Writing to a table is not supported. When reading, the indexes of tables that are actually being read are used, if they exist.

The `Merge` engine accepts parameters: the database name and a regular expression for tables.

## Examples {#examples}

Example 1:
## Creating a Table {#creating-a-table}

``` sql
Merge(hits, '^WatchLog')
CREATE TABLE ... Engine=Merge(db_name, tables_regexp)
```

Data will be read from the tables in the `hits` database that have names that match the regular expression ‘`^WatchLog`’.
**Engine Parameters**

Instead of the database name, you can use a constant expression that returns a string. For example, `currentDatabase()`.
- `db_name` — Possible values:
    - database name,
    - constant expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

- `tables_regexp` — A regular expression to match the table names in the specified DB or DBs.

Regular expressions — [re2](https://github.com/google/re2) (supports a subset of PCRE), case-sensitive.
See the notes about escaping symbols in regular expressions in the “match” section.
See the notes about escaping symbols in regular expressions in the "match" section.

When selecting tables to read, the `Merge` table itself will not be selected, even if it matches the regex. This is to avoid loops.
It is possible to create two `Merge` tables that will endlessly try to read each others’ data, but this is not a good idea.
## Usage {#usage}

When selecting tables to read, the `Merge` table itself is not selected, even if it matches the regex. This is to avoid loops.
It is possible to create two `Merge` tables that will endlessly try to read each others' data, but this is not a good idea.

The typical way to use the `Merge` engine is for working with a large number of `TinyLog` tables as if with a single table.

Example 2:
## Examples {#examples}

Let’s say you have a old table (WatchLog_old) and decided to change partitioning without moving data to a new table (WatchLog_new) and you need to see data from both tables.
**Example 1**

Consider two databases `ABC_corporate_site` and `ABC_store`. The `all_visitors` table will contain IDs from the tables `visitors` in both databases.

``` sql
CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
ENGINE=MergeTree(date, (UserId, EventType), 8192);
CREATE TABLE all_visitors (id UInt32) ENGINE=Merge(REGEXP('ABC_*'), 'visitors');
```

**Example 2**

Let's say you have an old table `WatchLog_old` and decided to change partitioning without moving data to a new table `WatchLog_new`, and you need to see data from both tables.

``` sql
CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
ENGINE=MergeTree(date, (UserId, EventType), 8192);
INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3);

CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64)
ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192;
CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64)
ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192;
INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);

CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');

SELECT *
FROM WatchLog
SELECT * FROM WatchLog;
```

``` text
@ -68,5 +80,4 @@ FROM WatchLog

**See Also**

- [Virtual columns](../../../engines/table-engines/special/index.md#table_engines-virtual_columns)

[Original article](https://clickhouse.com/docs/en/operations/table_engines/merge/) <!--hide-->
- [merge](../../../sql-reference/table-functions/merge.md) table function

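As a small hedged illustration of the virtual columns mentioned in **See Also**: a `Merge` table exposes a `_table` virtual column with the name of the underlying table each row was read from. Assuming the `WatchLog` example above:

``` sql
SELECT _table, *
FROM WatchLog
ORDER BY date;
```
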
@ -332,7 +332,7 @@ ORDER BY year, count(*) DESC

The following server was used:

Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical kernels total,128 GiB RAM,8x6 TB HD on hardware RAID-5
Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical cores total, 128 GiB RAM, 8x6 TB HD on hardware RAID-5

Execution time is the best of three runs. But starting from the second run, queries read data from the file system cache. No further caching occurs: the data is read out and processed in each run.

@ -130,13 +130,17 @@ Only a small set of symbols are escaped. You can easily stumble onto a string va

Arrays are written as a list of comma-separated values in square brackets. Numeric items in the array are formatted as normal. `Date` and `DateTime` types are written in single quotes. Strings are written in single quotes with the same escaping rules as above.

[NULL](../sql-reference/syntax.md) is formatted as `\N`.
[NULL](../sql-reference/syntax.md) is formatted according to the setting [format_tsv_null_representation](../operations/settings/settings.md#settings-format_tsv_null_representation) (default value is `\N`).

If the setting [input_format_tsv_empty_as_default](../operations/settings/settings.md#settings-input_format_tsv_empty_as_default) is enabled,
empty input fields are replaced with default values. For complex default expressions, [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too.

Each element of [Nested](../sql-reference/data-types/nested-data-structures/nested.md) structures is represented as an array.

In input data, ENUM values can be represented as names or as ids. First, we try to match the input value to the ENUM name. If we fail and the input value is a number, we try to match this number to the ENUM id.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_tsv_enum_as_number](../operations/settings/settings.md#settings-input_format_tsv_enum_as_number) to optimize ENUM parsing.

For example:

``` sql
@ -405,7 +409,10 @@ When parsing, all values can be parsed either with or without quotes. Both doubl
If the setting [input_format_csv_empty_as_default](../operations/settings/settings.md#settings-input_format_csv_empty_as_default) is enabled,
empty unquoted input values are replaced with default values. For complex default expressions, [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too.

`NULL` is formatted as `\N` or `NULL` or an empty unquoted string (see settings [input_format_csv_unquoted_null_literal_as_null](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null) and [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)).
`NULL` is formatted according to the setting [format_csv_null_representation](../operations/settings/settings.md#settings-format_csv_null_representation) (default value is `\N`).

In input data, ENUM values can be represented as names or as ids. First, we try to match the input value to the ENUM name. If we fail and the input value is a number, we try to match this number to the ENUM id.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_csv_enum_as_number](../operations/settings/settings.md#settings-input_format_csv_enum_as_number) to optimize ENUM parsing.

The CSV format supports the output of totals and extremes the same way as `TabSeparated`.

@ -432,7 +432,7 @@ Example:
    <http_handlers>
        <rule>
            <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
            <method>GET</method>
            <methods>GET</methods>
            <headers>
                <XXX>TEST_HEADER_VALUE</XXX>
                <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
@ -639,4 +639,4 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact
```
```

@ -35,6 +35,7 @@ toc_title: Client Libraries
- NodeJs
    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
    - [node-clickhouse](https://github.com/apla/node-clickhouse)
    - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
- Perl
    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)

@ -78,6 +78,7 @@ toc_title: Adopters
| <a href="https://ippon.tech" class="favicon">Ippon Technologies</a> | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) |
| <a href="https://www.ivi.ru/" class="favicon">Ivi</a> | Online Cinema | Analytics, Monitoring | — | — | [Article in Russian, Jan 2018](https://habr.com/en/company/ivi/blog/347408/) |
| <a href="https://jinshuju.net" class="favicon">Jinshuju 金数据</a> | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
| <a href="https://jitsu.com" class="favicon">Jitsu</a> | Cloud Software | Data Pipeline | — | — | [Documentation](https://jitsu.com/docs/destinations-configuration/clickhouse-destination), [Hacker News](https://news.ycombinator.com/item?id=29106082) |
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
| <a href="https://www.kodiakdata.com/" class="favicon">Kodiak Data</a> | Clouds | Main product | — | — | [Slides in English, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
| <a href="https://kontur.ru" class="favicon">Kontur</a> | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
@ -101,7 +102,8 @@ toc_title: Adopters
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
| <a href="https://ok.ru" class="favicon">Ok.ru</a> | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, Oct 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) |
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
| <a href="https://www.opentargets.org/" class="favicon">Open Targets</a> | Genome Research | Genome Search | — | — | [Tweet, Oct 2021](https://twitter.com/OpenTargets/status/1452570865342758913?s=20), [Blog](https://blog.opentargets.org/graphql/) |
| <a href="https://corp.ozon.com/" class="favicon">OZON</a> | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) |
| <a href="https://panelbear.com/" class="favicon">Panelbear</a> | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
@ -121,7 +123,7 @@ toc_title: Adopters
| <a href="https://rspamd.com/" class="favicon">Rspamd</a> | Antispam | Analytics | — | — | [Official Website](https://rspamd.com/doc/modules/clickhouse.html) |
| <a href="https://rusiem.com/en" class="favicon">RuSIEM</a> | SIEM | Main Product | — | — | [Official Website](https://rusiem.com/en/products/architecture) |
| <a href="https://www.s7.ru" class="favicon">S7 Airlines</a> | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
| <a href="https://www.sberbank.com/index" class="favicon">Sber</a> | Banking, Fintech, Retail, Cloud, Media | — | — | — | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) |
| <a href="https://www.sberbank.com/index" class="favicon">Sber</a> | Banking, Fintech, Retail, Cloud, Media | — | 128 servers | >1 PB | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) |
| <a href="https://www.scireum.de/" class="favicon">scireum GmbH</a> | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
| <a href="https://segment.com/" class="favicon">Segment</a> | Data processing | Main product | 9 * i3en.3xlarge nodes 7.5TB NVME SSDs, 96GB Memory, 12 vCPUs | — | [Slides, 2019](https://slides.com/abraithwaite/segment-clickhouse) |
| <a href="https://sembot.io/" class="favicon">sembot.io</a> | Shopping Ads | — | — | — | A comment on LinkedIn, 2020 |
@ -172,5 +174,6 @@ toc_title: Adopters
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
| <a href="https://www.deepl.com/" class="favicon">Deepl</a> | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) |
| <a href="https://vercel.com/" class="favicon">Vercel</a> | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 |

[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->

@ -21,7 +21,7 @@ By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (lineari
ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of the ClickHouse server, but in both cases configuration is almost the same `.xml` file. The main ClickHouse Keeper configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:

- `tcp_port` — Port for a client to connect (default for ZooKeeper is `2181`).
- `tcp_port_secure` — Secure port for a client to connect.
- `tcp_port_secure` — Secure port for an SSL connection between client and keeper-server.
- `server_id` — Unique server id, each participant of the ClickHouse Keeper cluster must have a unique number (1, 2, 3, and so on).
- `log_storage_path` — Path to coordination logs, better to store logs on the non-busy device (same for ZooKeeper).
- `snapshot_storage_path` — Path to coordination snapshots.
@ -50,7 +50,11 @@ Internal coordination settings are located in `<keeper_server>.<coordination_set
- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000).
- `startup_timeout` — If the server doesn't connect to other quorum participants in the specified timeout it will terminate (ms) (default: 30000).

Quorum configuration is located in `<keeper_server>.<raft_configuration>` section and contain servers description. The only parameter for the whole quorum is `secure`, which enables encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:
Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers.

The only parameter for the whole quorum is `secure`, which enables encrypted connection for communication between quorum participants. The parameter can be set `true` if SSL connection is required for internal communication between nodes, or left unspecified otherwise.

The main parameters for each `<server>` are:

- `id` — Server identifier in a quorum.
- `hostname` — Hostname where this server is placed.

@ -370,7 +370,7 @@ Opens `https://tabix.io/` when accessing `http://localhost: http_port`.
        <![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]>
    </http_server_default_response>
```
## hsts_max_age
## hsts_max_age {#hsts-max-age}

Expiry time for HSTS in seconds. The default value is 0, which means HSTS is disabled. If you set a positive number, HSTS will be enabled and the max-age will be the number you set.

@ -763,6 +763,30 @@ Default value: 10000.
<max_thread_pool_size>12000</max_thread_pool_size>
```

## max_thread_pool_free_size {#max-thread-pool-free-size}

The number of threads that are always held in the Global Thread pool.

Default value: 1000.

**Example**

``` xml
<max_thread_pool_free_size>1200</max_thread_pool_free_size>
```

## thread_pool_queue_size {#thread-pool-queue-size}

The limit to the number of jobs that can be scheduled on the Global Thread pool. Increasing queue size leads to larger memory usage. It is recommended to keep this value equal to `max_thread_pool_size`.

Default value: 10000.

**Example**

``` xml
<thread_pool_queue_size>12000</thread_pool_queue_size>
```

## merge_tree {#server_configuration_parameters-merge_tree}

Fine tuning for tables in the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md).

@ -328,3 +328,31 @@ Possible values:
Default value: `true`.

By default, the ClickHouse server checks at table creation the data type of a column for sampling or sampling expression. If you already have tables with an incorrect sampling expression and do not want the server to raise an exception during startup, set `check_sample_column_is_correct` to `false`.

## min_bytes_to_rebalance_partition_over_jbod {#min-bytes-to-rebalance-partition-over-jbod}

Sets the minimal amount of bytes to enable balancing when distributing new big parts over volume disks [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures).

Possible values:

- Positive integer.
- 0 — Balancing is disabled.

Default value: `0`.

**Usage**

The value of the `min_bytes_to_rebalance_partition_over_jbod` setting should be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.

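A hedged sketch of applying this setting per table (the table name and the 1 GiB threshold are illustrative assumptions):

``` sql
CREATE TABLE jbod_demo (d Date, x UInt64)
ENGINE = MergeTree
ORDER BY x
SETTINGS min_bytes_to_rebalance_partition_over_jbod = 1073741824; -- 1 GiB

```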
## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

Enables or disables detaching a data part on a replica after a merge or a mutation, if it is not byte-identical to data parts on other replicas. If disabled, the data part is removed. Activate this setting if you want to analyze such parts later.

The setting is applicable to `MergeTree` tables with enabled [data replication](../../engines/table-engines/mergetree-family/replication.md).

Possible values:

- 0 — Parts are removed.
- 1 — Parts are detached.

Default value: `0`.

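Since this is a MergeTree-level setting, it can presumably be switched on for an existing replicated table with a DDL query (the table name is hypothetical):

``` sql
ALTER TABLE replicated_demo MODIFY SETTING detach_not_byte_identical_parts = 1;
```
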
@ -425,12 +425,12 @@ Enabled by default.

## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}

Enables or disables parsing enum values as enum ids for TSV input format.
When enabled, always treat enum values as enum ids for TSV input format. It's recommended to enable this setting if data contains only enum ids to optimize enum parsing.

Possible values:

- 0 — Enum values are parsed as values.
- 1 — Enum values are parsed as enum IDs.
- 0 — Enum values are parsed as values or as enum IDs.
- 1 — Enum values are parsed only as enum IDs.

Default value: 0.

@ -444,10 +444,39 @@ CREATE TABLE table_with_enum_column_for_tsv_insert (Id Int32,Value Enum('first'

When the `input_format_tsv_enum_as_number` setting is enabled:

Query:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 1;
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
```

Query:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 'first';
```

throws an exception.

When the `input_format_tsv_enum_as_number` setting is disabled:

Query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 'first';
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

@ -462,15 +491,6 @@ Result:
└─────┴────────┘
```

When the `input_format_tsv_enum_as_number` setting is disabled, the `INSERT` query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
```

throws an exception.

## input_format_null_as_default {#settings-input-format-null-as-default}

Enables or disables the initialization of [NULL](../../sql-reference/syntax.md#null-literal) fields with [default values](../../sql-reference/statements/create/table.md#create-default-values), if data type of these fields is not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable).
@ -992,6 +1012,16 @@ Example:
log_query_views=1
```

## log_formatted_queries {#settings-log-formatted-queries}

Allows logging formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table.

Possible values:

- 0 — Formatted queries are not logged in the system table.
- 1 — Formatted queries are logged in the system table.

Default value: `0`.

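A hedged usage sketch combining this setting with the `formatted_query` column of `system.query_log` (the `SYSTEM FLUSH LOGS` step just forces the log to be written immediately):

``` sql
SET log_formatted_queries = 1;
SELECT 1;
SYSTEM FLUSH LOGS;
SELECT query, formatted_query
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 1;
```
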
## log_comment {#settings-log-comment}

@ -1206,6 +1236,9 @@ Default value: `0`.

Could be used for throttling speed when replicating the data to add or replace new nodes.

!!! note "Note"
    60000000 bytes/s approximately corresponds to 457 Mbps (60000000 / 1024 / 1024 * 8).

## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends for the server. Only has meaning at server startup. You can also limit the speed for a particular table with the [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) setting.
@ -1223,6 +1256,9 @@ Default value: `0`.

Could be used for throttling speed when replicating the data to add or replace new nodes.

!!! note "Note"
    60000000 bytes/s approximately corresponds to 457 Mbps (60000000 / 1024 / 1024 * 8).

## connect_timeout_with_failover_ms {#connect-timeout-with-failover-ms}

The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.

@ -1576,18 +1612,14 @@ When `output_format_json_quote_denormals = 1`, the query returns:

The character is interpreted as a delimiter in the CSV data. By default, the delimiter is `,`.

## input_format_csv_unquoted_null_literal_as_null {#settings-input_format_csv_unquoted_null_literal_as_null}

For CSV input format enables or disables parsing of unquoted `NULL` as literal (synonym for `\N`).

## input_format_csv_enum_as_number {#settings-input_format_csv_enum_as_number}

Enables or disables parsing enum values as enum ids for CSV input format.
When enabled, always treat enum values as enum ids for CSV input format. It's recommended to enable this setting if data contains only enum ids to optimize enum parsing.

Possible values:

- 0 — Enum values are parsed as values.
- 1 — Enum values are parsed as enum IDs.
- 0 — Enum values are parsed as values or as enum IDs.
- 1 — Enum values are parsed only as enum IDs.

Default value: 0.

@ -1601,29 +1633,52 @@ CREATE TABLE table_with_enum_column_for_csv_insert (Id Int32,Value Enum('first'

When the `input_format_csv_enum_as_number` setting is enabled:

Query:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
```

Query:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 103,'first'
```

throws an exception.

When the `input_format_csv_enum_as_number` setting is disabled:

Query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 103,'first'
SELECT * FROM table_with_enum_column_for_csv_insert;
```

Result:

```text
┌──Id─┬─Value─────┐
│ 102 │ second │
└─────┴───────────┘
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
┌──Id─┬─Value─┐
│ 103 │ first │
└─────┴───────┘
```

When the `input_format_csv_enum_as_number` setting is disabled, the `INSERT` query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
```

throws an exception.

## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}

Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
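
A one-line hedged example of the setting:

``` sql
SET output_format_csv_crlf_end_of_line = 1;
SELECT 1 AS x FORMAT CSV; -- rows are now terminated with \r\n instead of \n
```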
@ -2940,9 +2995,9 @@ Possible values:

Default value: `1`.

## output_format_csv_null_representation {#output_format_csv_null_representation}
## format_csv_null_representation {#format_csv_null_representation}

Defines the representation of `NULL` for [CSV](../../interfaces/formats.md#csv) output format. User can set any string as a value, for example, `My NULL`.
Defines the representation of `NULL` for [CSV](../../interfaces/formats.md#csv) output and input formats. User can set any string as a value, for example, `My NULL`.

Default value: `\N`.

@ -2965,7 +3020,7 @@ Result

Query

```sql
SET output_format_csv_null_representation = 'My NULL';
SET format_csv_null_representation = 'My NULL';
SELECT * FROM csv_custom_null FORMAT CSV;
```

@ -2977,9 +3032,9 @@ My NULL
My NULL
```

## output_format_tsv_null_representation {#output_format_tsv_null_representation}
## format_tsv_null_representation {#format_tsv_null_representation}

Defines the representation of `NULL` for [TSV](../../interfaces/formats.md#tabseparated) output format. User can set any string as a value, for example, `My NULL`.
Defines the representation of `NULL` for [TSV](../../interfaces/formats.md#tabseparated) output and input formats. User can set any string as a value, for example, `My NULL`.

Default value: `\N`.

@ -3002,7 +3057,7 @@ Result

Query

```sql
SET output_format_tsv_null_representation = 'My NULL';
SET format_tsv_null_representation = 'My NULL';
SELECT * FROM tsv_custom_null FORMAT TSV;
```

@ -6,6 +6,7 @@ Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query. Empty string for other dictionaries.
- `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Dictionary UUID.
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values:
    - `NOT_LOADED` — Dictionary was not loaded because it was not used.
    - `LOADED` — Dictionary loaded successfully.
@ -15,9 +16,10 @@ Columns:
    - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now.
- `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
- `key` — [Key type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or Composite key ([String](../../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”.
- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) that are provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
@ -31,34 +33,56 @@ Columns:
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
- `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment to dictionary.

**Example**

Configure the dictionary.
Configure the dictionary:

``` sql
CREATE DICTIONARY dictdb.dict
CREATE DICTIONARY dictionary_with_comment
(
    `key` Int64 DEFAULT -1,
    `value_default` String DEFAULT 'world',
    `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)'
    id UInt64,
    value String
)
PRIMARY KEY key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb'))
LIFETIME(MIN 0 MAX 1)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Make sure that the dictionary is loaded.

``` sql
SELECT * FROM system.dictionaries
SELECT * FROM system.dictionaries LIMIT 1 FORMAT Vertical;
```

``` text
┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐
│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │
└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘
Row 1:
──────
database: default
name: dictionary_with_comment
uuid: 4654d460-0d03-433a-8654-d4600d03d33a
status: NOT_LOADED
origin: 4654d460-0d03-433a-8654-d4600d03d33a
type:
key.names: ['id']
key.types: ['UInt64']
attribute.names: ['value']
attribute.types: ['String']
bytes_allocated: 0
query_count: 0
hit_rate: 0
found_rate: 0
element_count: 0
load_factor: 0
source:
lifetime_min: 0
lifetime_max: 0
loading_start_time: 1970-01-01 00:00:00
last_successful_update_time: 1970-01-01 00:00:00
loading_duration: 0
last_exception:
comment: The temporary dictionary
```

[Original article](https://clickhouse.com/docs/en/operations/system-tables/dictionaries) <!--hide-->

@ -26,6 +26,8 @@ Each query creates one or two rows in the `query_log` table, depending on the st

You can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting to reduce the number of queries, registered in the `query_log` table.

You can use the [log_formatted_queries](../../operations/settings/settings.md#settings-log-formatted-queries) setting to log formatted queries to the `formatted_query` column.

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
@ -48,6 +50,7 @@ Columns:
- `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
- `current_database` ([String](../../sql-reference/data-types/string.md)) — Name of the current database.
- `query` ([String](../../sql-reference/data-types/string.md)) — Query string.
- `formatted_query` ([String](../../sql-reference/data-types/string.md)) — Formatted query string.
- `normalized_query_hash` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Identical hash value without the values of literals for similar queries.
- `query_kind` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Type of the query.
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the databases present in the query.
@ -114,68 +117,68 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_t
Row 1:
──────
type: QueryFinish
event_date: 2021-07-28
event_time: 2021-07-28 13:46:56
event_time_microseconds: 2021-07-28 13:46:56.719791
query_start_time: 2021-07-28 13:46:56
query_start_time_microseconds: 2021-07-28 13:46:56.704542
query_duration_ms: 14
read_rows: 8393
read_bytes: 374325
event_date: 2021-11-03
event_time: 2021-11-03 16:13:54
event_time_microseconds: 2021-11-03 16:13:54.953024
query_start_time: 2021-11-03 16:13:54
query_start_time_microseconds: 2021-11-03 16:13:54.952325
query_duration_ms: 0
read_rows: 69
read_bytes: 6187
written_rows: 0
written_bytes: 0
result_rows: 4201
result_bytes: 153024
memory_usage: 4714038
result_rows: 69
result_bytes: 48256
memory_usage: 0
current_database: default
query: SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
normalized_query_hash: 6666026786019643712
query_kind: Select
databases: ['system']
tables: ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
columns: ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
query: DESCRIBE TABLE system.query_log
formatted_query:
normalized_query_hash: 8274064835331539124
query_kind:
databases: []
tables: []
columns: []
projections: []
views: []
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
query_id: 7c28bbbb-753b-4eba-98b1-efcbe2b9bdf6
address: ::ffff:127.0.0.1
port: 51006
port: 40452
initial_user: default
initial_query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
initial_query_id: 7c28bbbb-753b-4eba-98b1-efcbe2b9bdf6
initial_address: ::ffff:127.0.0.1
initial_port: 51006
initial_query_start_time: 2021-07-28 13:46:56
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
initial_port: 40452
initial_query_start_time: 2021-11-03 16:13:54
initial_query_start_time_microseconds: 2021-11-03 16:13:54.952325
interface: 1
os_user:
client_hostname:
client_name: ClickHouse client
os_user: sevirov
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse
client_revision: 54449
client_version_major: 21
client_version_minor: 8
client_version_patch: 0
client_version_minor: 10
client_version_patch: 1
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54453
revision: 54456
log_comment:
thread_ids: [5058,22097,22110,22094]
ProfileEvents.Names: ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
ProfileEvents.Values: [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
thread_ids: [30776,31174]
ProfileEvents: {'Query':1,'NetworkSendElapsedMicroseconds':59,'NetworkSendBytes':2643,'SelectedRows':69,'SelectedBytes':6187,'ContextLock':9,'RWLockAcquiredReadLocks':1,'RealTimeMicroseconds':817,'UserTimeMicroseconds':427,'SystemTimeMicroseconds':212,'OSCPUVirtualTimeMicroseconds':639,'OSReadChars':894,'OSWriteChars':319}
Settings: {'load_balancing':'random','max_memory_usage':'10000000000'}
used_aggregate_functions: []
used_aggregate_function_combinators: []
used_database_engines: []
used_data_type_families: ['UInt64','UInt8','Nullable','String','date']
used_data_type_families: []
used_dictionaries: []
used_formats: []
used_functions: ['concat','notEmpty','extractAll']
used_functions: []
used_storages: []
used_table_functions: []
```
@ -183,6 +186,3 @@ used_table_functions: []

**See Also**

- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
- [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) — This table contains information about each view executed during a query.

[Original article](https://clickhouse.com/docs/en/operations/system-tables/query_log) <!--hide-->

77
docs/en/operations/system-tables/session_log.md
Normal file
77
docs/en/operations/system-tables/session_log.md
Normal file
@ -0,0 +1,77 @@
# system.session_log {#system_tables-session_log}

Contains information about all successful and failed login and logout events.

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — Login/logout result. Possible values:
    - `LoginFailure` — Login error.
    - `LoginSuccess` — Successful login.
    - `Logout` — Logout from the system.
- `auth_id` ([UUID](../../sql-reference/data-types/uuid.md)) — Authentication ID, which is a UUID that is automatically generated each time a user logs in.
- `session_id` ([String](../../sql-reference/data-types/string.md)) — Session ID that is passed by the client via the [HTTP](../../interfaces/http.md) interface.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Login/logout date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Login/logout time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Login/logout starting time with microseconds precision.
- `user` ([String](../../sql-reference/data-types/string.md)) — User name.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)) — The authentication type. Possible values:
    - `NO_PASSWORD`
    - `PLAINTEXT_PASSWORD`
    - `SHA256_PASSWORD`
    - `DOUBLE_SHA1_PASSWORD`
    - `LDAP`
    - `KERBEROS`
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — The IP address that was used to log in/out.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to log in/out.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — The interface from which the login was initiated. Possible values:
    - `TCP`
    - `HTTP`
    - `gRPC`
    - `MySQL`
    - `PostgreSQL`
- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — The hostname of the client machine where the [clickhouse-client](../../interfaces/cli.md) or another TCP client is run.
- `client_name` ([String](../../sql-reference/data-types/string.md)) — The `clickhouse-client` or another TCP client name.
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Revision of the `clickhouse-client` or another TCP client.
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The major version of the `clickhouse-client` or another TCP client.
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The minor version of the `clickhouse-client` or another TCP client.
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the `clickhouse-client` or another TCP client version.
- `failure_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message containing the reason for the login/logout failure.

**Example**

Query:

``` sql
SELECT * FROM system.session_log LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
type: LoginSuccess
auth_id: 45e6bd83-b4aa-4a23-85e6-bd83b4aa1a23
session_id:
event_date: 2021-10-14
event_time: 2021-10-14 20:33:52
event_time_microseconds: 2021-10-14 20:33:52.104247
user: default
auth_type: PLAINTEXT_PASSWORD
profiles: ['default']
roles: []
settings: [('load_balancing','random'),('max_memory_usage','10000000000')]
client_address: ::ffff:127.0.0.1
client_port: 38490
interface: TCP
client_hostname:
client_name: ClickHouse client
client_revision: 54449
client_version_major: 21
client_version_minor: 10
client_version_patch: 0
failure_reason:
```
@ -6,7 +6,7 @@ Columns:

- `policy_name` ([String](../../sql-reference/data-types/string.md)) — Name of the storage policy.
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration; data fills the volumes according to this priority, i.e. data during inserts and merges is written to volumes with a lower priority (taking into account other rules: TTL, `max_data_part_size`, `move_factor`).
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.
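
A quick illustration of reading these columns (works with any configured policy):

``` sql
SELECT policy_name, volume_name, volume_priority, disks
FROM system.storage_policies
ORDER BY policy_name, volume_priority;
```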
@ -1,3 +1,4 @@
---
toc_priority: 58
toc_title: Usage Recommendations
---
@ -68,11 +69,16 @@ Regardless of RAID use, always use replication for data security.

Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting.
For HDD, enable the write cache.

Make sure that [fstrim](https://en.wikipedia.org/wiki/Trim_(computing)) is enabled for NVME and SSD disks in your OS (usually it's implemented using a cronjob or systemd service).

## File System {#file-system}

Ext4 is the most reliable option. Set the mount options `noatime`.
XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse.
Most other file systems should also work fine. File systems with delayed allocation work better.
XFS should be avoided. It works mostly fine but there are some reports about lower performance.
Most other file systems should also work fine.

Do not use compressed filesystems, because ClickHouse compresses data on its own, and does it better.
It's not recommended to use encrypted filesystems, because you can use the built-in encryption in ClickHouse, which is better.

## Linux Kernel {#linux-kernel}

@ -261,4 +267,8 @@ script
end script
```

## Antivirus software {#antivirus-software}

If you use antivirus software, configure it to skip folders with ClickHouse data files (`/var/lib/clickhouse`); otherwise performance may be reduced and you may experience unexpected errors during data ingestion and background merges.

{## [Original article](https://clickhouse.com/docs/en/operations/tips/) ##}

@ -25,6 +25,12 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a

-If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array.

## -Map {#agg-functions-combinator-map}

The -Map suffix can be appended to any aggregate function. This will create an aggregate function which gets Map type as an argument, and aggregates values of each key of the map separately using the specified aggregate function. The result is also of a Map type.

Examples: `sumMap(map(1,1))`, `avgMap(map('a', 1))`.

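A hedged end-to-end sketch of the combinator (the table and values are hypothetical; older servers may require `SET allow_experimental_map_type = 1` for the Map type):

``` sql
CREATE TABLE map_demo (ts DateTime, counters Map(String, UInt64)) ENGINE = Memory;
INSERT INTO map_demo VALUES (now(), map('hits', 3, 'misses', 1)), (now(), map('hits', 2));
-- sumMap aggregates the values of each key separately:
SELECT sumMap(counters) FROM map_demo; -- expected: {'hits':5,'misses':1}
```
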
## -SimpleState {#agg-functions-combinator-simplestate}

If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables.

@ -10,6 +10,10 @@ The String type replaces the types VARCHAR, BLOB, CLOB, and others from other DB

When creating tables, numeric parameters for string fields can be set (e.g. `VARCHAR(255)`), but ClickHouse ignores them.

Aliases:

- `String` — `LONGTEXT`, `MEDIUMTEXT`, `TINYTEXT`, `TEXT`, `LONGBLOB`, `MEDIUMBLOB`, `TINYBLOB`, `BLOB`, `VARCHAR`, `CHAR`.

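A small sketch of the aliasing described above (the table name is hypothetical):

``` sql
CREATE TABLE alias_demo (a VARCHAR(255), b TEXT) ENGINE = Memory;
DESCRIBE TABLE alias_demo; -- both columns are reported as String; the length parameter is ignored
```
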
## Encodings {#encodings}

ClickHouse does not have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is.

@ -66,7 +66,6 @@ Types of sources (`source_type`):
- DBMS
    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
    - [Redis](#dicts-external_dicts_dict_sources-redis)
@ -210,45 +209,6 @@ Setting fields:

When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`), remote hosts for HTTP dictionaries are checked against the contents of the `remote_url_allow_hosts` section from the config to prevent database users from accessing an arbitrary HTTP server.

## ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Example of settings:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>ShemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
))
```

Setting fields:

- `db` – Name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` – Name of the table and schema if exists.
- `connection_string` – Connection string.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database.

If you have a problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.

### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality}

!!! attention "Attention"
@ -464,6 +424,51 @@ LIFETIME(MIN 300 MAX 360)

## DBMS {#dbms}

### ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Example of settings:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>SchemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM SchemaName.TableName</query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

Setting fields:

- `db` – Name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` – Name of the table and schema if it exists.
- `connection_string` – Connection string.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` and `query` fields cannot be used together, and one of them must be declared.

ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it is necessary to set the table name according to the table name case in the database.

If you have problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.
### MySQL {#dicts-external_dicts_dict_sources-mysql}

Example of settings:

@ -487,6 +492,7 @@ Example of settings:
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```

@ -505,6 +511,7 @@ SOURCE(MYSQL(
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
@ -531,6 +538,11 @@ Setting fields:

- `fail_on_connection_loss` – The configuration parameter that controls the behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries executing the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`.

- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field, and either the `table` or the `query` field must be declared.
You can connect to MySQL on a local host via sockets. To do this, set `host` and `socket`.

Example of settings:

@ -547,6 +559,7 @@ Example of settings:
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```

@ -564,6 +577,7 @@ SOURCE(MYSQL(
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
@ -582,6 +596,7 @@ Example of settings:
        <table>ids</table>
        <where>id=10</where>
        <secure>1</secure>
        <query>SELECT id, value_1, value_2 FROM default.ids</query>
    </clickhouse>
</source>
```

@ -598,6 +613,7 @@ SOURCE(CLICKHOUSE(
    table 'ids'
    where 'id=10'
    secure 1
    query 'SELECT id, value_1, value_2 FROM default.ids'
));
```
@ -612,6 +628,10 @@ Setting fields:

- `where` – The selection criteria. May be omitted.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `secure` – Use SSL for the connection.
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field, and either the `table` or the `query` field must be declared.

### MongoDB {#dicts-external_dicts_dict_sources-mongodb}
@ -703,25 +723,28 @@ Example of settings:
        <consistency>One</consistency>
        <where>"SomeColumn" = 42</where>
        <max_threads>8</max_threads>
        <query>SELECT id, value_1, value_2 FROM database_name.table_name</query>
    </cassandra>
</source>
```

Setting fields:

- `host` – The Cassandra host or a comma-separated list of hosts.
- `port` – The port on the Cassandra servers. If not specified, the default port 9042 is used.
- `user` – Name of the Cassandra user.
- `password` – Password of the Cassandra user.
- `keyspace` – Name of the keyspace (database).
- `column_family` – Name of the column family (table).
- `allow_filering` – Flag that allows or disallows potentially expensive conditions on clustering key columns. Default value is 1.
- `partition_key_prefix` – Number of partition key columns in the primary key of the Cassandra table. Required for composite key dictionaries. The order of key columns in the dictionary definition must be the same as in Cassandra. Default value is 1 (the first key column is a partition key and the other key columns are clustering key).
- `consistency` – Consistency level. Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value is `One`.
- `where` – Optional selection criteria.
- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in composite key dictionaries.
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `column_family` or `where` fields cannot be used together with the `query` field, and either the `column_family` or the `query` field must be declared.

### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
@ -737,6 +760,7 @@ Example of settings:
        <table>table_name</table>
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </postgresql>
</source>
```

@ -755,6 +779,7 @@ SOURCE(POSTGRESQL(
    replica(host 'example01-2' port 5432 priority 2)
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
@ -764,11 +789,15 @@ Setting fields:

- `port` – The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `user` – Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `password` – Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `replica` – Section of replica configurations. There can be multiple sections:
    - `replica/host` – The PostgreSQL host.
    - `replica/port` – The PostgreSQL port.
    - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority.
- `db` – Name of the database.
- `table` – Name of the table.
- `where` – The selection criteria. The syntax for conditions is the same as for the `WHERE` clause in PostgreSQL. For example, `id > 10 AND id < 20`. Optional parameter.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `query` – The custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field, and either the `table` or the `query` field must be declared.
@ -5,7 +5,7 @@ toc_title: Bit

# Bit Functions {#bit-functions}

Bit functions work for any pair of types from `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64`. Some functions support `String` and `FixedString` types.

The result type is an integer with bits equal to the maximum bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64.
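For example, a short sketch of the result-type rule (the values are illustrative):

``` sql
-- UInt8 AND UInt32: the result takes the wider type, UInt32.
SELECT bitAnd(toUInt8(12), toUInt32(10)) AS r, toTypeName(r);
-- Expected: 8, UInt32
```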
@ -19,8 +19,100 @@ The result type is an integer with bits equal to the maximum bits of its argumen

## bitShiftLeft(a, b) {#bitshiftlefta-b}

Shifts the binary representation of a value to the left by a specified number of bit positions.

A `FixedString` or a `String` is treated as a single multibyte value.

Bits of a `FixedString` value are lost as they are shifted out. On the contrary, a `String` value is extended with additional bytes, so no bits are lost.

**Syntax**

``` sql
bitShiftLeft(a, b)
```

**Arguments**

- `a` — A value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — The number of shift positions. [Unsigned integer types](../../sql-reference/data-types/int-uint.md), 64 bit types or less are allowed.

**Returned value**

- Shifted value.

The type of the returned value is the same as the type of the input value.

**Example**

In the following queries, the [bin](encoding-functions.md#bin) and [hex](encoding-functions.md#hex) functions are used to show the bits of shifted values.

``` sql
SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐
│ 99 │ 01100011 │ 140 │ 10001100 │
└────┴──────────┴───────────┴──────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐
│ abc │ 616263 │ &0 │ 06162630 │
└─────┴────────────┴───────────┴─────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐
│ abc │ 616263 │ &0 │ 162630 │
└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘
```
## bitShiftRight(a, b) {#bitshiftrighta-b}

Shifts the binary representation of a value to the right by a specified number of bit positions.

A `FixedString` or a `String` is treated as a single multibyte value. Note that the length of a `String` value is reduced as bits are shifted out.

**Syntax**

``` sql
bitShiftRight(a, b)
```

**Arguments**

- `a` — A value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — The number of shift positions. [Unsigned integer types](../../sql-reference/data-types/int-uint.md), 64 bit types or less are allowed.

**Returned value**

- Shifted value.

The type of the returned value is the same as the type of the input value.

**Example**

Query:

``` sql
SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐
│ 101 │ 01100101 │ 25 │ 00011001 │
└─────┴──────────┴───────────┴────────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐
│ abc │ 616263 │ │ 0616 │
└─────┴────────────┴───────────┴───────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐
│ abc │ 616263 │ │ 000616 │
└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘
```

## bitRotateLeft(a, b) {#bitrotatelefta-b}

## bitRotateRight(a, b) {#bitrotaterighta-b}
@ -216,6 +216,44 @@ Example:

SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
```

## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys}

Parses a JSON string and extracts the keys.

**Syntax**

``` sql
JSONExtractKeys(json[, a, b, c...])
```

**Arguments**

- `json` — [String](../../sql-reference/data-types/string.md) with valid JSON.
- `a, b, c...` — Comma-separated indices or keys that specify the path to the inner field in a nested JSON object. Each argument can be either a [String](../../sql-reference/data-types/string.md) to get the field by the key or an [Integer](../../sql-reference/data-types/int-uint.md) to get the N-th field (indexed from 1, negative integers count from the end). If not set, the whole JSON is parsed as the top-level object. Optional parameter.

**Returned value**

Array with the keys of the JSON.

Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

**Example**

Query:

```sql
SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}');
```

Result:

```text
┌─JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}')─┐
│ ['a','b'] │
└────────────────────────────────────────────────────────────┘
```
## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys}

Returns a part of the JSON as an unparsed string.
@ -2463,3 +2463,39 @@ Result:
│ Linux 4.15.0-55-generic │
└─────────────────────────┘
```

## zookeeperSessionUptime {#zookeepersessionuptime}

Returns the uptime of the current ZooKeeper session in seconds.

**Syntax**

``` sql
zookeeperSessionUptime()
```

**Arguments**

- None.

**Returned value**

- Uptime of the current ZooKeeper session in seconds.

Type: [UInt32](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT zookeeperSessionUptime();
```

Result:

``` text
┌─zookeeperSessionUptime()─┐
│ 286 │
└──────────────────────────┘
```
@ -212,6 +212,9 @@ SET mutations_sync = 2;

CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM tmp;
```
58
docs/en/sql-reference/statements/alter/comment.md
Normal file
@ -0,0 +1,58 @@
---
toc_priority: 51
toc_title: COMMENT
---

# ALTER TABLE … MODIFY COMMENT {#alter-modify-comment}

Adds, modifies, or removes a comment on the table, regardless of whether it was set before. The comment change is reflected in both [system.tables](../../../operations/system-tables/tables.md) and the `SHOW CREATE TABLE` query.

**Syntax**

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment'
```

**Examples**

Creating a table with a comment (for more information, see the [COMMENT](../../../sql-reference/statements/create/table.md#comment-table) clause):

``` sql
CREATE TABLE table_with_comment
(
    `k` UInt64,
    `s` String
)
ENGINE = Memory()
COMMENT 'The temporary table';
```

Modifying the table comment:

``` sql
ALTER TABLE table_with_comment MODIFY COMMENT 'new comment on a table';
SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment';
```

Output of the new comment:

```text
┌─comment────────────────┐
│ new comment on a table │
└────────────────────────┘
```

Removing the table comment:

``` sql
ALTER TABLE table_with_comment MODIFY COMMENT '';
SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment';
```

Output of the removed comment:

```text
┌─comment─┐
│         │
└─────────┘
```
@ -32,6 +32,8 @@ These `ALTER` statements modify entities related to role-based access control:

- [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md)
- [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md)

The [ALTER TABLE ... MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, modifies, or removes a comment on the table, regardless of whether it was set before.

## Mutations {#mutations}

`ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes, similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, that produce new “mutated” versions of parts.
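For illustration (a sketch; the table name is made up):

``` sql
-- A mutation: affected data parts are rewritten in the background, asynchronously.
ALTER TABLE visits DELETE WHERE duration < 5;
```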
@ -155,7 +155,7 @@ ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902

## FREEZE PARTITION {#alter_freeze-partition}

``` sql
ALTER TABLE table_name FREEZE [PARTITION partition_expr] [WITH NAME 'backup_name']
```

This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates the backup of all partitions at once.

@ -169,6 +169,7 @@ At the time of execution, for a data snapshot, the query creates hardlinks to a

- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the config.
- `N` is the incremental number of the backup.
- If the `WITH NAME` parameter is specified, then the value of the `'backup_name'` parameter is used instead of the incremental number.

!!! note "Note"
    If you use [a set of disks for data storage in a table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing data parts matched by the `PARTITION` expression.
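A small usage sketch (the backup name is illustrative):

``` sql
-- Freeze one partition under an explicit name instead of an incremental number;
-- the hardlinked parts land under /var/lib/clickhouse/shadow/before_migration/.
ALTER TABLE visits FREEZE PARTITION 201902 WITH NAME 'before_migration';
```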
@ -7,13 +7,13 @@ toc_title: DICTIONARY

Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with the given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

**Syntax**

``` sql
CREATE DICTIONARY [OR REPLACE][IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
(
    key1 type1 [DEFAULT|EXPRESSION expr1] [IS_OBJECT_ID],
    key2 type2 [DEFAULT|EXPRESSION expr2],
    attr1 type2 [DEFAULT|EXPRESSION expr3] [HIERARCHICAL|INJECTIVE],
    attr2 type2 [DEFAULT|EXPRESSION expr4] [HIERARCHICAL|INJECTIVE]
)
@ -21,6 +21,8 @@ PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME({MIN min_val MAX max_val | max_val})
SETTINGS(setting_name = setting_value, setting_name = setting_value, ...)
COMMENT 'Comment'
```

The external dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type; all other properties may have default values.

@ -30,3 +32,68 @@ External dictionary structure consists of attributes. Dictionary attributes are

Depending on the dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md), one or more attributes can be specified as dictionary keys.

For more information, see the [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.
You can add a comment to the dictionary when creating it, using the `COMMENT` clause.

**Example**

Input table `source_table`:

``` text
┌─id─┬─value──┐
│  1 │ First  │
│  2 │ Second │
└────┴────────┘
```

Creating the dictionary:

``` sql
CREATE DICTIONARY dictionary_with_comment
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Output the dictionary:

``` sql
SHOW CREATE DICTIONARY dictionary_with_comment;
```

```text
┌─statement────────────────────────────────────────────────────────────────────┐
│ CREATE DICTIONARY default.dictionary_with_comment
(
    `id` UInt64,
    `value` String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LIFETIME(MIN 0 MAX 1000)
LAYOUT(FLAT())
COMMENT 'The temporary dictionary' │
└──────────────────────────────────────────────────────────────────────────────┘
```

Output the comment of the dictionary:

``` sql
SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase();
```

```text
┌─comment──────────────────┐
│ The temporary dictionary │
└──────────────────────────┘
```

**See Also**

- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
@ -5,7 +5,23 @@ toc_title: merge

# merge {#merge}

Creates a temporary [Merge](../../engines/table-engines/special/merge.md) table. The table structure is taken from the first table encountered that matches the regular expression.

**Syntax**

```sql
merge('db_name', 'tables_regexp')
```

**Arguments**

- `db_name` — Possible values:
    - a database name,
    - a constant expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

- `tables_regexp` — A regular expression to match the table names in the specified DB or DBs.
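A usage sketch (the pattern is illustrative):

```sql
-- Query every table in the current database whose name starts with WatchLog as one table.
SELECT * FROM merge(currentDatabase(), '^WatchLog');
```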
**See Also**

- [Merge](../../engines/table-engines/special/merge.md) table engine
@ -1,5 +1,5 @@
---
toc_priority: 72
toc_title: "Navigating the ClickHouse codebase"
---

1
docs/ru/development/build-cross-riscv.md
Symbolic link
@ -0,0 +1 @@
../../en/development/build-cross-riscv.md
@ -1,5 +1,5 @@
---
toc_priority: 71
toc_title: "Third-party libraries used"
---

@ -1,5 +1,5 @@
---
toc_priority: 69
toc_title: "How to write C++ code"
---
@ -23,6 +23,20 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p

- `user` — the PostgreSQL user.
- `password` — the user's password.

## Dynamically adding new tables to replication {#dynamically-adding-table-to-replication}

``` sql
ATTACH TABLE postgres_database.new_table;
```

If a specific list of tables is set for the database via the [materialized_postgresql_tables_list](../../operations/settings/settings.md#materialized-postgresql-tables-list) setting, it will be updated (in the `.sql` metadata) to include the tables added with the `ATTACH TABLE` query.

## Dynamically removing tables from replication {#dynamically-removing-table-from-replication}

``` sql
DETACH TABLE postgres_database.table_to_remove;
```

## Settings {#settings}

- [materialized_postgresql_max_block_size](../../operations/settings/settings.md#materialized-postgresql-max-block-size)

@ -44,6 +58,12 @@ SETTINGS materialized_postgresql_max_block_size = 65536,

SELECT * FROM database1.table1;
```

The settings can be changed, if necessary, with a DDL query. However, the `materialized_postgresql_tables_list` setting cannot be changed this way; to update the list of tables in this setting, use the `ATTACH TABLE` query.

``` sql
ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = <new_size>;
```

## Requirements {#requirements}

1. The [wal_level](https://postgrespro.ru/docs/postgrespro/10/runtime-config-wal) setting must have the value `logical`, and the `max_replication_slots` parameter must be at least `2` in the PostgreSQL configuration file.
@ -30,6 +30,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    [rabbitmq_skip_broken_messages = N,]
    [rabbitmq_max_block_size = N,]
    [rabbitmq_flush_interval_ms = N]
    [rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish']
```

Required parameters:

@ -51,6 +52,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

- `rabbitmq_skip_broken_messages` – the maximum number of broken messages per block. If `rabbitmq_skip_broken_messages = N`, the engine skips `N` messages that could not be parsed. One message corresponds to exactly one record (row). Default value: 0.
- `rabbitmq_max_block_size`
- `rabbitmq_flush_interval_ms`
- `rabbitmq_queue_settings_list` – allows setting RabbitMQ queue settings manually when the queue is created. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting for the queue is enabled automatically.

Data format settings can also be added to the list of RabbitMQ settings.
|
@ -775,6 +775,8 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
|
||||
|
||||
После выполнения фоновых слияний или мутаций старые куски не удаляются сразу, а через некоторое время (табличная настройка `old_parts_lifetime`). Также они не перемещаются на другие тома или диски, поэтому до момента удаления они продолжают учитываться при подсчёте занятого дискового пространства.
|
||||
|
||||
Пользователь может сбалансированно распределять новые большие куски данных по разным дискам тома [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures), используя настройку [min_bytes_to_rebalance_partition_over_jbod](../../../operations/settings/merge-tree-settings.md#min-bytes-to-rebalance-partition-over-jbod).
|
||||
|
||||
## Использование сервиса S3 для хранения данных {#table_engine-mergetree-s3}
|
||||
|
||||
Таблицы семейства `MergeTree` могут хранить данные в сервисе [S3](https://aws.amazon.com/s3/) при использовании диска типа `s3`.
|
||||
|
@ -7,43 +7,56 @@ toc_title: Merge

The `Merge` engine (not to be confused with `MergeTree`) does not store data itself, but allows reading from any number of other tables simultaneously.
Reading is automatically parallelized. Writing to the table is not supported. When reading, the indexes of the tables that are actually read are used, if they exist.

## Creating a Table {#creating-a-table}

``` sql
CREATE TABLE ... Engine=Merge(db_name, tables_regexp)
```

**Engine Parameters**

- `db_name` — possible values:
    - a database name,
    - an expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression to select databases.

- `tables_regexp` — a regular expression for the table names in the specified database or databases.

## Usage {#usage}

Regular expressions are [re2](https://github.com/google/re2) (supports a subset of PCRE) and case-sensitive.
See the note about escaping in regular expressions in the “match” section.

When selecting tables to read, the `Merge` table itself is not selected, even if it matches the regular expression, so that no loops arise.
That said, you can create two `Merge` tables that endlessly try to read each other's data, but doing so is not recommended.

The typical way to use the `Merge` engine is to work with a large number of `TinyLog` tables as if they were one.

**Example 1**

Suppose there are two databases, `ABC_corporate_site` and `ABC_store`. The `all_visitors` table will contain IDs from the `visitors` tables in both databases.

``` sql
CREATE TABLE all_visitors (id UInt32) ENGINE=Merge(REGEXP('ABC_*'), 'visitors');
```

**Example 2**

Suppose there is an old table `WatchLog_old`, and the partitioning needs to be changed without moving data into the new table `WatchLog_new`, while data from both tables must remain visible in queries.

``` sql
CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
    ENGINE=MergeTree(date, (UserId, EventType), 8192);
INSERT INTO WatchLog_old VALUES ('2018-01-01', 1, 'hit', 3);

CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64)
    ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192;
INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);

CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');

SELECT * FROM WatchLog;
```

``` text
@ -61,7 +74,7 @@ FROM WatchLog

In the `WHERE/PREWHERE` clause you can set a constant condition on the `_table` column (for example, `WHERE _table='xyz'`). In this case, read operations are performed only for the tables that satisfy the `_table` condition, so the `_table` column acts as an index.

**See Also**

- [Virtual columns](index.md#table_engines-virtual_columns)
- The [merge](../../../sql-reference/table-functions/merge.md) table function
@ -430,7 +430,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
<http_handlers>
    <rule>
        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
        <methods>GET</methods>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
@ -30,11 +30,13 @@ toc_title: "Distinctive Features of ClickHouse"
Almost none of the column-oriented DBMSs listed above support distributed query processing.
In ClickHouse, data can reside on different shards. Each shard can be a group of replicas used for fault tolerance. A query is executed on all shards in parallel, transparently to the user.

## SQL support {#sql-support}

ClickHouse supports a [declarative query language based on SQL](../sql-reference/index.md) that in [many cases](../sql-reference/ansi.md) matches the SQL standard.

[GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in the [FROM](../sql-reference/statements/select/from.md), [IN](../sql-reference/operators/in.md) and [JOIN](../sql-reference/statements/select/join.md) clauses, [window functions](../sql-reference/window-functions/index.md), and scalar subqueries are supported.

Correlated subqueries are not supported, but may become available in the future.

## Vector engine {#vektornyi-dvizhok}
@ -368,6 +368,16 @@ ClickHouse checks the conditions for `min_part_size` and `min_part
</http_server_default_response>
```

## hsts_max_age {#hsts-max-age}

The HSTS expiration time in seconds. The default value is `0` (HSTS disabled). To enable HSTS, set a positive number; the HSTS max-age will be equal to that number.

**Example**

```xml
<hsts_max_age>600000</hsts_max_age>
```

## include_from {#server_configuration_parameters-include_from}

The path to the file with substitutions.
@ -327,3 +327,31 @@ If the total number of active parts in all

Default value: `true`.

By default, the ClickHouse server checks the data type of the sampling column or sampling expression when creating a table. If tables with an incorrect sampling expression already exist, set `check_sample_column_is_correct` to `false` so that the server does not throw an exception at startup.

## min_bytes_to_rebalance_partition_over_jbod {#min-bytes-to-rebalance-partition-over-jbod}

Sets the minimum number of bytes required to enable balancing when distributing new large data parts across the disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume.

Possible values:

- A positive integer.
- 0 — balancing is disabled.

Default value: `0`.

**Usage**

The value of the `min_bytes_to_rebalance_partition_over_jbod` setting must be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.
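A configuration sketch (the ~50 GiB threshold is illustrative, not a recommendation):

```xml
<!-- Server config: enable JBOD rebalancing for new parts above ~50 GiB. -->
<merge_tree>
    <min_bytes_to_rebalance_partition_over_jbod>53687091200</min_bytes_to_rebalance_partition_over_jbod>
</merge_tree>
```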
## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}

Configures whether a data part is detached after a merge or a mutation if its content on one replica is not byte-identical to the parts on other replicas. If disabled, the part is removed instead. Enable this behavior if you want to analyze such parts later.

This setting applies to `MergeTree` tables with enabled [data replication](../../engines/table-engines/mergetree-family/replication.md).

Possible values:

- 0 — parts are removed.
- 1 — parts are detached.

Default value: `0`.
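A per-table sketch (the table definition and ZooKeeper path are illustrative):

``` sql
-- Keep byte-mismatched parts in detached/ for later inspection on this table.
CREATE TABLE t
(
    x UInt64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/t', '{replica}')
ORDER BY x
SETTINGS detach_not_byte_identical_parts = 1;
```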
@ -922,6 +922,17 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'

log_query_threads=1
```

## log_formatted_queries {#settings-log-formatted-queries}

Allows logging formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table.

Possible values:

- 0 — formatted queries are not logged to the system table.
- 1 — formatted queries are logged to the system table.

Default value: `0`.
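A usage sketch (session-level; the column name is as in system.query_log):

``` sql
SET log_formatted_queries = 1;
SELECT 1;
SYSTEM FLUSH LOGS;
-- The formatted_query column now contains the formatted text of the query.
SELECT formatted_query FROM system.query_log WHERE type = 'QueryFinish' ORDER BY event_time DESC LIMIT 1;
```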
## log_comment {#settings-log-comment}

Sets the value of the `log_comment` field of the [system.query_log](../system-tables/query_log.md) table and the comment text in the server log.

@ -1135,6 +1146,9 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'

Can be used to limit the network speed when replicating data to add or replace new nodes.

!!! note "Note"
    60000000 bytes/s is approximately 457 Mbit/s (60000000 / 1024 / 1024 * 8).

## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server}

Limits the maximum network data exchange speed (in bytes per second) for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends. Applied only at server startup. You can also limit the speed for a particular table with the [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) setting.

@ -1152,6 +1166,9 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'

Can be used to limit the network speed when replicating data to add or replace new nodes.

!!! note "Note"
    60000000 bytes/s is approximately 457 Mbit/s (60000000 / 1024 / 1024 * 8).

## connect_timeout_with_failover_ms {#connect-timeout-with-failover-ms}

The timeout in milliseconds for connecting to a remote server for the Distributed table engine, if the shard and replica sections are used in the cluster definition.
@ -10,6 +10,9 @@

- `type` ([String](../../sql-reference/data-types/string.md)) — the index type.
- `expr` ([String](../../sql-reference/data-types/string.md)) — the expression used to calculate the index.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of granules in a data block.
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the size of the compressed data, in bytes.
- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the size of the uncompressed data, in bytes.
- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the size of the marks, in bytes.

**Example**

@ -26,6 +29,9 @@ name: clicks_idx

type: minmax
expr: clicks
granularity: 1
data_compressed_bytes: 58
data_uncompressed_bytes: 6
marks: 48

Row 2:
──────
@ -35,4 +41,7 @@ name: contacts_null_idx

type: minmax
expr: assumeNotNull(contacts_null)
granularity: 1
data_compressed_bytes: 58
data_uncompressed_bytes: 6
marks: 48
```
@ -4,58 +4,84 @@

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — the name of the database containing the dictionary created by a DDL query. An empty string for other dictionaries.
- `name` ([String](../../sql-reference/data-types/string.md)) — the [dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md).
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — the unique UUID of the dictionary.
- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — the dictionary status. Possible values:
    - `NOT_LOADED` — the dictionary was not loaded because it has not been used.
    - `LOADED` — the dictionary loaded successfully.
    - `FAILED` — the dictionary could not be loaded as a result of an error.
    - `LOADING` — the dictionary is loading now.
    - `LOADED_AND_RELOADING` — the dictionary loaded successfully and is being reloaded now (common reasons: a [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary) query, a timeout, a change of the dictionary settings).
    - `FAILED_AND_RELOADING` — the dictionary could not be loaded as a result of an error and is being reloaded now.
- `origin` ([String](../../sql-reference/data-types/string.md)) — the path to the configuration file describing the dictionary.
- `type` ([String](../../sql-reference/data-types/string.md)) — the dictionary layout type. [Storing dictionaries in memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md).
- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — the array of [key names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — the corresponding array of [key types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary.
- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — the array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — the corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary.
- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the amount of RAM used by the dictionary.
- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the number of queries since the dictionary was loaded or since the last successful reload.
- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — for cache dictionaries, the percentage of cached values.
- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — the percentage of lookups for which the value was found.
- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the number of elements stored in the dictionary.
- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — the fill rate of the dictionary (for a hashed dictionary, the fill rate of the hash table).
- `source` ([String](../../sql-reference/data-types/string.md)) — text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) of the dictionary.
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload it (if `invalidate_query` is set, then only if it has changed). In seconds.
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — the maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload it (if `invalidate_query` is set, then only if it has changed). In seconds.
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the start time of loading the dictionary.
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — the time spent loading the dictionary.
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — the text of the error that occurred when creating or reloading the dictionary, if the dictionary could not be created.
- `comment` ([String](../../sql-reference/data-types/string.md)) — the text of the comment on the dictionary.

**Example**

Configure the dictionary:

``` sql
CREATE DICTIONARY dictionary_with_comment
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Make sure the dictionary is loaded.

``` sql
SELECT * FROM system.dictionaries LIMIT 1 FORMAT Vertical;
```

``` text
Row 1:
──────
database: default
name: dictionary_with_comment
uuid: 4654d460-0d03-433a-8654-d4600d03d33a
status: NOT_LOADED
origin: 4654d460-0d03-433a-8654-d4600d03d33a
type:
key.names: ['id']
key.types: ['UInt64']
attribute.names: ['value']
attribute.types: ['String']
bytes_allocated: 0
query_count: 0
hit_rate: 0
found_rate: 0
element_count: 0
load_factor: 0
source:
lifetime_min: 0
lifetime_max: 0
loading_start_time: 1970-01-01 00:00:00
last_successful_update_time: 1970-01-01 00:00:00
loading_duration: 0
last_exception:
comment: The temporary dictionary
```
|
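The columns above also make a handy health check. A minimal monitoring sketch (the status filter and `FORMAT` clause are illustrative, not from the original page); `last_exception` is empty for dictionaries that loaded cleanly, so this surfaces only the broken ones:

``` sql
SELECT database, name, status, last_exception
FROM system.dictionaries
WHERE status != 'LOADED'
FORMAT Vertical;
```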
@ -38,6 +38,12 @@

- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – size of the file with marks.

- `secondary_indices_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – total size of the compressed data for the secondary indices in the data part. Auxiliary files (for example, files with marks) are not included.

- `secondary_indices_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – total size of the uncompressed data for the secondary indices in the data part. Auxiliary files (for example, files with marks) are not included.

- `secondary_indices_marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – size of the file with marks for the secondary indices.

- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – time the directory with the data part was modified. Usually corresponds to the time the part was created.

- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) – time when the data part became inactive.
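The new `secondary_indices_*` columns show how much space data-skipping indexes take. A hedged sketch (assuming the columns belong to `system.parts`, as the surrounding hunks suggest; `my_table` is a placeholder):

``` sql
-- Sum secondary-index sizes over the active parts of a hypothetical table.
SELECT
    table,
    formatReadableSize(sum(secondary_indices_compressed_bytes)) AS idx_compressed,
    formatReadableSize(sum(secondary_indices_uncompressed_bytes)) AS idx_uncompressed,
    formatReadableSize(sum(secondary_indices_marks_bytes)) AS idx_marks
FROM system.parts
WHERE active AND table = 'my_table'
GROUP BY table;
```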
@ -119,6 +125,9 @@ rows: 6
bytes_on_disk: 310
data_compressed_bytes: 157
data_uncompressed_bytes: 91
secondary_indices_compressed_bytes: 58
secondary_indices_uncompressed_bytes: 6
secondary_indices_marks_bytes: 48
marks_bytes: 144
modification_time: 2020-06-18 13:01:49
remove_time: 0000-00-00 00:00:00
@ -26,6 +26,8 @@ ClickHouse does not delete data from the table automatically

To reduce the number of queries registered in the `query_log` table, you can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting.

To register formatted queries in the `formatted_query` column, you can use the [log_formatted_queries](../../operations/settings/settings.md#settings-log-formatted-queries) setting.
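For example, a session that wants roughly half of its queries logged, with formatted text included, might set (a sketch; both settings can also be set in a user profile):

``` sql
SET log_queries_probability = 0.5; -- log about 50% of queries
SET log_formatted_queries = 1;     -- also fill the formatted_query column
```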
Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — type of event that occurred when executing the query. Values:
@ -48,6 +50,7 @@ ClickHouse does not delete data from the table automatically
- `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM consumed by the query.
- `current_database` ([String](../../sql-reference/data-types/string.md)) — name of the current database.
- `query` ([String](../../sql-reference/data-types/string.md)) — query text.
- `formatted_query` ([String](../../sql-reference/data-types/string.md)) — formatted query text.
- `normalized_query_hash` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — hash that is identical for queries that differ only in literal values.
- `query_kind` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — type of the query.
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — names of the databases present in the query.
@ -113,74 +116,72 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_t
Row 1:
──────
type: QueryFinish
event_date: 2021-07-28
event_time: 2021-07-28 13:46:56
event_time_microseconds: 2021-07-28 13:46:56.719791
query_start_time: 2021-07-28 13:46:56
query_start_time_microseconds: 2021-07-28 13:46:56.704542
query_duration_ms: 14
read_rows: 8393
read_bytes: 374325
event_date: 2021-11-03
event_time: 2021-11-03 16:13:54
event_time_microseconds: 2021-11-03 16:13:54.953024
query_start_time: 2021-11-03 16:13:54
query_start_time_microseconds: 2021-11-03 16:13:54.952325
query_duration_ms: 0
read_rows: 69
read_bytes: 6187
written_rows: 0
written_bytes: 0
result_rows: 4201
result_bytes: 153024
memory_usage: 4714038
result_rows: 69
result_bytes: 48256
memory_usage: 0
current_database: default
query: SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
normalized_query_hash: 6666026786019643712
query_kind: Select
databases: ['system']
tables: ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
columns: ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
query: DESCRIBE TABLE system.query_log
formatted_query:
normalized_query_hash: 8274064835331539124
query_kind:
databases: []
tables: []
columns: []
projections: []
views: []
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
query_id: 7c28bbbb-753b-4eba-98b1-efcbe2b9bdf6
address: ::ffff:127.0.0.1
port: 51006
port: 40452
initial_user: default
initial_query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
initial_query_id: 7c28bbbb-753b-4eba-98b1-efcbe2b9bdf6
initial_address: ::ffff:127.0.0.1
initial_port: 51006
initial_query_start_time: 2021-07-28 13:46:56
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
initial_port: 40452
initial_query_start_time: 2021-11-03 16:13:54
initial_query_start_time_microseconds: 2021-11-03 16:13:54.952325
interface: 1
os_user:
client_hostname:
client_name: ClickHouse client
os_user: sevirov
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse
client_revision: 54449
client_version_major: 21
client_version_minor: 8
client_version_patch: 0
client_version_minor: 10
client_version_patch: 1
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54453
revision: 54456
log_comment:
thread_ids: [5058,22097,22110,22094]
ProfileEvents.Names: ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
ProfileEvents.Values: [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
thread_ids: [30776,31174]
ProfileEvents: {'Query':1,'NetworkSendElapsedMicroseconds':59,'NetworkSendBytes':2643,'SelectedRows':69,'SelectedBytes':6187,'ContextLock':9,'RWLockAcquiredReadLocks':1,'RealTimeMicroseconds':817,'UserTimeMicroseconds':427,'SystemTimeMicroseconds':212,'OSCPUVirtualTimeMicroseconds':639,'OSReadChars':894,'OSWriteChars':319}
Settings: {'load_balancing':'random','max_memory_usage':'10000000000'}
used_aggregate_functions: []
used_aggregate_function_combinators: []
used_database_engines: []
used_data_type_families: ['UInt64','UInt8','Nullable','String','date']
used_data_type_families: []
used_dictionaries: []
used_formats: []
used_functions: ['concat','notEmpty','extractAll']
used_functions: []
used_storages: []
used_table_functions: []
```
**See also**

- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — this table contains information about each thread of query execution.

[Original article](https://clickhouse.com/docs/ru/operations/system_tables/query_log) <!--hide-->
77
docs/ru/operations/system-tables/session_log.md
Normal file
@ -0,0 +1,77 @@
# system.session_log {#system_tables-session_log}

Contains information about all successful and failed login and logout events.

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — login/logout result. Possible values:
    - `LoginFailure` — login error.
    - `LoginSuccess` — successful login.
    - `Logout` — logout from the system.
- `auth_id` ([UUID](../../sql-reference/data-types/uuid.md)) — authentication ID, a UUID that is automatically generated each time a user logs in.
- `session_id` ([String](../../sql-reference/data-types/string.md)) — session ID that is passed by the client via the [HTTP](../../interfaces/http.md) interface.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — login/logout date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — login/logout time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — login/logout start time with microsecond precision.
- `user` ([String](../../sql-reference/data-types/string.md)) — user name.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)) — authentication type. Possible values:
    - `NO_PASSWORD`
    - `PLAINTEXT_PASSWORD`
    - `SHA256_PASSWORD`
    - `DOUBLE_SHA1_PASSWORD`
    - `LDAP`
    - `KERBEROS`
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — list of roles to which the profile is applied.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — settings that were changed when the client logged in or out.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to log in or out.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — client port that was used to log in or out.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — interface from which the login was initiated. Possible values:
    - `TCP`
    - `HTTP`
    - `gRPC`
    - `MySQL`
    - `PostgreSQL`
- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — hostname of the client machine from which [clickhouse-client](../../interfaces/cli.md) or another TCP client connected.
- `client_name` ([String](../../sql-reference/data-types/string.md)) — `clickhouse-client` or another TCP client.
- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — revision of `clickhouse-client` or another TCP client.
- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — major version of `clickhouse-client` or another TCP client.
- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — minor version of `clickhouse-client` or another TCP client.
- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — patch component of the `clickhouse-client` or another TCP client version.
- `failure_reason` ([String](../../sql-reference/data-types/string.md)) — exception message containing the reason for the login or logout failure.

**Example**

Query:

``` sql
SELECT * FROM system.session_log LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
type: LoginSuccess
auth_id: 45e6bd83-b4aa-4a23-85e6-bd83b4aa1a23
session_id:
event_date: 2021-10-14
event_time: 2021-10-14 20:33:52
event_time_microseconds: 2021-10-14 20:33:52.104247
user: default
auth_type: PLAINTEXT_PASSWORD
profiles: ['default']
roles: []
settings: [('load_balancing','random'),('max_memory_usage','10000000000')]
client_address: ::ffff:127.0.0.1
client_port: 38490
interface: TCP
client_hostname:
client_name: ClickHouse client
client_revision: 54449
client_version_major: 21
client_version_minor: 10
client_version_patch: 0
failure_reason:
```
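The table is convenient for simple security checks. A sketch that counts recent failed logins per user (column names as documented above; the date filter is illustrative):

``` sql
SELECT user, count() AS failures
FROM system.session_log
WHERE type = 'LoginFailure' AND event_date >= today() - 1
GROUP BY user
ORDER BY failures DESC;
```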
@ -6,7 +6,7 @@

- `policy_name` ([String](../../sql-reference/data-types/string.md)) — storage policy name.
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — name of a volume contained in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — sequential number of the volume in the configuration.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — sequential number of the volume in the configuration; this is the priority in which data fills the volumes, i.e. during inserts and merges data is written to volumes with lower priority values (taking other rules into account: TTL, `max_data_part_size`, `move_factor`).
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — names of the disks contained in the storage policy.
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — maximum size of a data part that can be stored on the disks of the volume (0 — no limit).
- `move_factor` — share of available free space on the volume; when it drops below this value, data starts moving to the next volume, if there is one (default 0.1).
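To check the resulting volume order for a given policy, something like the following works (`tiered` is a hypothetical policy name):

``` sql
SELECT volume_name, volume_priority, disks, max_data_part_size, move_factor
FROM system.storage_policies
WHERE policy_name = 'tiered'
ORDER BY volume_priority;
```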
@ -66,7 +66,6 @@ SETTINGS(format_csv_allow_single_quotes = 0)
- DBMS:
    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
    - [Redis](#dicts-external_dicts_dict_sources-redis)
@ -210,45 +209,6 @@ SOURCE(HTTP(

When creating a dictionary with a DDL command (`CREATE DICTIONARY ...`), remote hosts for HTTP dictionaries are checked against the `remote_url_allow_hosts` section of the server configuration. Otherwise database users would be able to access an arbitrary HTTP server.

## ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Configuration example:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>SchemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
))
```

Setting fields:

- `db` — name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` — name of the table and schema, if it exists.
- `connection_string` — connection string.
- `invalidate_query` — query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external-dicts-dict-lifetime.md).

ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so the table name must be set with the same letter case as in the database.

If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) item.

### Known vulnerability of the ODBC dictionary functionality {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei}

!!! attention "Attention"
@ -464,6 +424,51 @@ LIFETIME(MIN 300 MAX 360)

## DBMS {#subd}

### ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

Configuration example:

``` xml
<source>
    <odbc>
        <db>DatabaseName</db>
        <table>SchemaName.TableName</table>
        <connection_string>DSN=some_parameters</connection_string>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM SchemaName.TableName</query>
    </odbc>
</source>
```

or

``` sql
SOURCE(ODBC(
    db 'DatabaseName'
    table 'SchemaName.TableName'
    connection_string 'DSN=some_parameters'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

Setting fields:

- `db` — name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` — name of the table and schema, if it exists.
- `connection_string` — connection string.
- `invalidate_query` — query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external-dicts-dict-lifetime.md).
- `query` — custom query. Optional parameter.

!!! info "Note"
    The `table` and `query` fields cannot be used together. One of the data sources must be declared: either `table` or `query`.

ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so the table name must be set with the same letter case as in the database.

If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/integration/oracle-odbc.md) item.
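The same source can also be declared directly in a `CREATE DICTIONARY` statement. A minimal sketch with placeholder DSN, table, and attribute names (the layout and lifetime values are illustrative, not from the original page):

``` sql
CREATE DICTIONARY odbc_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(ODBC(connection_string 'DSN=some_parameters' table 'SchemaName.TableName'))
LAYOUT(HASHED())
LIFETIME(MIN 300 MAX 360);
```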
### MySQL {#dicts-external_dicts_dict_sources-mysql}

Configuration example:
@ -487,6 +492,7 @@ LIFETIME(MIN 300 MAX 360)
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```
@ -505,6 +511,7 @@ SOURCE(MYSQL(
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
@ -531,6 +538,11 @@ SOURCE(MYSQL(

- `fail_on_connection_loss` — configuration parameter that controls the server behavior on connection loss. If `true`, an exception is thrown immediately when the connection between the client and the server is lost. If `false`, the server retries the query three times before throwing an exception. Note that retries lead to longer query execution times. Default value: `false`.

- `query` — custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field. One of the data sources must be declared: either `table` or `query`.

MySQL can be connected to on a local host via sockets; to do this, set `host` and `socket`.

Configuration example:
@ -547,6 +559,7 @@ MySQL can be connected to on a local host via sockets
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <fail_on_connection_loss>true</fail_on_connection_loss>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </mysql>
</source>
```
@ -564,6 +577,7 @@ SOURCE(MYSQL(
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    fail_on_connection_loss 'true'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
@ -582,6 +596,7 @@ SOURCE(MYSQL(
        <table>ids</table>
        <where>id=10</where>
        <secure>1</secure>
        <query>SELECT id, value_1, value_2 FROM default.ids</query>
    </clickhouse>
</source>
```
@ -598,6 +613,7 @@ SOURCE(CLICKHOUSE(
    table 'ids'
    where 'id=10'
    secure 1
    query 'SELECT id, value_1, value_2 FROM default.ids'
));
```
@ -612,6 +628,10 @@ SOURCE(CLICKHOUSE(
- `where` — selection condition. May be omitted.
- `invalidate_query` — query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](external-dicts-dict-lifetime.md).
- `secure` — flag that enables or disables a secure SSL connection.
- `query` — custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field. One of the data sources must be declared: either `table` or `query`.

### MongoDB {#dicts-external_dicts_dict_sources-mongodb}
@ -703,25 +723,30 @@ SOURCE(REDIS(
    <consistency>One</consistency>
    <where>"SomeColumn" = 42</where>
    <max_threads>8</max_threads>
    <query>SELECT id, value_1, value_2 FROM database_name.table_name</query>
</cassandra>
</source>
```

Setting fields:

- `host` — hostname with a Cassandra installation or a comma-separated list of hosts.
- `port` — port on the Cassandra servers. If not specified, the default value is used: 9042.
- `user` — user name for connecting to Cassandra.
- `password` — password for connecting to Cassandra.
- `keyspace` — name of the keyspace (database).
- `column_family` — name of the column family (table).
- `allow_filering` — flag that allows or disallows potentially expensive conditions on clustering key columns. Default value: 1.
- `partition_key_prefix` — number of partition key columns in the primary key of the Cassandra table.
  Required for composing dictionary keys. The order of the key columns in the dictionary definition must be the same as in Cassandra.
  Default value: 1 (the first key column is the partition key, the remaining key columns are clustering keys).
- `consistency` — consistency level. Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value: `One`.
- `where` — optional selection criterion.
- `max_threads` — maximum number of threads for loading data from several partitions into the dictionary.
- `query` — custom query. Optional parameter.

!!! info "Note"
    The `column_family` or `where` fields cannot be used together with the `query` field. One of the data sources must be declared: either `column_family` or `query`.
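For reference, the `query` variant in DDL form might look like this (host, keyspace, and column names are placeholders; a sketch, not from the original page):

``` sql
CREATE DICTIONARY cassandra_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CASSANDRA(host 'localhost' port 9042 keyspace 'ks'
    query 'SELECT id, value FROM ks.table_name'))
LAYOUT(HASHED())
LIFETIME(MIN 300 MAX 360);
```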
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
@ -737,6 +762,7 @@ SOURCE(REDIS(
        <table>table_name</table>
        <where>id=10</where>
        <invalidate_query>SQL_QUERY</invalidate_query>
        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
    </postgresql>
</source>
```
@ -755,20 +781,25 @@ SOURCE(POSTGRESQL(
    replica(host 'example01-2' port 5432 priority 2)
    where 'id=10'
    invalidate_query 'SQL_QUERY'
    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```

Setting fields:

- `host` — host for connecting to PostgreSQL. You can specify it for all replicas or individually for each one (inside `<replica>`).
- `port` — port for connecting to PostgreSQL. You can specify it for all replicas or individually for each one (inside `<replica>`).
- `user` — user name for connecting to PostgreSQL. You can specify it for all replicas or individually for each one (inside `<replica>`).
- `password` — password for the PostgreSQL user.
- `replica` — replica configuration section. There can be several.
    - `replica/host` — PostgreSQL host.
    - `replica/port` — PostgreSQL port.
    - `replica/priority` — replica priority. When attempting to connect, ClickHouse goes through the replicas in priority order. A lower value means a higher priority.
- `db` — database name.
- `table` — table name.
- `where` — selection condition. The syntax is the same as for a `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter.
- `invalidate_query` — query for checking the dictionary loading condition. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
- `query` — custom query. Optional parameter.

!!! info "Note"
    The `table` or `where` fields cannot be used together with the `query` field. One of the data sources must be declared: either `table` or `query`.
@ -5,7 +5,7 @@ toc_title: "Bit functions"

# Bit functions {#bitovye-funktsii}

Bit functions work for any pair of types from `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, `Float64`.

The result type is an integer with bit width equal to the maximum bit width of the arguments. If at least one argument is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64.
@ -19,8 +19,100 @@ toc_title: "Bit functions"

## bitShiftLeft(a, b) {#bitshiftlefta-b}

Shifts the binary representation of a value to the left by the specified number of bits.

If an argument of type `FixedString` or `String` is passed, it is treated as a single multi-byte value.

Bits of a `FixedString` value are lost as they are shifted out beyond the string. A `String` value is extended with bytes, so its bits are not lost.

**Syntax**

``` sql
bitShiftLeft(a, b)
```

**Arguments**

- `a` — value to shift. [Integer](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — shift amount. [Unsigned integer](../../sql-reference/data-types/int-uint.md); types of up to 64 bits are allowed.

**Returned value**

- Shifted value.

The type is the same as the type of the value being shifted.

**Example**

The queries use the [bin](encoding-functions.md#bin) and [hex](encoding-functions.md#hex) functions to show the bits after the shift.

``` sql
SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐
│ 99 │ 01100011 │       140 │ 10001100                 │
└────┴──────────┴───────────┴──────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐
│ abc │ 616263     │ &0        │ 06162630                    │
└─────┴────────────┴───────────┴─────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐
│ abc │ 616263                       │ &0        │ 162630                                        │
└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘
```
## bitShiftRight(a, b) {#bitshiftrighta-b}

Shifts the binary representation of a value to the right by the specified number of bits.

If an argument of type `FixedString` or `String` is passed, it is treated as a single multi-byte value. The length of a `String` value decreases as bits are shifted out.

**Syntax**

``` sql
bitShiftRight(a, b)
```

**Arguments**

- `a` — value to shift. [Integer](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — shift amount. [Unsigned integer](../../sql-reference/data-types/int-uint.md); types of up to 64 bits are allowed.

**Returned value**

- Shifted value.

The type is the same as the type of the value being shifted.

**Example**

Query:

``` sql
SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
```

Result:

``` text
┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐
│ 101 │ 01100101 │        25 │ 00011001                   │
└─────┴──────────┴───────────┴────────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐
│ abc │ 616263     │           │ 0616                          │
└─────┴────────────┴───────────┴───────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐
│ abc │ 616263                       │           │ 000616                                          │
└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘
```
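A quick sanity check combining both shifts: for an unsigned integer, shifting left and then right by the same amount restores the value as long as no set bits were shifted out of the type (an illustrative query, not from the original page):

``` sql
SELECT 9 AS a, bin(a), bitShiftLeft(a, 2) AS shifted, bitShiftRight(shifted, 2) AS back;
-- back = 9: 00001001 -> 00100100 -> 00001001
```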
## bitTest {#bittest}

Takes any integer and converts it to [binary form](https://en.wikipedia.org/wiki/Binary_number); returns the value of the bit at the specified position. Counting starts from 0, right to left.
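For example, 43 is `101011` in binary, so bit 1 (counting from zero on the right) is set (an illustrative query):

``` sql
SELECT bitTest(43, 1); -- returns 1
```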
@ -227,6 +227,42 @@ SELECT h3ToGeo(644325524701193974) coordinates;
└───────────────────────────────────────┘
```

## h3ToGeoBoundary {#h3togeoboundary}

Returns an array of `(lon, lat)` pairs corresponding to the boundary of the specified H3 index.

**Syntax**

``` sql
h3ToGeoBoundary(h3Index)
```

**Arguments**

- `h3Index` — H3 index. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- Array of `(lon, lat)` pairs.
Type: [Array](../../../sql-reference/data-types/array.md)([Float64](../../../sql-reference/data-types/float.md), [Float64](../../../sql-reference/data-types/float.md)).

**Example**

Query:

``` sql
SELECT h3ToGeoBoundary(644325524701193974) AS coordinates;
```

Result:

``` text
┌─coordinates────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ [(37.2713558667319,-121.91508032705622),(37.353926450852256,-121.8622232890249),(37.42834118609435,-121.92354999630156),(37.42012867767779,-122.03773496427027),(37.33755608435299,-122.090428929044),(37.26319797461824,-122.02910130919001)] │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

## h3kRing {#h3kring}

Returns the [H3](#h3index) indexes of the hexagons within radius `k` of the given hexagon, in arbitrary order.
@ -216,6 +216,44 @@ SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \
SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)];
```

## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys}

Parses a JSON string and extracts the keys.

**Syntax**

``` sql
JSONExtractKeys(json[, a, b, c...])
```

**Arguments**

- `json` — [String](../data-types/string.md) with valid JSON.
- `a, b, c...` — comma-separated indices or keys that specify the path to the inner field in a nested JSON object. Each argument can be either a [String](../data-types/string.md) to get the field by key or an [Int](../data-types/int-uint.md) to get the N-th field (indexing starts from 1, negative numbers count from the end). If not set, the whole JSON is parsed as the top-level object. Optional parameter.

**Returned values**

Array with the keys of the JSON.

Type: [Array](../data-types/array.md)([String](../data-types/string.md)).

**Example**

Query:

```sql
SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}');
```

Result:

```text
┌─JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}')─┐
│ ['a','b']                                                  │
└────────────────────────────────────────────────────────────┘
```
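The path arguments also let you take the keys of a nested object (an illustrative query, not from the original page):

``` sql
SELECT JSONExtractKeys('{"a": {"x": 1, "y": 2}}', 'a'); -- ['x','y']
```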
## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys}

Returns a part of the JSON as a string containing an unparsed substring.
@ -2411,3 +2411,39 @@ SELECT getOSKernelVersion();
│ Linux 4.15.0-55-generic │
└─────────────────────────┘
```

## zookeeperSessionUptime {#zookeepersessionuptime}

Returns the uptime of the current ZooKeeper session, in seconds.

**Syntax**

``` sql
zookeeperSessionUptime()
```

**Arguments**

- None.

**Returned value**

- Uptime of the current ZooKeeper session, in seconds.

Type: [UInt32](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT zookeeperSessionUptime();
```

Result:

``` text
┌─zookeeperSessionUptime()─┐
│                      286 │
└──────────────────────────┘
```
@ -212,6 +212,9 @@ SET mutations_sync = 2;
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple();
INSERT INTO tmp SELECT * FROM system.numbers LIMIT 10;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);

ALTER TABLE tmp MATERIALIZE COLUMN s;

SELECT groupArray(x), groupArray(s) FROM tmp;
```
58
docs/ru/sql-reference/statements/alter/comment.md
Normal file
@ -0,0 +1,58 @@
---
toc_priority: 51
toc_title: COMMENT
---

# ALTER TABLE … MODIFY COMMENT {#alter-modify-comment}

Adds, changes, or removes a comment on a table, regardless of whether it was set before. The comment change is reflected both in the [system.tables](../../../operations/system-tables/tables.md) system table and in the output of `SHOW CREATE TABLE`.

**Syntax**

``` sql
ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment'
```

**Examples**

Creating a table with a comment (for more information, see the [COMMENT](../../../sql-reference/statements/create/table.md#comment-table) clause):

``` sql
CREATE TABLE table_with_comment
(
    `k` UInt64,
    `s` String
)
ENGINE = Memory()
COMMENT 'The temporary table';
```

Changing the comment:

``` sql
ALTER TABLE table_with_comment MODIFY COMMENT 'new comment on a table';
SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment';
```

Output of the new comment:

```text
┌─comment────────────────┐
│ new comment on a table │
└────────────────────────┘
```

Removing the comment:

``` sql
ALTER TABLE table_with_comment MODIFY COMMENT '';
SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment';
```

Output of the removed comment:

```text
┌─comment─┐
│         │
└─────────┘
```
@ -41,6 +41,8 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN
- [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md)
- [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md)

The [ALTER TABLE ... MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, changes, or removes a comment on a table, regardless of whether it was set before.

### Mutations {#mutations}

Mutations are a kind of ALTER query for changing or deleting data in a table. Unlike the standard [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md) queries, which are intended for point changes, mutations are meant for heavy changes that affect many rows in a table. They are supported for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family of table engines, including engines with replication.
@ -165,7 +165,7 @@ ALTER TABLE table_name CLEAR INDEX index_name IN PARTITION partition_expr

## FREEZE PARTITION {#alter_freeze-partition}

``` sql
ALTER TABLE table_name FREEZE [PARTITION partition_expr]
ALTER TABLE table_name FREEZE [PARTITION partition_expr] [WITH NAME 'backup_name']
```

Creates a backup of the specified partition. If the `PARTITION` clause is omitted, backups are created for all partitions at once.
@ -179,6 +179,7 @@ ALTER TABLE table_name FREEZE [PARTITION partition_expr]

- `/var/lib/clickhouse/` — the working ClickHouse directory specified in the config file;
- `N` — the incremental number of the backup.
- if the `WITH NAME` parameter is specified, the value of the `'backup_name'` parameter is used instead of the incremental number.

!!! note "Note"
    When [multiple disks are used for data storage](../../statements/alter/index.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on each disk that holds data parts matched by the `PARTITION` expression.
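With the new clause, a named backup for a hypothetical table `visits` would land in `/var/lib/clickhouse/shadow/my_backup/` instead of a numbered directory (a sketch):

``` sql
ALTER TABLE visits FREEZE WITH NAME 'my_backup';
```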
@ -5,8 +5,12 @@ toc_title: "Dictionary"

# CREATE DICTIONARY {#create-dictionary-query}

Creates an [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with the given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md), and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

**Syntax**

``` sql
CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
CREATE DICTIONARY [OR REPLACE][IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster]
(
    key1 type1 [DEFAULT|EXPRESSION expr1] [IS_OBJECT_ID],
    key2 type2 [DEFAULT|EXPRESSION expr2],
@ -17,14 +21,77 @@ PRIMARY KEY key1, key2
SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN]))
LAYOUT(LAYOUT_NAME([param_name param_value]))
LIFETIME({MIN min_val MAX max_val | max_val})
SETTINGS(setting_name = setting_value, setting_name = setting_value, ...)
COMMENT 'Comment'
```

The structure of the external dictionary consists of attributes. Dictionary attributes are specified in the same way as table columns. The only required attribute property is its type; all other properties may have default values.

Depending on the dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md), one or more attributes can be specified as dictionary keys.

For more information, see the [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section.

You can add a comment to the dictionary when you create it, using the `COMMENT` clause.

**Example**

Input table `source_table`:

``` text
┌─id─┬─value──┐
│  1 │ First  │
│  2 │ Second │
└────┴────────┘
```

Creating the dictionary:

``` sql
CREATE DICTIONARY dictionary_with_comment
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000)
COMMENT 'The temporary dictionary';
```

Output of the dictionary:

``` sql
SHOW CREATE DICTIONARY dictionary_with_comment;
```

```text
┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ CREATE DICTIONARY default.dictionary_with_comment
(
    `id` UInt64,
    `value` String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
LIFETIME(MIN 0 MAX 1000)
LAYOUT(FLAT())
COMMENT 'The temporary dictionary' │
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

Output of the comment to the dictionary:

``` sql
SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase();
```

```text
┌─comment──────────────────┐
│ The temporary dictionary │
└──────────────────────────┘
```

**See also**

- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — this table contains information about [external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
@ -5,7 +5,22 @@ toc_title: merge

# merge {#merge}

`merge(db_name, 'tables_regexp')` creates a temporary [Merge](../../engines/table-engines/special/merge.md) table. The table structure is taken from the first table encountered that matches the regular expression.

**Syntax**

```sql
merge('db_name', 'tables_regexp')
```

**Arguments**

- `db_name` — possible variants:
    - database name,
    - an expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression for matching database names.

- `tables_regexp` — a regular expression for table names in the specified database or databases.
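Illustrative usage under these rules (table and database names are placeholders):

```sql
-- All tables in the current database whose names start with 'hits_'.
SELECT count() FROM merge(currentDatabase(), '^hits_');

-- The same pattern across every database matching '^db_'.
SELECT count() FROM merge(REGEXP('^db_'), '^hits_');
```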
**See also**

- The [Merge](../../engines/table-engines/special/merge.md) table engine
@ -1,6 +1,6 @@
## How is ClickHouse documentation generated? {#how-clickhouse-documentation-is-generated}

ClickHouse documentation is built using the [build.py](build.py) script, which uses the [mkdocs](https://www.mkdocs.org) library and its dependencies to separately build all versions of the documentation (all languages, in both single- and multi-page mode) as static HTML and then a PDF for each single-page version. The results are then put in the correct directory structure. It is recommended to use Python 3.7 to run this script.
ClickHouse documentation is built using the [build.py](build.py) script, which uses the [mkdocs](https://www.mkdocs.org) library and its dependencies to separately build all versions of the documentation (all languages, in both single- and multi-page mode) as static HTML for each single-page version. The results are then put in the correct directory structure. It is recommended to use Python 3.7 to run this script.

[release.sh](release.sh) also pulls static files needed for the [official ClickHouse website](https://clickhouse.com) from the [../../website](../../website) folder, then pushes them to the specified GitHub repo to be served via [GitHub Pages](https://pages.github.com).
@ -22,16 +22,12 @@ It’ll take some effort to go through, but the result will be very close to pro

The first time, you’ll need to:

#### 1. Install [wkhtmltopdf](https://wkhtmltopdf.org/)

Follow the instructions on its official website: <https://wkhtmltopdf.org/downloads.html>

#### 2. Install CLI tools from npm
#### 1. Install CLI tools from npm

1. `sudo apt-get install npm` for Debian/Ubuntu or `brew install npm` on Mac OS X.
2. `sudo npm install -g purify-css amphtml-validator`.

#### 3. Set up virtualenv
#### 2. Set up virtualenv

``` bash
$ cd ClickHouse/docs/tools
@ -41,7 +37,7 @@ $ source venv/bin/activate
$ pip3 install -r requirements.txt
```

#### 4. Run build.py
#### 3. Run build.py

When all prerequisites are installed, running `build.py` without args (there are some, check `build.py --help`) will generate the `ClickHouse/docs/build` folder with a complete static HTML website.
@ -117,6 +117,9 @@ def build_for_lang(lang, args):
        )
    )

    # Clean to be safe if last build finished abnormally
    single_page.remove_temporary_files(lang, args)

    raw_config['nav'] = nav.build_docs_nav(lang, args)

    cfg = config.load_config(**raw_config)
@ -189,7 +192,6 @@ if __name__ == '__main__':
    arg_parser.add_argument('--skip-multi-page', action='store_true')
    arg_parser.add_argument('--skip-single-page', action='store_true')
    arg_parser.add_argument('--skip-amp', action='store_true')
    arg_parser.add_argument('--skip-pdf', action='store_true')
    arg_parser.add_argument('--skip-website', action='store_true')
    arg_parser.add_argument('--skip-blog', action='store_true')
    arg_parser.add_argument('--skip-git-log', action='store_true')
@ -225,7 +227,6 @@ if __name__ == '__main__':
        args.skip_multi_page = True
        args.skip_blog = True
        args.skip_website = True
        args.skip_pdf = True
        args.skip_amp = True

    if args.skip_git_log or args.skip_amp:
@ -12,6 +12,7 @@ import test
import util
import website

TEMPORARY_FILE_NAME = 'single.md'

def recursive_values(item):
    if isinstance(item, dict):
@ -101,6 +102,14 @@ def concatenate(lang, docs_path, single_page_file, nav):

    single_page_file.flush()

def get_temporary_file_name(lang, args):
    return os.path.join(args.docs_dir, lang, TEMPORARY_FILE_NAME)

def remove_temporary_files(lang, args):
    single_md_path = get_temporary_file_name(lang, args)
    if os.path.exists(single_md_path):
        os.unlink(single_md_path)


def build_single_page_version(lang, args, nav, cfg):
    logging.info(f'Building single page version for {lang}')
@ -109,7 +118,7 @@ def build_single_page_version(lang, args, nav, cfg):
    extra['single_page'] = True
    extra['is_amp'] = False

    single_md_path = os.path.join(args.docs_dir, lang, 'single.md')
    single_md_path = get_temporary_file_name(lang, args)
    with open(single_md_path, 'w') as single_md:
        concatenate(lang, args.docs_dir, single_md, nav)
@ -186,45 +195,6 @@ def build_single_page_version(lang, args, nav, cfg):
test.test_single_page(
os.path.join(test_dir, 'single', 'index.html'), lang)

if not args.skip_pdf:
single_page_index_html = os.path.join(test_dir, 'single', 'index.html')
single_page_pdf = os.path.abspath(
os.path.join(single_page_output_path, f'clickhouse_{lang}.pdf')
)

with open(single_page_index_html, 'r') as f:
soup = bs4.BeautifulSoup(
f.read(),
features='html.parser'
)
soup_prefix = f'file://{test_dir}'
for img in soup.findAll('img'):
if img['src'].startswith('/'):
img['src'] = soup_prefix + img['src']
for script in soup.findAll('script'):
script_src = script.get('src')
if script_src:
script['src'] = soup_prefix + script_src.split('?', 1)[0]
for link in soup.findAll('link'):
link['href'] = soup_prefix + link['href'].split('?', 1)[0]

with open(single_page_index_html, 'w') as f:
f.write(str(soup))

create_pdf_command = [
'wkhtmltopdf',
'--print-media-type',
'--log-level', 'warn',
single_page_index_html, single_page_pdf
]

logging.info(' '.join(create_pdf_command))
try:
subprocess.check_call(' '.join(create_pdf_command), shell=True)
except:
pass # TODO: fix pdf issues

logging.info(f'Finished building single page version for {lang}')

if os.path.exists(single_md_path):
os.unlink(single_md_path)
remove_temporary_files(lang, args)
@ -1,82 +1,74 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 54
toc_title: "\u6D4B\u8BD5\u786C\u4EF6"
---

# How to Test Your Hardware with ClickHouse {#how-to-test-your-hardware-with-clickhouse}

You can run a basic ClickHouse performance test on any server without installing the ClickHouse packages.

1. Go to the "commits" page: https://github.com/ClickHouse/ClickHouse/commits/master

2. Click the first green check mark or red cross with the green "ClickHouse Build Check" and click the "Details" link near "ClickHouse Build Check". Some commits have no such link, for example commits with documentation; in that case, choose the most recent commit that has the link.

## Automated Run

3. Copy the link to the "clickhouse" binary for amd64 or aarch64.

You can run the benchmark with a simple script.

4. ssh to the server and download it with wget:

1. Download the script:
```
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/hardware.sh
```

<!-- -->

2. Run the script:
```
chmod a+x ./hardware.sh
./hardware.sh
```

# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse
# For aarch64:
wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse
# Then do:
chmod a+x clickhouse

3. Copy the output and send it to feedback@clickhouse.com

1. Download the configs:

All the results are published here: https://clickhouse.com/benchmark/hardware/

<!-- -->

wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
mkdir config.d
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
## Manual Run

1. Download benchmark files:

Alternatively, you can carry out the benchmark following the steps below.
```bash
# For amd64:
wget https://builds.clickhouse.com/master/amd64/clickhouse
# For aarch64:
wget https://builds.clickhouse.com/master/aarch64/clickhouse
# Then do:
chmod a+x clickhouse
```
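
Before continuing, it may be worth confirming that the downloaded binary actually runs on your machine (a sanity check, not part of the official steps):

```bash
./clickhouse client --version
```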

<!-- -->

2. Download the benchmark files:
```bash
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
```

wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql

3. Download the test data according to the instructions in the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) (the "hits" table contains 100 million rows).
```bash
wget https://datasets.clickhouse.com/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
mv hits_100m_obfuscated_v1/* .
```

1. Download the test data following the [Yandex.Metrica dataset](../getting-started/example-datasets/metrica.md) instructions (the "hits" table contains 100 million rows).

4. Run the server:
```bash
./clickhouse server
```

<!-- -->

5. Check the data: ssh to the server in another terminal
```bash
./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
100000000
```
6. Run the benchmark:
```bash
./benchmark-new.sh hits_100m_obfuscated
```

wget https://datasets.clickhouse.com/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
mv hits_100m_obfuscated_v1/* .

7. Send the model and information about your hardware configuration to clickhouse-feedback@yandex-team.com

1. Run the server:

<!-- -->

./clickhouse server

1. Check the data: ssh to the server in another terminal

<!-- -->

./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated"
100000000

1. Edit benchmark-new.sh, change `clickhouse-client` to `./clickhouse client` and add the `--max_memory_usage 100000000000` parameter.

<!-- -->

mcedit benchmark-new.sh
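
A non-interactive equivalent of this edit, if you prefer not to use mcedit (a sketch that assumes benchmark-new.sh invokes plain `clickhouse-client`):

```bash
sed -i 's|clickhouse-client|./clickhouse client --max_memory_usage 100000000000|g' benchmark-new.sh
```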

1. Run the benchmark:

<!-- -->

./benchmark-new.sh hits_100m_obfuscated

1. Send the numbers and information about your hardware configuration to clickhouse-feedback@yandex-team.com

All the results are published here: https://clickhouse.tech/benchmark/hardware/
All the results are published here: https://clickhouse.com/benchmark/hardware/

@ -1,6 +1,4 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 58
toc_title: "\u67E5\u8BE2\u6743\u9650"
---

@ -1,32 +1,30 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 61
toc_title: "\u8BBE\u7F6E\u914D\u7F6E\u6587\u4EF6"
toc_title: "\u8BBE\u7F6E\u914D\u7F6E"
---

# Settings Profiles {#settings-profiles}

A settings profile is a collection of settings grouped under the same name.

!!! note "Info"
    ClickHouse also supports the [SQL-driven workflow](../access-rights.md#access-control) for managing settings profiles. We recommend using it.
    ClickHouse also supports managing settings profiles with the [SQL-driven workflow](../../operations/access-rights.md#access-control). We recommend using it.
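
For illustration, a minimal sketch of that SQL-driven approach, run from the shell (the `web_readonly` profile and `web_user` names are hypothetical; see the access-rights page for the exact syntax):

```bash
clickhouse-client --query "CREATE SETTINGS PROFILE web_readonly SETTINGS readonly = 1"
clickhouse-client --query "CREATE USER web_user SETTINGS PROFILE 'web_readonly'"
```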

Settings profiles can have arbitrary names. You can assign the same profile to different users. The most important thing you can write in a settings profile is `readonly=1`, which ensures read-only access.

Settings profiles can inherit from each other. To use inheritance, specify one or more `profile` settings before the other settings that are listed in the profile. If one setting is defined in different profiles, the latest definition is used.

To apply all the settings in a profile, set the `profile` setting.

Example:

Install the `web` profile:

``` sql
SET profile = 'web'
```

Settings profiles are declared in the user config file. This is usually `users.xml`.

Example:

@ -72,10 +70,10 @@ SET profile = 'web'
</profiles>
```

The example specifies two profiles: `default` and `web`.

The `default` profile has a special purpose: it must always exist and is applied when the server starts. In other words, the `default` profile contains default settings.

The `web` profile is a regular profile that can be set with the `SET` query or with a URL parameter in an HTTP query.

[Original article](https://clickhouse.com/docs/en/operations/settings/settings_profiles/) <!--hide-->
@ -60,10 +60,7 @@

namespace CurrentMetrics
{
extern const Metric Revision;
extern const Metric VersionInteger;
extern const Metric MemoryTracking;
extern const Metric MaxDDLEntryID;
}

namespace fs = std::filesystem;
@ -330,7 +327,9 @@ std::vector<String> Client::loadWarningMessages()
{
std::vector<String> messages;
connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */,
QueryProcessingStage::Complete, nullptr, nullptr, false);
QueryProcessingStage::Complete,
&global_context->getSettingsRef(),
&global_context->getClientInfo(), false);
while (true)
{
Packet packet = connection->receivePacket();
@ -429,6 +428,7 @@ try

processConfig();

/// Includes delayed_interactive.
if (is_interactive)
{
clearTerminal();
@ -437,28 +437,28 @@ try

connect();

if (is_interactive)
/// Load Warnings at the beginning of connection
if (is_interactive && !config().has("no-warnings"))
{
/// Load Warnings at the beginning of connection
if (!config().has("no-warnings"))
try
{
try
std::vector<String> messages = loadWarningMessages();
if (!messages.empty())
{
std::vector<String> messages = loadWarningMessages();
if (!messages.empty())
{
std::cout << "Warnings:" << std::endl;
for (const auto & message : messages)
std::cout << " * " << message << std::endl;
std::cout << std::endl;
}
}
catch (...)
{
/// Ignore exception
std::cout << "Warnings:" << std::endl;
for (const auto & message : messages)
std::cout << " * " << message << std::endl;
std::cout << std::endl;
}
}
catch (...)
{
/// Ignore exception
}
}

if (is_interactive && !delayed_interactive)
{
runInteractive();
}
else
@ -482,6 +482,9 @@ try
// case so that at least we don't lose an error.
return -1;
}

if (delayed_interactive)
runInteractive();
}

return 0;
@ -555,8 +558,7 @@ void Client::connect()
if (is_interactive)
{
std::cout << "Connected to " << server_name << " server version " << server_version << " revision " << server_revision << "."
<< std::endl
<< std::endl;
<< std::endl << std::endl;

auto client_version_tuple = std::make_tuple(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH);
auto server_version_tuple = std::make_tuple(server_version_major, server_version_minor, server_version_patch);
@ -1156,11 +1158,11 @@ void Client::processConfig()
/// - stdin is not a terminal. In this case queries are read from it.
/// - -qf (--queries-file) command line option is present.
/// The value of the option is used as file with query (or of multiple queries) to execute.
if (stdin_is_a_tty && !config().has("query") && queries_files.empty())
{
if (config().has("query") && config().has("queries-file"))
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);

delayed_interactive = config().has("interactive") && (config().has("query") || config().has("queries-file"));
if (stdin_is_a_tty
&& (delayed_interactive || (!config().has("query") && queries_files.empty())))
{
is_interactive = true;
}
else