Merge branch 'master' of github.com:ClickHouse/ClickHouse into intersect-except-fix

Commit: dead99011b

#### .github/workflows/main.yml (178 changed lines, vendored)

@@ -21,7 +21,6 @@ jobs:

```yaml
          python3 run_check.py
  DockerHubPush:
    needs: CheckLabels
    # the hunk removes exactly one line (-7/+6); since the DocsCheck job added below
    # must run for documentation PRs, it is most likely this label guard that was dropped:
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
```

@@ -47,25 +46,61 @@ jobs:

```yaml
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Style Check
        env:
          TEMP_PATH: ${{ runner.temp }}/style_check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 style_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  DocsCheck:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docs_check
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Docs Check
        env:
          TEMP_PATH: ${{runner.temp}}/docs_check
          REPO_COPY: ${{runner.temp}}/docs_check/ClickHouse
        run: |
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 docs_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebDebug:
    needs: DockerHubPush
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path  # changed from ${{ runner.temp }}/build_check
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
```

@@ -80,6 +115,12 @@ jobs:

```yaml
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderReport:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, style-checker]
```

@@ -100,8 +141,133 @@ jobs:

```yaml
          mkdir -p $TEMP_PATH
          cd $GITHUB_WORKSPACE/tests/ci
          python3 build_report_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  ASTFuzzerTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Fuzzer
        env:
          TEMP_PATH: ${{runner.temp}}/ast_fuzzer_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'AST fuzzer (debug, actions)'
          REPO_COPY: ${{runner.temp}}/ast_fuzzer_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 ast_fuzzer_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FastTest:
    needs: DockerHubPush
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
    runs-on: [self-hosted, builder]
    steps:
      - name: Check out repository code
```

@@ -116,8 +282,14 @@ jobs:

```yaml
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 fast_test_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FinishCheck:
    # changed from: needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest]
    needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest, FunctionalStatelessTestDebug, FunctionalStatefulTestDebug, DocsCheck, StressTestDebug, ASTFuzzerTestDebug]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
```

#### .github/workflows/release.yml (55 changed lines, vendored, new file)

@@ -0,0 +1,55 @@

```yaml
name: DocsReleaseChecks
concurrency:
  group: master-release
  cancel-in-progress: true
on: # yamllint disable-line rule:truthy
  push:
    branches:
      - master
    paths:
      - 'docs/**'
      - 'website/**'
      - 'benchmark/**'
      - 'docker/**'
jobs:
  DockerHubPush:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docker_images_check/changed_images.json
  DocsRelease:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{runner.temp}}/docs_release
      - name: Docs Release
        env:
          TEMP_PATH: ${{runner.temp}}/docs_release
          REPO_COPY: ${{runner.temp}}/docs_release/ClickHouse
          CLOUDFLARE_TOKEN: ${{secrets.CLOUDFLARE}}
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
        run: |
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 docs_release.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
```

#### .gitmodules (2 changed lines, vendored)

@@ -76,7 +76,7 @@

```diff
 	url = https://github.com/ClickHouse-Extras/libcxxabi.git
 [submodule "contrib/snappy"]
 	path = contrib/snappy
-	url = https://github.com/google/snappy
+	url = https://github.com/ClickHouse-Extras/snappy.git
 [submodule "contrib/cppkafka"]
 	path = contrib/cppkafka
 	url = https://github.com/mfontanini/cppkafka.git
```

#### CHANGELOG.md (266 changed lines)

@@ -1,3 +1,269 @@

### ClickHouse release v21.11, 2021-11-07

#### Backward Incompatible Change

* Change order of json_path and json arguments in SQL/JSON functions (to be consistent with the standard). Closes [#30449](https://github.com/ClickHouse/ClickHouse/issues/30449). [#30474](https://github.com/ClickHouse/ClickHouse/pull/30474) ([Kseniia Sumarokova](https://github.com/kssenii)).
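  A hedged illustration of the new argument order (assuming the standard-conforming order puts the JSON document first and the path second):

  ```sql
  -- document first, path second
  SELECT JSON_VALUE('{"a": {"b": 1}}', '$.a.b');
  ```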
* Remove `MergeTree` table setting `write_final_mark`. It is now always `true`. [#30455](https://github.com/ClickHouse/ClickHouse/pull/30455) ([Kseniia Sumarokova](https://github.com/kssenii)). No action is required; all tables are compatible with the new version.
* Function `bayesAB` is removed. Please help to return this function back, refreshed. This closes [#26233](https://github.com/ClickHouse/ClickHouse/issues/26233). [#29934](https://github.com/ClickHouse/ClickHouse/pull/29934) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* This is relevant only if you already started using the experimental `clickhouse-keeper` support. ClickHouse Keeper snapshots are now compressed with the `ZSTD` codec by default instead of custom ClickHouse LZ4 block compression. This behavior can be turned off with the `compress_snapshots_with_zstd_format` coordination setting (it must be equal on all quorum replicas). Backward incompatibility is quite rare and may happen only when a new node sends a snapshot (which happens during recovery) to an old node that is unable to read snapshots in ZSTD format. [#29417](https://github.com/ClickHouse/ClickHouse/pull/29417) ([alesapin](https://github.com/alesapin)).

#### New Feature

* New asynchronous INSERT mode allows accumulating inserted data and storing it in a single batch in the background. On the client side it can be enabled by the setting `async_insert` for `INSERT` queries with data inlined in the query or in a separate buffer (e.g. for `INSERT` queries via the HTTP protocol). If `wait_for_async_insert` is true (the default), the client will wait until the data is flushed to the table. On the server side it is controlled by the settings `async_insert_threads`, `async_insert_max_data_size` and `async_insert_busy_timeout_ms`. Implements [#18282](https://github.com/ClickHouse/ClickHouse/issues/18282). [#27537](https://github.com/ClickHouse/ClickHouse/pull/27537) ([Anton Popov](https://github.com/CurtizJ)). [#20557](https://github.com/ClickHouse/ClickHouse/pull/20557) ([Ivan](https://github.com/abyss7)). Notes on performance: with asynchronous inserts you can do up to around 10 000 individual INSERT queries per second, so it is still recommended to insert in batches if you want to achieve performance of up to millions of inserted rows per second.
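  A minimal sketch of enabling it per session, using the settings named above (table `t` is hypothetical):

  ```sql
  SET async_insert = 1;           -- buffer small INSERTs server-side
  SET wait_for_async_insert = 1;  -- default: return only after the buffer is flushed
  INSERT INTO t VALUES (1, 'a');
  ```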
* Add interactive mode for `clickhouse-local`. So, you can just run `clickhouse-local` to get a command-line ClickHouse interface without connecting to a server, and process data from files and external data sources. Also merge the code of `clickhouse-client` and `clickhouse-local` together. Closes [#7203](https://github.com/ClickHouse/ClickHouse/issues/7203). Closes [#25516](https://github.com/ClickHouse/ClickHouse/issues/25516). Closes [#22401](https://github.com/ClickHouse/ClickHouse/issues/22401). [#26231](https://github.com/ClickHouse/ClickHouse/pull/26231) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added support for executable (scriptable) user-defined functions. These are UDFs that can be written in any programming language. [#28803](https://github.com/ClickHouse/ClickHouse/pull/28803) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow predefined connections to external data sources. This avoids the need to specify credentials or addresses when using external data sources; they can be referenced by name instead. Closes [#28367](https://github.com/ClickHouse/ClickHouse/issues/28367). [#28577](https://github.com/ClickHouse/ClickHouse/pull/28577) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added `INFORMATION_SCHEMA` database with `SCHEMATA`, `TABLES`, `VIEWS` and `COLUMNS` views to the corresponding tables in the `system` database. Closes [#9770](https://github.com/ClickHouse/ClickHouse/issues/9770). [#28691](https://github.com/ClickHouse/ClickHouse/pull/28691) ([tavplubix](https://github.com/tavplubix)).
* Support `EXISTS (subquery)`. Closes [#6852](https://github.com/ClickHouse/ClickHouse/issues/6852). [#29731](https://github.com/ClickHouse/ClickHouse/pull/29731) ([Kseniia Sumarokova](https://github.com/kssenii)).
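  For example, a self-contained query against the built-in `numbers` table:

  ```sql
  SELECT EXISTS (SELECT * FROM numbers(10) WHERE number > 8);  -- returns 1
  ```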
* Session logging for audit: log all successful and failed login and logout events to a new `system.session_log` table. [#22415](https://github.com/ClickHouse/ClickHouse/pull/22415) ([Vasily Nemkov](https://github.com/Enmk)) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support multidimensional cosine distance and Euclidean distance functions; L1, L2, Lp, Linf distances and norms; scalar product on tuples and various arithmetic operators on tuples. This fully closes [#4509](https://github.com/ClickHouse/ClickHouse/issues/4509) and even more. [#27933](https://github.com/ClickHouse/ClickHouse/pull/27933) ([Alexey Boykov](https://github.com/mathalex)).
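  A sketch assuming the function names introduced by this PR:

  ```sql
  SELECT L2Distance((0, 0), (3, 4));      -- 5: Euclidean distance between tuples
  SELECT cosineDistance((1, 0), (0, 1));  -- 1: the vectors are orthogonal
  ```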
* Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetection or with an additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
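  For instance (table and file name are hypothetical; the codec is autodetected from the extension):

  ```sql
  SELECT * FROM t INTO OUTFILE 'result.csv.gz';  -- gzip inferred from '.gz'
  ```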
* Add CORS (Cross-Origin Resource Sharing) support with HTTP `OPTIONS` request. It means that now Grafana will work with serverless requests without kludges. Closes [#18693](https://github.com/ClickHouse/ClickHouse/issues/18693). [#29155](https://github.com/ClickHouse/ClickHouse/pull/29155) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Queries with JOIN ON now support disjunctions (OR). [#21320](https://github.com/ClickHouse/ClickHouse/pull/21320) ([Ilya Golshtein](https://github.com/ilejn)).
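  For example (tables and columns are hypothetical):

  ```sql
  SELECT *
  FROM t1
  JOIN t2 ON t1.id = t2.id OR t1.alt_id = t2.id;
  ```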
* Added function `tokens`, which splits a string into tokens using non-alphanumeric ASCII characters as separators. [#29981](https://github.com/ClickHouse/ClickHouse/pull/29981) ([Maksim Kita](https://github.com/kitaisreal)). Added function `ngrams` to extract ngrams from text. Closes [#29699](https://github.com/ClickHouse/ClickHouse/issues/29699). [#29738](https://github.com/ClickHouse/ClickHouse/pull/29738) ([Maksim Kita](https://github.com/kitaisreal)).
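  For example:

  ```sql
  SELECT tokens('Hello, world! 123');  -- ['Hello', 'world', '123']
  SELECT ngrams('ClickHouse', 3);      -- ['Cli', 'lic', 'ick', 'ckH', ...]
  ```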
* Add functions for Unicode normalization: `normalizeUTF8NFC`, `normalizeUTF8NFD`, `normalizeUTF8NFKC` and `normalizeUTF8NFKD`. [#28633](https://github.com/ClickHouse/ClickHouse/pull/28633) ([darkkeks](https://github.com/darkkeks)).
* Streaming consumption of application log files in ClickHouse with the `FileLog` table engine. It's like the `Kafka` or `RabbitMQ` engine but for append-only and rotated logs in the local filesystem. Closes [#6953](https://github.com/ClickHouse/ClickHouse/issues/6953). [#25969](https://github.com/ClickHouse/ClickHouse/pull/25969) ([flynn](https://github.com/ucasfl)) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `CapnProto` output format, refactor `CapnProto` input format. [#29291](https://github.com/ClickHouse/ClickHouse/pull/29291) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow writing numbers in queries as binary literals, e.g. `SELECT 0b001;`. [#29304](https://github.com/ClickHouse/ClickHouse/pull/29304) ([Maksim Kita](https://github.com/kitaisreal)).
* Added `hashed_array` dictionary type. It saves memory when using dictionaries with multiple attributes. Closes [#30236](https://github.com/ClickHouse/ClickHouse/issues/30236). [#30242](https://github.com/ClickHouse/ClickHouse/pull/30242) ([Maksim Kita](https://github.com/kitaisreal)).
* Added `JSONExtractKeys` function. [#30056](https://github.com/ClickHouse/ClickHouse/pull/30056) ([Vitaly](https://github.com/orloffv)).
* Add a function `getOSKernelVersion` that returns a string with the OS kernel version. [#29755](https://github.com/ClickHouse/ClickHouse/pull/29755) ([Memo](https://github.com/Joeywzr)).
* Added `MD4` and `SHA384` functions. MD4 is an obsolete and insecure hash function; it can be used only in rare cases when MD4 is already being used in some legacy system and you need to get exactly the same result. [#29602](https://github.com/ClickHouse/ClickHouse/pull/29602) ([Nikita Tikhomirov](https://github.com/NSTikhomirov)).
* HSTS can be enabled for the ClickHouse HTTP server by setting `hsts_max_age` in the configuration file to a positive number. [#29516](https://github.com/ClickHouse/ClickHouse/pull/29516) ([凌涛](https://github.com/lingtaolf)).
* Huawei OBS Storage support. Closes [#24294](https://github.com/ClickHouse/ClickHouse/issues/24294). [#29511](https://github.com/ClickHouse/ClickHouse/pull/29511) ([kevin wan](https://github.com/MaxWk)).
* New function `mapContainsKeyLike`, which checks whether a map contains a key matching a simple regular expression. [#29471](https://github.com/ClickHouse/ClickHouse/pull/29471) ([凌涛](https://github.com/lingtaolf)). New function `mapExtractKeyLike`, which returns the map keeping only the elements whose key matches the specified pattern. [#30793](https://github.com/ClickHouse/ClickHouse/pull/30793) ([凌涛](https://github.com/lingtaolf)).
* Implemented `ALTER TABLE x MODIFY COMMENT`. [#29264](https://github.com/ClickHouse/ClickHouse/pull/29264) ([Vasily Nemkov](https://github.com/Enmk)).
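  For example (`x` as in the entry above):

  ```sql
  ALTER TABLE x MODIFY COMMENT 'new table comment';
  ```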
* Adds H3 inspection functions that are missing from ClickHouse but are available via the H3 API: https://h3geo.org/docs/api/inspection. [#29209](https://github.com/ClickHouse/ClickHouse/pull/29209) ([Bharat Nallan](https://github.com/bharatnc)).
* Allow non-replicated `ALTER TABLE FETCH` and `ATTACH` in `Replicated` databases. [#29202](https://github.com/ClickHouse/ClickHouse/pull/29202) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Added a setting `output_format_csv_null_representation`: this is the same as `output_format_tsv_null_representation` but for CSV output. [#29123](https://github.com/ClickHouse/ClickHouse/pull/29123) ([PHO](https://github.com/depressed-pho)).
* Added function `zookeeperSessionUptime()` which returns the uptime of the current ZooKeeper session in seconds. [#28983](https://github.com/ClickHouse/ClickHouse/pull/28983) ([tavplubix](https://github.com/tavplubix)).
* Implements the `h3ToGeoBoundary` function. [#28952](https://github.com/ClickHouse/ClickHouse/pull/28952) ([Ivan Veselov](https://github.com/fuzzERot)).
* Add aggregate function `exponentialMovingAverage` that can be used as a window function. This closes [#27511](https://github.com/ClickHouse/ClickHouse/issues/27511). [#28914](https://github.com/ClickHouse/ClickHouse/pull/28914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
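  A hedged sketch, assuming the parametric form `exponentialMovingAverage(half_decay)(value, time)` and a hypothetical table `t`:

  ```sql
  SELECT exponentialMovingAverage(10)(value, time)
         OVER (ORDER BY time ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS ema
  FROM t;
  ```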
* Allow to include subcolumns of table columns into `DESCRIBE` query result (can be enabled by setting `describe_include_subcolumns`). [#28905](https://github.com/ClickHouse/ClickHouse/pull/28905) ([Anton Popov](https://github.com/CurtizJ)).
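  For example (hypothetical table `t`; the exact set of subcolumns shown depends on the column types):

  ```sql
  DESCRIBE TABLE t SETTINGS describe_include_subcolumns = 1;
  -- e.g. a Nullable column n would additionally list its n.null subcolumn
  ```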
* `Executable` and `ExecutablePool` added the option `send_chunk_header`. If this option is true, the number of rows in the chunk followed by a line break is sent to the client before each chunk. [#28833](https://github.com/ClickHouse/ClickHouse/pull/28833) ([Maksim Kita](https://github.com/kitaisreal)).
* `tokenbf_v1` and `ngrambf_v1` indices support `Map` with keys of `String` or `FixedString` type. This enhances data skipping in queries with a filter on a map key:

  ```sql
  CREATE TABLE map_tokenbf
  (
      row_id UInt32,
      map Map(String, String),
      INDEX map_tokenbf map TYPE ngrambf_v1(4, 256, 2, 0) GRANULARITY 1
  )
  ENGINE = MergeTree()
  ORDER BY row_id;
  ```

  With the table above, the query `SELECT * FROM map_tokenbf WHERE map['K'] = 'V'` will skip granules that do not contain the key `K`. How many rows are skipped depends on the `GRANULARITY` and `index_granularity` you set. [#28511](https://github.com/ClickHouse/ClickHouse/pull/28511) ([凌涛](https://github.com/lingtaolf)).
* Send profile events from the server to the client. A new packet type `ProfileEvents` was introduced. Closes [#26177](https://github.com/ClickHouse/ClickHouse/issues/26177). [#28364](https://github.com/ClickHouse/ClickHouse/pull/28364) ([Dmitry Novik](https://github.com/novikd)).
* Bit shift operations for `FixedString` and `String` data types. This closes [#27763](https://github.com/ClickHouse/ClickHouse/issues/27763). [#28325](https://github.com/ClickHouse/ClickHouse/pull/28325) ([小路](https://github.com/nicelulu)).
* Support adding/deleting tables to replication from PostgreSQL dynamically in database engine `MaterializedPostgreSQL`. Support `ALTER` for database settings. Closes [#27573](https://github.com/ClickHouse/ClickHouse/issues/27573). [#28301](https://github.com/ClickHouse/ClickHouse/pull/28301) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added function `accurateCastOrDefault(x, T)`. Closes [#21330](https://github.com/ClickHouse/ClickHouse/issues/21330). Author: @taiyang-li. [#23028](https://github.com/ClickHouse/ClickHouse/pull/23028) ([Maksim Kita](https://github.com/kitaisreal)).
* Add functions `toUUIDOrDefault`, `toUInt8/16/32/64/256OrDefault`, `toInt8/16/32/64/128/256OrDefault`, which let the user define a default (non-NULL) value to return when string parsing fails. [#21330](https://github.com/ClickHouse/ClickHouse/pull/21330) ([taiyang-li](https://github.com/taiyang-li)).
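  A hedged example (assuming the default value is passed with the target type):

  ```sql
  SELECT toUInt32OrDefault('not a number', CAST(42 AS UInt32));  -- 42 instead of an exception
  ```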

#### Performance Improvement

* Background merges can be preempted by each other and they are scheduled with appropriate priorities. Now long-running merges won't prevent short merges from proceeding. This is needed for better scheduling and control of merge execution. It reduces the chances of getting a "too many parts" error. [#22381](https://github.com/ClickHouse/ClickHouse/issues/22381). [#25165](https://github.com/ClickHouse/ClickHouse/pull/25165) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Added an ability to execute more merges and mutations than the number of threads in the background pool. Merges and mutations will be executed step by step according to their sizes (lower is more prioritized). The ratio of the number of tasks to threads to execute is controlled by the setting `background_merges_mutations_concurrency_ratio`, 2 by default. [#29140](https://github.com/ClickHouse/ClickHouse/pull/29140) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow to use asynchronous reads for remote filesystems. Lower the number of seeks while reading from remote filesystems. It improves performance tremendously and makes the experimental `web` and `s3` disks work faster than EBS under certain conditions. [#29205](https://github.com/ClickHouse/ClickHouse/pull/29205) ([Kseniia Sumarokova](https://github.com/kssenii)). In the meantime, the `web` disk type (static dataset hosted on a web server) has graduated from experimental to production ready.
* Queries with `INTO OUTFILE` in `clickhouse-client` will use multiple threads. Fix the issue with a flickering progress bar when using `INTO OUTFILE`. This closes [#30873](https://github.com/ClickHouse/ClickHouse/issues/30873). This closes [#30872](https://github.com/ClickHouse/ClickHouse/issues/30872). [#30886](https://github.com/ClickHouse/ClickHouse/pull/30886) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Reduce the amount of redundant compressed data read from disk for some types of `SELECT` queries (only for the `MergeTree` engine family). [#30111](https://github.com/ClickHouse/ClickHouse/pull/30111) ([alesapin](https://github.com/alesapin)).
* Remove some redundant `seek` calls while reading compressed blocks in the `MergeTree` table engine family. [#29766](https://github.com/ClickHouse/ClickHouse/pull/29766) ([alesapin](https://github.com/alesapin)).
* Make the `url` table function process multiple URLs in parallel. This closes [#29670](https://github.com/ClickHouse/ClickHouse/issues/29670) and closes [#29671](https://github.com/ClickHouse/ClickHouse/issues/29671). [#29673](https://github.com/ClickHouse/ClickHouse/pull/29673) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of aggregation in order of primary key (with the setting `optimize_aggregation_in_order` enabled). [#30266](https://github.com/ClickHouse/ClickHouse/pull/30266) ([Anton Popov](https://github.com/CurtizJ)).
* ClickHouse now uses a DNS cache while communicating with external S3. [#29999](https://github.com/ClickHouse/ClickHouse/pull/29999) ([alesapin](https://github.com/alesapin)).
* Add support for pushdown of `IS NULL`/`IS NOT NULL` to external databases (e.g. MySQL). [#29463](https://github.com/ClickHouse/ClickHouse/pull/29463) ([Azat Khuzhin](https://github.com/azat)). Transform `isNull`/`isNotNull` to `IS NULL`/`IS NOT NULL` (for external DBs, e.g. MySQL). [#29446](https://github.com/ClickHouse/ClickHouse/pull/29446) ([Azat Khuzhin](https://github.com/azat)).
* SELECT queries from `Dictionary` tables will use multiple threads. [#30500](https://github.com/ClickHouse/ClickHouse/pull/30500) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of filtering (the WHERE operation) on `Decimal` columns. [#30431](https://github.com/ClickHouse/ClickHouse/pull/30431) ([Jun Jin](https://github.com/vesslanjin)).
* Replace branchy code in the filter operation with a better implementation using popcnt/ctz, which has better performance. [#29881](https://github.com/ClickHouse/ClickHouse/pull/29881) ([Jun Jin](https://github.com/vesslanjin)).
* Improve the filter bytemask generator (used for the WHERE operator) with SSE/AVX2/AVX512 instructions. Note that by default ClickHouse only uses SSE, so this is only relevant for custom builds. [#30014](https://github.com/ClickHouse/ClickHouse/pull/30014) ([jasperzhu](https://github.com/jinjunzh)). [#30670](https://github.com/ClickHouse/ClickHouse/pull/30670) ([jasperzhu](https://github.com/jinjunzh)).
* Improve the performance of the SUM aggregate function for Nullable floating point numbers. [#28906](https://github.com/ClickHouse/ClickHouse/pull/28906) ([Raúl Marín](https://github.com/Algunenano)).
* Speed up the part loading process when multiple disks are in use. The idea is similar to https://github.com/ClickHouse/ClickHouse/pull/16423. A production environment shows an improvement from 24 min to 16 min. [#28363](https://github.com/ClickHouse/ClickHouse/pull/28363) ([Amos Bird](https://github.com/amosbird)).
* Reduce the default settings for the S3 multipart upload part size to lower memory usage. [#28679](https://github.com/ClickHouse/ClickHouse/pull/28679) ([ianton-ru](https://github.com/ianton-ru)).
* Speed up the `bitmapAnd` function. [#28332](https://github.com/ClickHouse/ClickHouse/pull/28332) ([dddounaiking](https://github.com/OodounaikingoO)).
* Removed sub-optimal mutation notifications in `StorageMergeTree` while merges are still running. [#27552](https://github.com/ClickHouse/ClickHouse/pull/27552) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Attempt to improve the performance of string comparison. [#28767](https://github.com/ClickHouse/ClickHouse/pull/28767) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* The primary key index and partition filter can now work with tuples. [#29281](https://github.com/ClickHouse/ClickHouse/pull/29281) ([凌涛](https://github.com/lingtaolf)).
* If a query has multiple quantile aggregate functions with the same arguments but a different level parameter, they will be fused together and executed in one pass if the setting `optimize_syntax_fuse_functions` is enabled. [#26657](https://github.com/ClickHouse/ClickHouse/pull/26657) ([hexiaoting](https://github.com/hexiaoting)).
* Min-max aggregation over the first expression of the primary key is now optimized by projection. This is for [#329](https://github.com/ClickHouse/ClickHouse/issues/329). [#29918](https://github.com/ClickHouse/ClickHouse/pull/29918) ([Amos Bird](https://github.com/amosbird)).

#### Experimental Feature

* Add ability to change node configuration (in the `.xml` file) for ClickHouse Keeper. [#30372](https://github.com/ClickHouse/ClickHouse/pull/30372) ([alesapin](https://github.com/alesapin)).
* Add `sparkbar` aggregate function. This closes [#26175](https://github.com/ClickHouse/ClickHouse/issues/26175). [#27481](https://github.com/ClickHouse/ClickHouse/pull/27481) ([小路](https://github.com/nicelulu)). Note: there is one flaw in this function; the behaviour will be changed in future releases.

#### Improvement

* Allow the user to change log levels without a restart. [#29586](https://github.com/ClickHouse/ClickHouse/pull/29586) ([Nikolay Degterinsky](https://github.com/evillique)).
* Multiple improvements for SQL UDFs. Queries for manipulation of SQL user-defined functions now support the ON CLUSTER clause, e.g. `CREATE FUNCTION test_function ON CLUSTER 'cluster' AS x -> x + 1;`. Closes [#30666](https://github.com/ClickHouse/ClickHouse/issues/30666). [#30734](https://github.com/ClickHouse/ClickHouse/pull/30734) ([Maksim Kita](https://github.com/kitaisreal)). Support `CREATE OR REPLACE` and `CREATE IF NOT EXISTS` syntax. [#30454](https://github.com/ClickHouse/ClickHouse/pull/30454) ([Maksim Kita](https://github.com/kitaisreal)). Added DROP IF EXISTS support, e.g. `DROP FUNCTION IF EXISTS test_function`. [#30437](https://github.com/ClickHouse/ClickHouse/pull/30437) ([Maksim Kita](https://github.com/kitaisreal)). Support lambdas, e.g. `CREATE FUNCTION lambda_function AS x -> arrayMap(element -> element * 2, x);`. [#30435](https://github.com/ClickHouse/ClickHouse/pull/30435) ([Maksim Kita](https://github.com/kitaisreal)). Support SQL user-defined functions for `clickhouse-local`. [#30179](https://github.com/ClickHouse/ClickHouse/pull/30179) ([Maksim Kita](https://github.com/kitaisreal)).
* Enable the per-query memory profiler (`memory_profiler_step` = 4MiB) globally. [#29455](https://github.com/ClickHouse/ClickHouse/pull/29455) ([Azat Khuzhin](https://github.com/azat)).
* Added columns `data_compressed_bytes`, `data_uncompressed_bytes`, `marks_bytes` to `system.data_skipping_indices`. Added columns `secondary_indices_compressed_bytes`, `secondary_indices_uncompressed_bytes`, `secondary_indices_marks_bytes` to `system.parts`. Closes [#29697](https://github.com/ClickHouse/ClickHouse/issues/29697). [#29896](https://github.com/ClickHouse/ClickHouse/pull/29896) ([Maksim Kita](https://github.com/kitaisreal)).
* Add a `table` alias to `system.tables` and a `database` alias to `system.databases`. [#29677](https://github.com/ClickHouse/ClickHouse/issues/29677). [#29882](https://github.com/ClickHouse/ClickHouse/pull/29882) ([kevin wan](https://github.com/MaxWk)).
* Correctly resolve interdependencies between tables on server startup. Closes [#8004](https://github.com/ClickHouse/ClickHouse/issues/8004), closes [#15170](https://github.com/ClickHouse/ClickHouse/issues/15170). [#28373](https://github.com/ClickHouse/ClickHouse/pull/28373) ([tavplubix](https://github.com/tavplubix)).
* Avoid the error "Division by zero" when the denominator is Nullable in the functions `divide`, `intDiv` and `modulo`. Closes [#22621](https://github.com/ClickHouse/ClickHouse/issues/22621). [#28352](https://github.com/ClickHouse/ClickHouse/pull/28352) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow parsing values of the `Date` data type in text formats as `YYYYMMDD` in addition to `YYYY-MM-DD`. This closes [#30870](https://github.com/ClickHouse/ClickHouse/issues/30870). [#30871](https://github.com/ClickHouse/ClickHouse/pull/30871) ([alexey-milovidov](https://github.com/alexey-milovidov)).
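  A hedged example (assuming the relaxed parsing also applies to `toDate`):

  ```sql
  SELECT toDate('20211107') = toDate('2021-11-07');  -- 1
  ```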
* Web UI: render bars in table cells. [#29792](https://github.com/ClickHouse/ClickHouse/pull/29792) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Users can now create dictionaries with comments: `CREATE DICTIONARY ... COMMENT 'value'`. [#29899](https://github.com/ClickHouse/ClickHouse/pull/29899) ([Vasily Nemkov](https://github.com/Enmk)). Users can now also set comments on a database in the `CREATE DATABASE` statement. [#29429](https://github.com/ClickHouse/ClickHouse/pull/29429) ([Vasily Nemkov](https://github.com/Enmk)).
* Introduce the `compiled_expression_cache_elements_size` setting. If you ever want to use this setting, you will already know what it does. [#30667](https://github.com/ClickHouse/ClickHouse/pull/30667) ([Maksim Kita](https://github.com/kitaisreal)).
* `clickhouse-format` now supports the option `--query`. In previous versions you had to pass the query via stdin. [#29325](https://github.com/ClickHouse/ClickHouse/pull/29325) ([凌涛](https://github.com/lingtaolf)).
* Support `ALTER TABLE` for tables in `Memory` databases. Memory databases are used in `clickhouse-local`. [#30866](https://github.com/ClickHouse/ClickHouse/pull/30866) ([tavplubix](https://github.com/tavplubix)).
* Arrays of all serializable types are now supported by `arrayStringConcat`. [#30840](https://github.com/ClickHouse/ClickHouse/pull/30840) ([Nickita Taranov](https://github.com/nickitat)).
* ClickHouse now takes docker/cgroups limitations into account when determining the amount of system memory. See [#25662](https://github.com/ClickHouse/ClickHouse/issues/25662). [#30574](https://github.com/ClickHouse/ClickHouse/pull/30574) ([Pavel Medvedev](https://github.com/pmed)).
* Fetching the table structure for a PostgreSQL database is more reliable now. [#30477](https://github.com/ClickHouse/ClickHouse/pull/30477) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Full support of positional arguments in GROUP BY and ORDER BY. [#30433](https://github.com/ClickHouse/ClickHouse/pull/30433) ([Kseniia Sumarokova](https://github.com/kssenii)).
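  A hedged sketch (in 21.11 this is gated behind the `enable_positional_arguments` setting; table and columns are hypothetical):

  ```sql
  SET enable_positional_arguments = 1;
  SELECT category, count() FROM t GROUP BY 1 ORDER BY 2 DESC;  -- 1 = category, 2 = count()
  ```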
* Allow extracting a non-string element as a string using `JSONExtractString`. This is for [pull/25452#issuecomment-927123287](https://github.com/ClickHouse/ClickHouse/pull/25452#issuecomment-927123287). [#30426](https://github.com/ClickHouse/ClickHouse/pull/30426) ([Amos Bird](https://github.com/amosbird)).
* Added the ability to use the FINAL clause in SELECT queries from `GraphiteMergeTree`. [#30360](https://github.com/ClickHouse/ClickHouse/pull/30360) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Minor improvements in replica cloning and enqueuing fetches for broken parts, which should avoid extremely rare hanging of `GET_PART` entries in the replication queue. [#30346](https://github.com/ClickHouse/ClickHouse/pull/30346) ([tavplubix](https://github.com/tavplubix)).
* Allow symlinks to files in the `user_files` directory for the file table function. [#30309](https://github.com/ClickHouse/ClickHouse/pull/30309) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed comparison of `Date32` with `Date`, `DateTime`, `DateTime64` and `String`. [#30219](https://github.com/ClickHouse/ClickHouse/pull/30219) ([liang.huang](https://github.com/lhuang09287750)).
* Allow removing the `SAMPLE BY` expression from `MergeTree` tables (`ALTER TABLE <table> REMOVE SAMPLE BY`). [#30180](https://github.com/ClickHouse/ClickHouse/pull/30180) ([Anton Popov](https://github.com/CurtizJ)).
* Now `Keeper` (as part of `clickhouse-server`) will start asynchronously if it can connect to some other node. [#30170](https://github.com/ClickHouse/ClickHouse/pull/30170) ([alesapin](https://github.com/alesapin)).
* Now `clickhouse-client` supports native multi-line editing. [#30143](https://github.com/ClickHouse/ClickHouse/pull/30143) ([Amos Bird](https://github.com/amosbird)).
* `polygon` dictionaries (reverse geocoding): added support for reading the dictionary content with the SELECT query method if the setting `store_polygon_key_column` = true. Closes [#30090](https://github.com/ClickHouse/ClickHouse/issues/30090). [#30142](https://github.com/ClickHouse/ClickHouse/pull/30142) ([Maksim Kita](https://github.com/kitaisreal)).
* Add the ClickHouse logo to the Play UI. [#29674](https://github.com/ClickHouse/ClickHouse/pull/29674) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Better exception message while reading columns from Arrow-supported formats like `Arrow`, `ArrowStream`, `Parquet` and `ORC`. This closes [#29926](https://github.com/ClickHouse/ClickHouse/issues/29926). [#29927](https://github.com/ClickHouse/ClickHouse/pull/29927) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix a data race between flush and startup in `Buffer` tables. This can appear in tests. [#29930](https://github.com/ClickHouse/ClickHouse/pull/29930) ([Azat Khuzhin](https://github.com/azat)).
* Fix `lock-order-inversion` between `DROP TABLE` for `DatabaseMemory` and `LiveView`. Live View is an experimental feature. The Memory database is used in clickhouse-local. [#29929](https://github.com/ClickHouse/ClickHouse/pull/29929) ([Azat Khuzhin](https://github.com/azat)).
* Fix lock-order-inversion between periodic dictionary reload and config reload. [#29928](https://github.com/ClickHouse/ClickHouse/pull/29928) ([Azat Khuzhin](https://github.com/azat)).
* Update zoneinfo files to 2021c. [#29925](https://github.com/ClickHouse/ClickHouse/pull/29925) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add ability to configure retries and delays between them for `clickhouse-copier`. [#29921](https://github.com/ClickHouse/ClickHouse/pull/29921) ([Azat Khuzhin](https://github.com/azat)).
* Add a `shutdown_wait_unfinished_queries` server setting to allow waiting for running queries up to `shutdown_wait_unfinished` time. This is for [#24451](https://github.com/ClickHouse/ClickHouse/issues/24451). [#29914](https://github.com/ClickHouse/ClickHouse/pull/29914) ([Amos Bird](https://github.com/amosbird)).
* Add ability to trace peak memory usage (with the new trace_type `MemoryPeak` in `system.trace_log`). [#29858](https://github.com/ClickHouse/ClickHouse/pull/29858) ([Azat Khuzhin](https://github.com/azat)).
* PostgreSQL foreign tables: added the partitioned table prefix 'p' to the query for fetching the replica identity index. [#29828](https://github.com/ClickHouse/ClickHouse/pull/29828) ([Shoh Jahon](https://github.com/Shohjahon)).
* Apply `max_untracked_memory`/`memory_profiler_step`/`memory_profiler_sample_probability` during mutations/merges to profile memory usage during merges. [#29681](https://github.com/ClickHouse/ClickHouse/pull/29681) ([Azat Khuzhin](https://github.com/azat)).
* Query obfuscator: `clickhouse-format --obfuscate` now works with more types of queries. [#29672](https://github.com/ClickHouse/ClickHouse/pull/29672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the issue that `clickhouse-format --obfuscate` could not process queries with embedded dictionaries (functions `regionTo...`). [#29667](https://github.com/ClickHouse/ClickHouse/pull/29667) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect Nullable processing of JSON functions. This fixes [#29615](https://github.com/ClickHouse/ClickHouse/issues/29615). Marked as an improvement because https://github.com/ClickHouse/ClickHouse/pull/28012 is not released. [#29659](https://github.com/ClickHouse/ClickHouse/pull/29659) ([Amos Bird](https://github.com/amosbird)).
* Increase `listen_backlog` by default (to match the default in newer Linux kernels). [#29643](https://github.com/ClickHouse/ClickHouse/pull/29643) ([Azat Khuzhin](https://github.com/azat)).
* Reload dictionaries, models and user-defined executable functions if the server config values `dictionaries_config`, `models_config`, `user_defined_executable_functions_config` change. Closes [#28142](https://github.com/ClickHouse/ClickHouse/issues/28142). [#29529](https://github.com/ClickHouse/ClickHouse/pull/29529) ([Maksim Kita](https://github.com/kitaisreal)).
* Get rid of the pointless restriction on projection names. Now projection names can start with `tmp_`. [#29520](https://github.com/ClickHouse/ClickHouse/pull/29520) ([Amos Bird](https://github.com/amosbird)).
* Fixed the `There is no query or query context has expired` error in mutations with nested subqueries. Do not allow subqueries in mutations if the table is replicated and the `allow_nondeterministic_mutations` setting is disabled. [#29495](https://github.com/ClickHouse/ClickHouse/pull/29495) ([tavplubix](https://github.com/tavplubix)).
* Apply config changes to `max_concurrent_queries` during runtime (no need to restart). [#29414](https://github.com/ClickHouse/ClickHouse/pull/29414) ([Raúl Marín](https://github.com/Algunenano)).
* Added setting `use_skip_indexes`. [#29405](https://github.com/ClickHouse/ClickHouse/pull/29405) ([Maksim Kita](https://github.com/kitaisreal)).
* Add support for `FREEZE`ing in-memory parts (for backups). [#29376](https://github.com/ClickHouse/ClickHouse/pull/29376) ([Mo Xuan](https://github.com/mo-avatar)).
* Pass through the initial query_id for `clickhouse-benchmark` (previously, if you ran a remote query via `clickhouse-benchmark`, queries on shards were not linked to the initial query via `initial_query_id`). [#29364](https://github.com/ClickHouse/ClickHouse/pull/29364) ([Azat Khuzhin](https://github.com/azat)).
* Skip indexes `tokenbf_v1` and `ngrambf_v1`: added support for the `Array` data type with keys of `String` or `FixedString` type. [#29280](https://github.com/ClickHouse/ClickHouse/pull/29280) ([Maksim Kita](https://github.com/kitaisreal)). Skip indexes `tokenbf_v1` and `ngrambf_v1` added support for the `Map` data type with keys of `String` or `FixedString` type. Author @lingtaolf. [#29220](https://github.com/ClickHouse/ClickHouse/pull/29220) ([Maksim Kita](https://github.com/kitaisreal)).
* Function `has`: added support for the `Map` data type. [#29267](https://github.com/ClickHouse/ClickHouse/pull/29267) ([Maksim Kita](https://github.com/kitaisreal)).
* Add a `compress_logs` setting for clickhouse-keeper which allows compressing clickhouse-keeper logs (for the replicated state machine) with `ZSTD`. Implements [#26977](https://github.com/ClickHouse/ClickHouse/issues/26977). [#29223](https://github.com/ClickHouse/ClickHouse/pull/29223) ([alesapin](https://github.com/alesapin)).
* Add a setting `external_table_strict_query` - it will force passing the whole WHERE expression in queries to foreign databases even if it is incompatible. [#29206](https://github.com/ClickHouse/ClickHouse/pull/29206) ([Azat Khuzhin](https://github.com/azat)).
* Disable projections when `ARRAY JOIN` is used. In previous versions projection analysis could break aliases in array join. [#29139](https://github.com/ClickHouse/ClickHouse/pull/29139) ([Amos Bird](https://github.com/amosbird)).
* Support more types in the `MsgPack` input/output format. [#29077](https://github.com/ClickHouse/ClickHouse/pull/29077) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow input and output of `LowCardinality` columns in the `ORC` input/output format. [#29062](https://github.com/ClickHouse/ClickHouse/pull/29062) ([Kruglov Pavel](https://github.com/Avogar)).
* Select from `system.distributed_ddl_queue` might show incorrect values; it's fixed. [#29061](https://github.com/ClickHouse/ClickHouse/pull/29061) ([tavplubix](https://github.com/tavplubix)).
* Correct behaviour with unknown methods for HTTP connections. Solves [#29050](https://github.com/ClickHouse/ClickHouse/issues/29050). [#29057](https://github.com/ClickHouse/ClickHouse/pull/29057) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* `clickhouse-keeper`: fix a bug in `clickhouse-keeper-converter` which could lead to some data loss while restoring from ZooKeeper logs (not snapshots). [#29030](https://github.com/ClickHouse/ClickHouse/pull/29030) ([小路](https://github.com/nicelulu)). Fix a bug in `clickhouse-keeper-converter` which could lead to incorrect ZooKeeper log deserialization. [#29071](https://github.com/ClickHouse/ClickHouse/pull/29071) ([小路](https://github.com/nicelulu)).
* Apply settings from `CREATE ... AS SELECT` queries (fixes [#28810](https://github.com/ClickHouse/ClickHouse/issues/28810)). [#28962](https://github.com/ClickHouse/ClickHouse/pull/28962) ([Azat Khuzhin](https://github.com/azat)).
* Respect the default database setting for `ALTER TABLE ... ON CLUSTER ... REPLACE/MOVE PARTITION FROM/TO ...`. [#28955](https://github.com/ClickHouse/ClickHouse/pull/28955) ([anneji-dev](https://github.com/anneji-dev)).
* gRPC protocol: allow changing server-side compression from the client. [#28953](https://github.com/ClickHouse/ClickHouse/pull/28953) ([Vitaly Baranov](https://github.com/vitlibar)).
* Skip the "no data" exception when reading thermal sensors for asynchronous metrics. This closes [#28852](https://github.com/ClickHouse/ClickHouse/issues/28852). [#28882](https://github.com/ClickHouse/ClickHouse/pull/28882) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a logical race condition that might cause a `Dictionary not found` error for an existing dictionary in rare cases. [#28853](https://github.com/ClickHouse/ClickHouse/pull/28853) ([tavplubix](https://github.com/tavplubix)).
* Relax the nested-function check for the If-combinator (but forbid nested identical combinators). [#28828](https://github.com/ClickHouse/ClickHouse/pull/28828) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible uncaught exception during server termination. [#28761](https://github.com/ClickHouse/ClickHouse/pull/28761) ([Azat Khuzhin](https://github.com/azat)).
* Forbid cleaning of tmp directories that can be used by an active mutation/merge if the mutation/merge runs extraordinarily long. [#28760](https://github.com/ClickHouse/ClickHouse/pull/28760) ([Azat Khuzhin](https://github.com/azat)).
* Allow the optimization `optimize_arithmetic_operations_in_aggregate_functions = 1` when an alias is used. [#28746](https://github.com/ClickHouse/ClickHouse/pull/28746) ([Amos Bird](https://github.com/amosbird)).
* Implement the `detach_not_byte_identical_parts` setting for `ReplicatedMergeTree`, which will detach instead of remove parts that are not byte-identical (after merge/mutate). [#28708](https://github.com/ClickHouse/ClickHouse/pull/28708) ([Azat Khuzhin](https://github.com/azat)).
* Implement the `max_suspicious_broken_parts_bytes` setting for `MergeTree` (to limit the total size of all broken parts; the default is `1GiB`). [#28707](https://github.com/ClickHouse/ClickHouse/pull/28707) ([Azat Khuzhin](https://github.com/azat)).
* Enable expanding macros in `RabbitMQ` table settings. [#28683](https://github.com/ClickHouse/ClickHouse/pull/28683) ([Vitaly Baranov](https://github.com/vitlibar)).
* Restore the possibility to read data of a table using the `Log` engine in multiple threads. [#28125](https://github.com/ClickHouse/ClickHouse/pull/28125) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix misbehavior of NULL column handling in JSON functions. This fixes [#27930](https://github.com/ClickHouse/ClickHouse/issues/27930). [#28012](https://github.com/ClickHouse/ClickHouse/pull/28012) ([Amos Bird](https://github.com/amosbird)).
* Allow setting the size of the Mark/Uncompressed cache for skip indices separately from columns. [#27961](https://github.com/ClickHouse/ClickHouse/pull/27961) ([Amos Bird](https://github.com/amosbird)).
* Allow mixing JOIN with `USING` with other JOIN types. [#23881](https://github.com/ClickHouse/ClickHouse/pull/23881) ([darkkeks](https://github.com/darkkeks)).
* Update the aws-sdk submodule for throttling in Yandex Cloud S3. [#30646](https://github.com/ClickHouse/ClickHouse/pull/30646) ([ianton-ru](https://github.com/ianton-ru)).
* Fix releasing the query ID and session ID at the end of query processing while handling a gRPC call. [#29954](https://github.com/ClickHouse/ClickHouse/pull/29954) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix shutdown of `AccessControlManager` to fix a flaky test. [#29951](https://github.com/ClickHouse/ClickHouse/pull/29951) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a failed assertion in reading from `HDFS`. Update the libhdfs3 library to be able to run in tests in debug mode. Closes [#29251](https://github.com/ClickHouse/ClickHouse/issues/29251). Closes [#27814](https://github.com/ClickHouse/ClickHouse/issues/27814). [#29276](https://github.com/ClickHouse/ClickHouse/pull/29276) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Build/Testing/Packaging Improvement

* Add support for FreeBSD builds on AArch64 machines. [#29952](https://github.com/ClickHouse/ClickHouse/pull/29952) ([MikaelUrankar](https://github.com/MikaelUrankar)).
* Recursive submodules are no longer needed for ClickHouse. [#30315](https://github.com/ClickHouse/ClickHouse/pull/30315) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* ClickHouse can be statically built with Musl. This is added as an experiment; it does not support building `odbc-bridge`, `library-bridge`, integration with CatBoost and some libraries. [#30248](https://github.com/ClickHouse/ClickHouse/pull/30248) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Enable `Protobuf`, `Arrow`, `ORC`, `Parquet` for `AArch64` and `Darwin` (macOS) builds. This closes [#29248](https://github.com/ClickHouse/ClickHouse/issues/29248). This closes [#28018](https://github.com/ClickHouse/ClickHouse/issues/28018). [#30015](https://github.com/ClickHouse/ClickHouse/pull/30015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add cross-build for PowerPC (powerpc64le). This closes [#9589](https://github.com/ClickHouse/ClickHouse/issues/9589). Enable support for interaction with MySQL for AArch64 and PowerPC. This closes [#26301](https://github.com/ClickHouse/ClickHouse/issues/26301). [#30010](https://github.com/ClickHouse/ClickHouse/pull/30010) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Leave only the required files in cross-compile toolchains. Include them as submodules (earlier they were downloaded as tarballs). [#29974](https://github.com/ClickHouse/ClickHouse/pull/29974) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Implemented a structure-aware fuzzing approach in ClickHouse for the SELECT statement parser. [#30012](https://github.com/ClickHouse/ClickHouse/pull/30012) ([Paul](https://github.com/PaulCher)).
* Turn on the experimental constexpr expression evaluator for clang to speed up template code compilation. [#29668](https://github.com/ClickHouse/ClickHouse/pull/29668) ([myrrc](https://github.com/myrrc)).
* Add the ability to compile using a newer version of glibc without using new symbols. [#29594](https://github.com/ClickHouse/ClickHouse/pull/29594) ([Azat Khuzhin](https://github.com/azat)).
* Reduce the Debug build binary size with a clang optimization option. [#28736](https://github.com/ClickHouse/ClickHouse/pull/28736) ([flynn](https://github.com/ucasfl)).
* Now all images for CI will be placed in a separate Docker Hub repo. [#28656](https://github.com/ClickHouse/ClickHouse/pull/28656) ([alesapin](https://github.com/alesapin)).
* Improve support for building with clang-13. [#28046](https://github.com/ClickHouse/ClickHouse/pull/28046) ([Sergei Semin](https://github.com/syominsergey)).
* Add the ability to print raw profile events in `clickhouse-client` (this can be useful for debugging and for testing). [#30064](https://github.com/ClickHouse/ClickHouse/pull/30064) ([Azat Khuzhin](https://github.com/azat)).
* Add a time dependency for the clickhouse-server unit (systemd and sysvinit init). [#28891](https://github.com/ClickHouse/ClickHouse/pull/28891) ([Azat Khuzhin](https://github.com/azat)).
* Reload the stacktrace cache when a symbol is reloaded. [#28137](https://github.com/ClickHouse/ClickHouse/pull/28137) ([Amos Bird](https://github.com/amosbird)).
||||
#### Bug Fix
|
||||
|
||||
* Functions for case-insensitive search in UTF-8 strings like `positionCaseInsensitiveUTF8` and `countSubstringsCaseInsensitiveUTF8` might find substrings that actually does not match in very rare cases, it's fixed. [#30663](https://github.com/ClickHouse/ClickHouse/pull/30663) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Fix reading from empty file on encrypted disk. [#30494](https://github.com/ClickHouse/ClickHouse/pull/30494) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix transformation of disjunctions chain to `IN` (controlled by settings `optimize_min_equality_disjunction_chain_length`) in distributed queries with settings `legacy_column_name_of_tuple_literal = 0`. [#28658](https://github.com/ClickHouse/ClickHouse/pull/28658) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Allow using a materialized column as the sharding key in a distributed table even if `insert_allow_materialized_columns=0`:. [#28637](https://github.com/ClickHouse/ClickHouse/pull/28637) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix `ORDER BY ... WITH FILL` with set `TO` and `FROM` and no rows in result set. [#30888](https://github.com/ClickHouse/ClickHouse/pull/30888) ([Anton Popov](https://github.com/CurtizJ)).
* Fix the set index not being used in `AND`/`OR` expressions when there are more than two operands. This fixes [#30416](https://github.com/ClickHouse/ClickHouse/issues/30416). [#30887](https://github.com/ClickHouse/ClickHouse/pull/30887) ([Amos Bird](https://github.com/amosbird)).
* Fix a crash when a projection with a hashing function is materialized. This fixes [#30861](https://github.com/ClickHouse/ClickHouse/issues/30861). The issue is similar to https://github.com/ClickHouse/ClickHouse/pull/28560, which was a lack of proper understanding of the invariant of the header's emptiness. [#30877](https://github.com/ClickHouse/ClickHouse/pull/30877) ([Amos Bird](https://github.com/amosbird)).
* Fixed ambiguity when extracting an auxiliary ZooKeeper name from a ZooKeeper path in `ReplicatedMergeTree`. Previously the server might fail to start with `Unknown auxiliary ZooKeeper name` if the ZooKeeper path contained a colon. Fixes [#29052](https://github.com/ClickHouse/ClickHouse/issues/29052). Also, it was allowed to specify a ZooKeeper path that does not start with a slash, but this is now deprecated and creating new tables with such paths is not allowed. Slashes and colons in auxiliary ZooKeeper names are not allowed either. [#30822](https://github.com/ClickHouse/ClickHouse/pull/30822) ([tavplubix](https://github.com/tavplubix)).
* Clean up the temporary directory when `localBackup` fails for some reason. [#30797](https://github.com/ClickHouse/ClickHouse/pull/30797) ([ianton-ru](https://github.com/ianton-ru)).
* Fixed a race condition between `REPLACE/MOVE PARTITION` and a background merge in non-replicated `MergeTree` that might cause a part of the moved/replaced data to remain in the partition. Fixes [#29327](https://github.com/ClickHouse/ClickHouse/issues/29327). [#30717](https://github.com/ClickHouse/ClickHouse/pull/30717) ([tavplubix](https://github.com/tavplubix)).
* Fix `PREWHERE` with `WHERE` in case of an always-true `PREWHERE`. [#30668](https://github.com/ClickHouse/ClickHouse/pull/30668) ([Azat Khuzhin](https://github.com/azat)).
* The limit push-down optimization could cause the error `Cannot find column`. Fixes [#30438](https://github.com/ClickHouse/ClickHouse/issues/30438). [#30562](https://github.com/ClickHouse/ClickHouse/pull/30562) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add missing parentheses for `isNotNull`/`isNull` rewrites to `IS [NOT] NULL` (fixes queries that have something like `isNotNull(1)+isNotNull(2)`); see the sketch after this entry. [#30520](https://github.com/ClickHouse/ClickHouse/pull/30520) ([Azat Khuzhin](https://github.com/azat)).
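To see why the parentheses matter, here is a minimal sketch of the rewrite (hypothetical constants; without parentheses, the textual form `1 IS NOT NULL + 2 IS NOT NULL` would not parse as the intended sum of two boolean results):

```sql
-- The rewrite must produce parenthesized IS NOT NULL checks so that the
-- addition applies to the two boolean results, yielding 2 here.
SELECT (1 IS NOT NULL) + (2 IS NOT NULL);
```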
* Fix a deadlock on `ALTER` with a scalar subquery to the same table. Closes [#30461](https://github.com/ClickHouse/ClickHouse/issues/30461). [#30492](https://github.com/ClickHouse/ClickHouse/pull/30492) ([Vladimir C](https://github.com/vdimir)).
* Fixed a segfault that might happen if the session expired during execution of `REPLACE PARTITION`. [#30432](https://github.com/ClickHouse/ClickHouse/pull/30432) ([tavplubix](https://github.com/tavplubix)).
* Queries with a condition like `IN (subquery)` could return an incorrect result if an aggregate projection was applied. Fixed creation of sets for projections. [#30310](https://github.com/ClickHouse/ClickHouse/pull/30310) ([Amos Bird](https://github.com/amosbird)).
* Fix column alias resolution of `JOIN` queries when projections are enabled. This fixes [#30146](https://github.com/ClickHouse/ClickHouse/issues/30146). [#30293](https://github.com/ClickHouse/ClickHouse/pull/30293) ([Amos Bird](https://github.com/amosbird)).
* Fix a deficiency in the `replaceRegexpAll` function. [#30292](https://github.com/ClickHouse/ClickHouse/pull/30292) ([Memo](https://github.com/Joeywzr)).
* Fix parsing of the `preallocate` option from the layout config in `ComplexKeyHashedDictionary` and `ComplexKeySparseHashedDictionary`. [#30246](https://github.com/ClickHouse/ClickHouse/pull/30246) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix the `[I]LIKE` function. Closes [#28661](https://github.com/ClickHouse/ClickHouse/issues/28661). [#30244](https://github.com/ClickHouse/ClickHouse/pull/30244) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix a crash with short-circuit evaluation and `LowCardinality` in `multiIf`. [#30243](https://github.com/ClickHouse/ClickHouse/pull/30243) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the `bytes_allocated` calculation for nullable attributes in `FlatDictionary` and `HashedDictionary`. [#30238](https://github.com/ClickHouse/ClickHouse/pull/30238) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow identifiers starting with numbers in multiple joins. [#30230](https://github.com/ClickHouse/ClickHouse/pull/30230) ([Vladimir C](https://github.com/vdimir)).
* Fix reading from `MergeTree` with `max_read_buffer_size = 0` (set when the user wants to shoot themselves in the foot), which could lead to exceptions such as `Can't adjust last granule`, `LOGICAL_ERROR`, or even data loss. [#30192](https://github.com/ClickHouse/ClickHouse/pull/30192) ([Azat Khuzhin](https://github.com/azat)).
* Fix `pread_fake_async`/`pread_threadpool` with `min_bytes_to_use_direct_io`. [#30191](https://github.com/ClickHouse/ClickHouse/pull/30191) ([Azat Khuzhin](https://github.com/azat)).
* Fix `INSERT SELECT` incorrectly filling a `MATERIALIZED` column based on a `Nullable` column. [#30189](https://github.com/ClickHouse/ClickHouse/pull/30189) ([Azat Khuzhin](https://github.com/azat)).
* Support nullable arguments in the function `initializeAggregation`; a sketch follows. [#30177](https://github.com/ClickHouse/ClickHouse/pull/30177) ([Anton Popov](https://github.com/CurtizJ)).
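A minimal sketch of a now-supported call (assuming the usual pairing with `finalizeAggregation`, which collapses an aggregation state back into a plain value):

```sql
-- Nullable argument to initializeAggregation: builds a uniqState from a
-- Nullable value, then finalizes it back to a plain number (1).
SELECT finalizeAggregation(initializeAggregation('uniqState', toNullable(123)));
```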
* Fix the error `Port is already connected` for queries with `GLOBAL IN` and `WITH TOTALS` (see the sketch below). Only in 21.9 and 21.10. [#30086](https://github.com/ClickHouse/ClickHouse/pull/30086) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
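A minimal sketch of the affected query shape (hypothetical distributed table `dist` and table `ids`):

```sql
-- Combining GLOBAL IN with WITH TOTALS could fail with
-- "Port is already connected" on 21.9/21.10 before this fix.
SELECT key, count()
FROM dist
WHERE key GLOBAL IN (SELECT key FROM ids)
GROUP BY key WITH TOTALS;
```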
* Fix a race between `MOVE PARTITION` and merges/mutations for `MergeTree`. [#30074](https://github.com/ClickHouse/ClickHouse/pull/30074) ([Azat Khuzhin](https://github.com/azat)).
* A dropped `Memory` database might reappear after server restart; it's fixed ([#29795](https://github.com/ClickHouse/ClickHouse/issues/29795)). Also added the `force_remove_data_recursively_on_drop` setting as a workaround for the `Directory not empty` error when dropping an `Ordinary` database (because it's not possible to remove data leftovers manually in a cloud environment). [#30054](https://github.com/ClickHouse/ClickHouse/pull/30054) ([tavplubix](https://github.com/tavplubix)).
* Fix a crash of `SAMPLE BY tuple()`. Closes [#30004](https://github.com/ClickHouse/ClickHouse/issues/30004). [#30016](https://github.com/ClickHouse/ClickHouse/pull/30016) ([flynn](https://github.com/ucasfl)).
* Fix the issue described in [#29965](https://github.com/ClickHouse/ClickHouse/issues/29965). [#29976](https://github.com/ClickHouse/ClickHouse/pull/29976) ([hexiaoting](https://github.com/hexiaoting)).
* Fix a possible data race between `FileChecker` and `StorageLog`/`StorageStripeLog`. [#29959](https://github.com/ClickHouse/ClickHouse/pull/29959) ([Azat Khuzhin](https://github.com/azat)).
* Fix a data race between `LogSink::writeMarks()` and `LogSource` in `StorageLog`. [#29946](https://github.com/ClickHouse/ClickHouse/pull/29946) ([Azat Khuzhin](https://github.com/azat)).
* Fix a potential resource leak in the concurrent query limit of MergeTree tables introduced in https://github.com/ClickHouse/ClickHouse/pull/19544. [#29879](https://github.com/ClickHouse/ClickHouse/pull/29879) ([Amos Bird](https://github.com/amosbird)).
* Fix the system tables recreation check (it failed to detect changes in enum values). [#29857](https://github.com/ClickHouse/ClickHouse/pull/29857) ([Azat Khuzhin](https://github.com/azat)).
* MaterializedMySQL: Fix an issue where, if the connection to MySQL was lost, only parts of a transaction could be processed. [#29837](https://github.com/ClickHouse/ClickHouse/pull/29837) ([Håvard Kvålen](https://github.com/havardk)).
* Avoid the `Timeout exceeded: elapsed 18446744073.709553 seconds` error that might happen in extremely rare cases, presumably due to some bug in the kernel. Fixes [#29154](https://github.com/ClickHouse/ClickHouse/issues/29154). [#29811](https://github.com/ClickHouse/ClickHouse/pull/29811) ([tavplubix](https://github.com/tavplubix)).
* Fix a bad cast in the `ATTACH TABLE ... FROM 'path'` query when a non-string literal is used instead of a path. It could lead to reading uninitialized memory. [#29790](https://github.com/ClickHouse/ClickHouse/pull/29790) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix concurrent access to `LowCardinality` during `GROUP BY` (in combination with `Buffer` tables it could lead to trouble). [#29782](https://github.com/ClickHouse/ClickHouse/pull/29782) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect `GROUP BY` results (multiple rows with the same keys) in distributed queries when the shards had mixed versions `<= 21.3` and `>= 21.4`, the `GROUP BY` key had several columns all of fixed size, and two-level aggregation was activated (see `group_by_two_level_threshold` and `group_by_two_level_threshold_bytes`). Fixes [#29580](https://github.com/ClickHouse/ClickHouse/issues/29580). [#29735](https://github.com/ClickHouse/ClickHouse/pull/29735) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed incorrect behaviour of the `materialized_postgresql_tables_list` setting at server restart. Found in [#28529](https://github.com/ClickHouse/ClickHouse/issues/28529). [#29686](https://github.com/ClickHouse/ClickHouse/pull/29686) ([Kseniia Sumarokova](https://github.com/kssenii)).
* A condition in a filter predicate could be lost after the push-down optimization. [#29625](https://github.com/ClickHouse/ClickHouse/pull/29625) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix JIT expression compilation with aliases and short-circuit expression evaluation. Closes [#29403](https://github.com/ClickHouse/ClickHouse/issues/29403). [#29574](https://github.com/ClickHouse/ClickHouse/pull/29574) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix a rare segfault in `ALTER MODIFY` queries when an incorrect table identifier is used in a `DEFAULT` expression, like `x.y.z...`. Fixes [#29184](https://github.com/ClickHouse/ClickHouse/issues/29184). [#29573](https://github.com/ClickHouse/ClickHouse/pull/29573) ([alesapin](https://github.com/alesapin)).
* Fix a nullptr dereference for `GROUP BY WITH TOTALS HAVING` (when the column from `HAVING` wasn't selected); a sketch of the shape follows. [#29553](https://github.com/ClickHouse/ClickHouse/pull/29553) ([Azat Khuzhin](https://github.com/azat)).
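A minimal sketch of the shape (hypothetical table `t`; the column `y` appears only in `HAVING`, not in the `SELECT` list):

```sql
-- Before the fix, this shape could hit a nullptr dereference because the
-- HAVING column is not among the selected columns.
SELECT sum(x)
FROM t
GROUP BY key WITH TOTALS
HAVING max(y) > 0;
```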
* Avoid deadlocks when reading and writing to `Join` table engine tables at the same time. [#29544](https://github.com/ClickHouse/ClickHouse/pull/29544) ([Raúl Marín](https://github.com/Algunenano)).
* Fix a bug in the `pathStartsWith` check caused by incorrect usage of `std::mismatch`: "The behavior is undefined if the second range is shorter than the first range." [#29531](https://github.com/ClickHouse/ClickHouse/pull/29531) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add retries in the ODBC bridge for the `Invalid cursor state` error, which is retriable. Closes [#29473](https://github.com/ClickHouse/ClickHouse/issues/29473). [#29518](https://github.com/ClickHouse/ClickHouse/pull/29518) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed incorrect table name parsing on loading of a `Lazy` database. Fixes [#29456](https://github.com/ClickHouse/ClickHouse/issues/29456). [#29476](https://github.com/ClickHouse/ClickHouse/pull/29476) ([tavplubix](https://github.com/tavplubix)).
* Fix a possible `Block structure mismatch` for subqueries with a pushed-down `HAVING` predicate. Fixes [#29010](https://github.com/ClickHouse/ClickHouse/issues/29010). [#29475](https://github.com/ClickHouse/ClickHouse/pull/29475) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix the logical error `Cannot capture columns` in the functions `greatest`/`least`. Closes [#29334](https://github.com/ClickHouse/ClickHouse/issues/29334). [#29454](https://github.com/ClickHouse/ClickHouse/pull/29454) ([Kruglov Pavel](https://github.com/Avogar)).
* RocksDB table engine: fix a race condition when opening multiple DBs (and bring back some tests that trigger the problem in CI). [#29393](https://github.com/ClickHouse/ClickHouse/pull/29393) ([Azat Khuzhin](https://github.com/azat)).
* Fix replicated access storage not shutting down cleanly when misconfigured. [#29388](https://github.com/ClickHouse/ClickHouse/pull/29388) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Remove the window function `nth_value` as it is not memory-safe. This closes [#29347](https://github.com/ClickHouse/ClickHouse/issues/29347). [#29348](https://github.com/ClickHouse/ClickHouse/pull/29348) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix vertical merges of projection parts. This fixes [#29253](https://github.com/ClickHouse/ClickHouse/issues/29253). This PR also fixes several projection merge/mutation issues introduced in https://github.com/ClickHouse/ClickHouse/pull/25165. [#29337](https://github.com/ClickHouse/ClickHouse/pull/29337) ([Amos Bird](https://github.com/amosbird)).
* Fix hanging DDL queries on a `Replicated` database while adding a new replica. [#29328](https://github.com/ClickHouse/ClickHouse/pull/29328) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Fix connection timeouts (`send_timeout`/`receive_timeout`). [#29282](https://github.com/ClickHouse/ClickHouse/pull/29282) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible `Table columns structure in ZooKeeper is different from local table structure` exception while recreating or creating new replicas of `ReplicatedMergeTree`, when one of the table columns has a default expression with case-insensitive functions. [#29266](https://github.com/ClickHouse/ClickHouse/pull/29266) ([Anton Popov](https://github.com/CurtizJ)).
* Send a normal `Database doesn't exist` error (`UNKNOWN_DATABASE`) to the client (via TCP) instead of `Attempt to read after eof` (`ATTEMPT_TO_READ_AFTER_EOF`). [#29229](https://github.com/ClickHouse/ClickHouse/pull/29229) ([Azat Khuzhin](https://github.com/azat)).
* Fix a segfault when inserting into a column of type `LowCardinality(Nullable)` in the Avro input format. [#29132](https://github.com/ClickHouse/ClickHouse/pull/29132) ([Kruglov Pavel](https://github.com/Avogar)).
* Do not allow reusing previous credentials in case of an inter-server secret (previously, an `INSERT` via Buffer/Kafka into a Distributed table with an interserver secret configured for that cluster could reuse the user previously set for that connection). [#29060](https://github.com/ClickHouse/ClickHouse/pull/29060) ([Azat Khuzhin](https://github.com/azat)).
* Handle `any_join_distinct_right_table_keys` when joining with a dictionary. Closes [#29007](https://github.com/ClickHouse/ClickHouse/issues/29007). [#29014](https://github.com/ClickHouse/ClickHouse/pull/29014) ([Vladimir C](https://github.com/vdimir)).
* Fix the "Not found column ... in block" error when joining on an alias column. Closes [#26980](https://github.com/ClickHouse/ClickHouse/issues/26980). [#29008](https://github.com/ClickHouse/ClickHouse/pull/29008) ([Vladimir C](https://github.com/vdimir)).
* Fix the number of threads used in a `GLOBAL IN` subquery (it was executed in a single thread since the [#19414](https://github.com/ClickHouse/ClickHouse/issues/19414) bugfix). [#28997](https://github.com/ClickHouse/ClickHouse/pull/28997) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix bad optimizations of `ORDER BY` if it contains `WITH FILL`. This closes [#28908](https://github.com/ClickHouse/ClickHouse/issues/28908). This closes [#26049](https://github.com/ClickHouse/ClickHouse/issues/26049). [#28910](https://github.com/ClickHouse/ClickHouse/pull/28910) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix higher-order array functions (`SIGSEGV` for `arrayCompact`, `ILLEGAL_COLUMN` for `arrayDifference`/`arrayCumSumNonNegative`) with constants. [#28904](https://github.com/ClickHouse/ClickHouse/pull/28904) ([Azat Khuzhin](https://github.com/azat)).
* Fix waiting for mutations with `mutations_sync=2`. [#28889](https://github.com/ClickHouse/ClickHouse/pull/28889) ([Azat Khuzhin](https://github.com/azat)).
* Fix queries to external databases (i.e. MySQL) with multiple columns in `IN`, e.g. `(k, v) IN ((1, 2))`. [#28888](https://github.com/ClickHouse/ClickHouse/pull/28888) ([Azat Khuzhin](https://github.com/azat)).
* Fix a bug with `LowCardinality` in short-circuit function evaluation. Closes [#28884](https://github.com/ClickHouse/ClickHouse/issues/28884). [#28887](https://github.com/ClickHouse/ClickHouse/pull/28887) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix reading of subcolumns from compact parts. [#28873](https://github.com/ClickHouse/ClickHouse/pull/28873) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition between `DROP PART` and `REPLACE/MOVE PARTITION` that might cause replicas to diverge in rare cases. [#28864](https://github.com/ClickHouse/ClickHouse/pull/28864) ([tavplubix](https://github.com/tavplubix)).
* Fix expression compilation with short-circuit evaluation. [#28821](https://github.com/ClickHouse/ClickHouse/pull/28821) ([Azat Khuzhin](https://github.com/azat)).
* Fix an extremely rare case when `ReplicatedMergeTree` replicas could diverge after a hard reboot of all replicas. The error looks like `Part ... intersects (previous|next) part ...`. [#28817](https://github.com/ClickHouse/ClickHouse/pull/28817) ([alesapin](https://github.com/alesapin)).
* Better check for connection usability, and also catch any exception in `RabbitMQ` shutdown just in case. [#28797](https://github.com/ClickHouse/ClickHouse/pull/28797) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a benign race condition in `ReplicatedMergeTreeQueue`. It shouldn't be visible to users, but it can lead to subtle bugs. [#28734](https://github.com/ClickHouse/ClickHouse/pull/28734) ([alesapin](https://github.com/alesapin)).
* Fix a possible crash for `SELECT` with a partially created aggregate projection in case of an exception. [#28700](https://github.com/ClickHouse/ClickHouse/pull/28700) ([Amos Bird](https://github.com/amosbird)).
* Fix a coredump when creating distributed tables with wrong parameters. [#28686](https://github.com/ClickHouse/ClickHouse/pull/28686) ([Zhiyong Wang](https://github.com/ljcui)).
* Add `Settings.Names` and `Settings.Values` aliases for the `system.processes` table. [#28685](https://github.com/ClickHouse/ClickHouse/pull/28685) ([Vitaly](https://github.com/orloffv)).
* Support for the S2 Geometry library: fix the number of arguments required by the `s2RectAdd` and `s2RectContains` functions. [#28663](https://github.com/ClickHouse/ClickHouse/pull/28663) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix an invalid constant type conversion when a `Nullable` or `LowCardinality` primary key is used. [#28636](https://github.com/ClickHouse/ClickHouse/pull/28636) ([Amos Bird](https://github.com/amosbird)).
* Fix "Column is not under aggregate function and not in GROUP BY" with `PREWHERE` (fixes [#28461](https://github.com/ClickHouse/ClickHouse/issues/28461)). [#28502](https://github.com/ClickHouse/ClickHouse/pull/28502) ([Azat Khuzhin](https://github.com/azat)).

### ClickHouse release v21.10, 2021-10-16

#### Backward Incompatible Change
@ -1,4 +1,5 @@
#include <stdexcept>
#include <fstream>
#include <base/getMemoryAmount.h>
#include <base/getPageSize.h>

@ -15,6 +16,17 @@
 */
uint64_t getMemoryAmountOrZero()
{
#if defined(OS_LINUX)
    // Try to look up the cgroup memory limit first
    std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
    if (cgroup_limit.is_open())
    {
        uint64_t amount = 0; // in case of read error
        cgroup_limit >> amount;
        return amount;
    }
#endif

    int64_t num_pages = sysconf(_SC_PHYS_PAGES);
    if (num_pages <= 0)
        return 0;
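In effect, this hunk makes `getMemoryAmountOrZero()` prefer the cgroup v1 memory limit over the physical page count on Linux, so a containerized server sees its container limit rather than the host's total RAM. (On cgroup v2 systems the limit lives elsewhere, e.g. in `memory.max`; whether that case is handled is not visible in this excerpt.)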
15906 benchmark/duckdb/log (Normal file)
File diff suppressed because one or more lines are too long
43 benchmark/duckdb/queries.sql (Normal file)
@ -0,0 +1,43 @@
SELECT count(*) FROM hits;
SELECT count(*) FROM hits WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits;
SELECT sum(UserID) FROM hits;
SELECT COUNT(DISTINCT UserID) FROM hits;
SELECT COUNT(DISTINCT SearchPhrase) FROM hits;
SELECT min(EventDate), max(EventDate) FROM hits;
SELECT AdvEngineID, count(*) FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM hits GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS m, SearchPhrase, count(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM hits WHERE UserID = 12345678901234567890;
SELECT count(*) FROM hits WHERE URL::TEXT LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits WHERE URL::TEXT LIKE '%metrika%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title::TEXT LIKE '%Яндекс%' AND URL::TEXT NOT LIKE '%.yandex.%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM hits WHERE URL::TEXT LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(octet_length(URL)) AS l, count(*) AS c FROM hits WHERE octet_length(URL) > 0 GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT regexp_replace(Referer::TEXT, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(octet_length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits WHERE octet_length(Referer) > 0 GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits;
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(URL) > 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(Title) > 0 GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS "Minute", count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) ORDER BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime)));
762 benchmark/duckdb/usability.md (Normal file)
File diff suppressed because one or more lines are too long
12 benchmark/postgresql/benchmark.sh (Executable file)
@ -0,0 +1,12 @@
#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits_100m_pg/' | while read query; do

    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query";
    for i in {1..3}; do
        # For some reason JIT does not work on my machine
        sudo -u postgres psql tutorial -t -c 'set jit = off' -c '\timing' -c "$query" | grep 'Time' | tee --append log
    done;
done;
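Note on reading the benchmark logs below: the page cache is dropped once per query and each query is then timed three times, so the first measurement of every triple is a cold run and the next two are warm runs. That is why each triple of timings shows one slow result followed by two fast ones.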
142 benchmark/postgresql/instructions.md (Normal file)
@ -0,0 +1,142 @@
Create a table in PostgreSQL:

```
CREATE TABLE hits_100m_pg
(
    WatchID BIGINT NOT NULL,
    JavaEnable SMALLINT NOT NULL,
    Title TEXT NOT NULL,
    GoodEvent SMALLINT NOT NULL,
    EventTime TIMESTAMP NOT NULL,
    EventDate Date NOT NULL,
    CounterID INTEGER NOT NULL,
    ClientIP INTEGER NOT NULL,
    RegionID INTEGER NOT NULL,
    UserID BIGINT NOT NULL,
    CounterClass SMALLINT NOT NULL,
    OS SMALLINT NOT NULL,
    UserAgent SMALLINT NOT NULL,
    URL TEXT NOT NULL,
    Referer TEXT NOT NULL,
    Refresh SMALLINT NOT NULL,
    RefererCategoryID SMALLINT NOT NULL,
    RefererRegionID INTEGER NOT NULL,
    URLCategoryID SMALLINT NOT NULL,
    URLRegionID INTEGER NOT NULL,
    ResolutionWidth SMALLINT NOT NULL,
    ResolutionHeight SMALLINT NOT NULL,
    ResolutionDepth SMALLINT NOT NULL,
    FlashMajor SMALLINT NOT NULL,
    FlashMinor SMALLINT NOT NULL,
    FlashMinor2 TEXT NOT NULL,
    NetMajor SMALLINT NOT NULL,
    NetMinor SMALLINT NOT NULL,
    UserAgentMajor SMALLINT NOT NULL,
    UserAgentMinor CHAR(2) NOT NULL,
    CookieEnable SMALLINT NOT NULL,
    JavascriptEnable SMALLINT NOT NULL,
    IsMobile SMALLINT NOT NULL,
    MobilePhone SMALLINT NOT NULL,
    MobilePhoneModel TEXT NOT NULL,
    Params TEXT NOT NULL,
    IPNetworkID INTEGER NOT NULL,
    TraficSourceID SMALLINT NOT NULL,
    SearchEngineID SMALLINT NOT NULL,
    SearchPhrase TEXT NOT NULL,
    AdvEngineID SMALLINT NOT NULL,
    IsArtifical SMALLINT NOT NULL,
    WindowClientWidth SMALLINT NOT NULL,
    WindowClientHeight SMALLINT NOT NULL,
    ClientTimeZone SMALLINT NOT NULL,
    ClientEventTime TIMESTAMP NOT NULL,
    SilverlightVersion1 SMALLINT NOT NULL,
    SilverlightVersion2 SMALLINT NOT NULL,
    SilverlightVersion3 INTEGER NOT NULL,
    SilverlightVersion4 SMALLINT NOT NULL,
    PageCharset TEXT NOT NULL,
    CodeVersion INTEGER NOT NULL,
    IsLink SMALLINT NOT NULL,
    IsDownload SMALLINT NOT NULL,
    IsNotBounce SMALLINT NOT NULL,
    FUniqID BIGINT NOT NULL,
    OriginalURL TEXT NOT NULL,
    HID INTEGER NOT NULL,
    IsOldCounter SMALLINT NOT NULL,
    IsEvent SMALLINT NOT NULL,
    IsParameter SMALLINT NOT NULL,
    DontCountHits SMALLINT NOT NULL,
    WithHash SMALLINT NOT NULL,
    HitColor CHAR NOT NULL,
    LocalEventTime TIMESTAMP NOT NULL,
    Age SMALLINT NOT NULL,
    Sex SMALLINT NOT NULL,
    Income SMALLINT NOT NULL,
    Interests SMALLINT NOT NULL,
    Robotness SMALLINT NOT NULL,
    RemoteIP INTEGER NOT NULL,
    WindowName INTEGER NOT NULL,
    OpenerName INTEGER NOT NULL,
    HistoryLength SMALLINT NOT NULL,
    BrowserLanguage TEXT NOT NULL,
    BrowserCountry TEXT NOT NULL,
    SocialNetwork TEXT NOT NULL,
    SocialAction TEXT NOT NULL,
    HTTPError SMALLINT NOT NULL,
    SendTiming INTEGER NOT NULL,
    DNSTiming INTEGER NOT NULL,
    ConnectTiming INTEGER NOT NULL,
    ResponseStartTiming INTEGER NOT NULL,
    ResponseEndTiming INTEGER NOT NULL,
    FetchTiming INTEGER NOT NULL,
    SocialSourceNetworkID SMALLINT NOT NULL,
    SocialSourcePage TEXT NOT NULL,
    ParamPrice BIGINT NOT NULL,
    ParamOrderID TEXT NOT NULL,
    ParamCurrency TEXT NOT NULL,
    ParamCurrencyID SMALLINT NOT NULL,
    OpenstatServiceName TEXT NOT NULL,
    OpenstatCampaignID TEXT NOT NULL,
    OpenstatAdID TEXT NOT NULL,
    OpenstatSourceID TEXT NOT NULL,
    UTMSource TEXT NOT NULL,
    UTMMedium TEXT NOT NULL,
    UTMCampaign TEXT NOT NULL,
    UTMContent TEXT NOT NULL,
    UTMTerm TEXT NOT NULL,
    FromTag TEXT NOT NULL,
    HasGCLID SMALLINT NOT NULL,
    RefererHash BIGINT NOT NULL,
    URLHash BIGINT NOT NULL,
    CLID INTEGER NOT NULL
);
```

Create a dump from ClickHouse:

```
SELECT WatchID::Int64, JavaEnable, replaceAll(replaceAll(replaceAll(toValidUTF8(Title), '\0', ''), '"', ''), '\\', ''), GoodEvent, EventTime, EventDate, CounterID::Int32, ClientIP::Int32, RegionID::Int32,
UserID::Int64, CounterClass, OS, UserAgent, replaceAll(replaceAll(replaceAll(toValidUTF8(URL), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(Referer), '\0', ''), '"', ''), '\\', ''), Refresh, RefererCategoryID::Int16, RefererRegionID::Int32,
URLCategoryID::Int16, URLRegionID::Int32, ResolutionWidth::Int16, ResolutionHeight::Int16, ResolutionDepth, FlashMajor, FlashMinor,
FlashMinor2, NetMajor, NetMinor, UserAgentMajor::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(UserAgentMinor::String), '\0', ''), '"', ''), '\\', ''), CookieEnable, JavascriptEnable, IsMobile, MobilePhone,
replaceAll(replaceAll(replaceAll(toValidUTF8(MobilePhoneModel), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(Params), '\0', ''), '"', ''), '\\', ''), IPNetworkID::Int32, TraficSourceID, SearchEngineID::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(SearchPhrase), '\0', ''), '"', ''), '\\', ''),
AdvEngineID, IsArtifical, WindowClientWidth::Int16, WindowClientHeight::Int16, ClientTimeZone, ClientEventTime,
SilverlightVersion1, SilverlightVersion2, SilverlightVersion3::Int32, SilverlightVersion4::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(PageCharset), '\0', ''), '"', ''), '\\', ''),
CodeVersion::Int32, IsLink, IsDownload, IsNotBounce, FUniqID::Int64, replaceAll(replaceAll(replaceAll(toValidUTF8(OriginalURL), '\0', ''), '"', ''), '\\', ''), HID::Int32, IsOldCounter, IsEvent,
IsParameter, DontCountHits, WithHash, replaceAll(replaceAll(replaceAll(toValidUTF8(HitColor::String), '\0', ''), '"', ''), '\\', ''), LocalEventTime, Age, Sex, Income, Interests::Int16, Robotness, RemoteIP::Int32,
WindowName, OpenerName, HistoryLength, replaceAll(replaceAll(replaceAll(toValidUTF8(BrowserLanguage::String), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(BrowserCountry::String), '\0', ''), '"', ''), '\\', ''),
replaceAll(replaceAll(replaceAll(toValidUTF8(SocialNetwork), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(SocialAction), '\0', ''), '"', ''), '\\', ''),
HTTPError, least(SendTiming, 30000), least(DNSTiming, 30000), least(ConnectTiming, 30000), least(ResponseStartTiming, 30000),
least(ResponseEndTiming, 30000), least(FetchTiming, 30000), SocialSourceNetworkID,
replaceAll(replaceAll(replaceAll(toValidUTF8(SocialSourcePage), '\0', ''), '"', ''), '\\', ''), ParamPrice, replaceAll(replaceAll(replaceAll(toValidUTF8(ParamOrderID), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(ParamCurrency::String), '\0', ''), '"', ''), '\\', ''),
ParamCurrencyID::Int16, OpenstatServiceName, OpenstatCampaignID, OpenstatAdID, OpenstatSourceID,
UTMSource, UTMMedium, UTMCampaign, UTMContent, UTMTerm, FromTag, HasGCLID, RefererHash::Int64, URLHash::Int64, CLID::Int32
FROM hits_100m_obfuscated
INTO OUTFILE 'dump.tsv'
FORMAT TSV
```

Insert data into PostgreSQL:

```
\copy hits_100m_pg FROM 'dump.tsv';
```
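A note on the dump query above: the nested `replaceAll` calls strip NUL bytes, double quotes, and backslashes from text fields because PostgreSQL's text `COPY` format treats backslash as an escape character and does not accept NUL bytes, while `toValidUTF8` guards against invalid UTF-8 sequences; the `least(..., 30000)` wrappers appear to clamp timing outliers so they fit comfortably in the `INTEGER` columns (an assumption based on the query text).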
129 benchmark/postgresql/log (Normal file)
@ -0,0 +1,129 @@
Time: 122020.258 ms (02:02.020)
Time: 5060.281 ms (00:05.060)
Time: 5052.692 ms (00:05.053)
Time: 129594.172 ms (02:09.594)
Time: 8079.623 ms (00:08.080)
Time: 7866.964 ms (00:07.867)
Time: 129584.717 ms (02:09.585)
Time: 8276.161 ms (00:08.276)
Time: 8153.295 ms (00:08.153)
Time: 123707.890 ms (02:03.708)
Time: 6835.297 ms (00:06.835)
Time: 6607.039 ms (00:06.607)
Time: 166640.676 ms (02:46.641)
Time: 75401.239 ms (01:15.401)
Time: 73526.027 ms (01:13.526)
Time: 272715.750 ms (04:32.716)
Time: 182721.613 ms (03:02.722)
Time: 182880.525 ms (03:02.881)
Time: 127108.191 ms (02:07.108)
Time: 6542.913 ms (00:06.543)
Time: 6339.887 ms (00:06.340)
Time: 127339.314 ms (02:07.339)
Time: 8376.381 ms (00:08.376)
Time: 7831.872 ms (00:07.832)
Time: 179176.439 ms (02:59.176)
Time: 58559.297 ms (00:58.559)
Time: 58139.265 ms (00:58.139)
Time: 182019.101 ms (03:02.019)
Time: 58435.027 ms (00:58.435)
Time: 58130.994 ms (00:58.131)
Time: 132449.502 ms (02:12.450)
Time: 11203.104 ms (00:11.203)
Time: 11048.435 ms (00:11.048)
Time: 128445.641 ms (02:08.446)
Time: 11602.145 ms (00:11.602)
Time: 11418.356 ms (00:11.418)
Time: 162831.387 ms (02:42.831)
Time: 41510.710 ms (00:41.511)
Time: 41682.899 ms (00:41.683)
Time: 171898.965 ms (02:51.899)
Time: 47379.274 ms (00:47.379)
Time: 47429.908 ms (00:47.430)
Time: 161607.811 ms (02:41.608)
Time: 41674.409 ms (00:41.674)
Time: 40854.340 ms (00:40.854)
Time: 175247.929 ms (02:55.248)
Time: 46721.776 ms (00:46.722)
Time: 46507.631 ms (00:46.508)
Time: 335961.271 ms (05:35.961)
Time: 248535.866 ms (04:08.536)
Time: 247383.678 ms (04:07.384)
Time: 132852.983 ms (02:12.853)
Time: 14939.304 ms (00:14.939)
Time: 14607.525 ms (00:14.608)
Time: 243461.844 ms (04:03.462)
Time: 157307.904 ms (02:37.308)
Time: 155093.101 ms (02:35.093)
Time: 122090.761 ms (02:02.091)
Time: 6411.266 ms (00:06.411)
Time: 6308.178 ms (00:06.308)
Time: 126584.819 ms (02:06.585)
Time: 8836.471 ms (00:08.836)
Time: 8532.176 ms (00:08.532)
Time: 125225.097 ms (02:05.225)
Time: 10236.910 ms (00:10.237)
Time: 9849.757 ms (00:09.850)
Time: 139140.064 ms (02:19.140)
Time: 21797.859 ms (00:21.798)
Time: 21559.214 ms (00:21.559)
Time: 124757.485 ms (02:04.757)
Time: 8728.403 ms (00:08.728)
Time: 8714.130 ms (00:08.714)
Time: 120687.258 ms (02:00.687)
Time: 8366.245 ms (00:08.366)
Time: 8146.856 ms (00:08.147)
Time: 122327.148 ms (02:02.327)
Time: 8698.359 ms (00:08.698)
Time: 8480.807 ms (00:08.481)
Time: 123958.614 ms (02:03.959)
Time: 8595.931 ms (00:08.596)
Time: 8241.773 ms (00:08.242)
Time: 128982.905 ms (02:08.983)
Time: 11252.783 ms (00:11.253)
Time: 10957.931 ms (00:10.958)
Time: 208455.385 ms (03:28.455)
Time: 102530.897 ms (01:42.531)
Time: 102049.298 ms (01:42.049)
Time: 131268.420 ms (02:11.268)
Time: 21094.466 ms (00:21.094)
Time: 20934.610 ms (00:20.935)
Time: 164084.134 ms (02:44.084)
Time: 77418.547 ms (01:17.419)
Time: 75422.290 ms (01:15.422)
Time: 174800.022 ms (02:54.800)
Time: 87859.594 ms (01:27.860)
Time: 85733.954 ms (01:25.734)
Time: 419357.463 ms (06:59.357)
Time: 339047.269 ms (05:39.047)
Time: 334808.230 ms (05:34.808)
Time: 475011.901 ms (07:55.012)
Time: 344406.246 ms (05:44.406)
Time: 347197.731 ms (05:47.198)
Time: 464657.732 ms (07:44.658)
Time: 332084.079 ms (05:32.084)
Time: 330921.322 ms (05:30.921)
Time: 152490.615 ms (02:32.491)
Time: 30954.343 ms (00:30.954)
Time: 31379.062 ms (00:31.379)
Time: 128539.127 ms (02:08.539)
Time: 12802.672 ms (00:12.803)
Time: 12494.088 ms (00:12.494)
Time: 125850.120 ms (02:05.850)
Time: 10318.773 ms (00:10.319)
Time: 9953.030 ms (00:09.953)
Time: 126602.092 ms (02:06.602)
Time: 8935.571 ms (00:08.936)
Time: 8711.184 ms (00:08.711)
Time: 133222.456 ms (02:13.222)
Time: 11848.869 ms (00:11.849)
Time: 11752.640 ms (00:11.753)
Time: 126950.067 ms (02:06.950)
Time: 11260.892 ms (00:11.261)
Time: 10943.649 ms (00:10.944)
Time: 128451.171 ms (02:08.451)
Time: 10984.980 ms (00:10.985)
Time: 10770.609 ms (00:10.771)
Time: 124621.000 ms (02:04.621)
Time: 8885.466 ms (00:08.885)
Time: 8857.296 ms (00:08.857)
43 benchmark/postgresql/queries.sql (Normal file)
@ -0,0 +1,43 @@
SELECT count(*) FROM {table};
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
SELECT sum(UserID) FROM {table};
SELECT COUNT(DISTINCT UserID) FROM {table};
SELECT COUNT(DISTINCT SearchPhrase) FROM {table};
SELECT min(EventDate), max(EventDate) FROM {table};
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
11 benchmark/timescaledb/benchmark.sh (Executable file)
@ -0,0 +1,11 @@
#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits_100m_obfuscated/' | while read query; do

    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query";
    for i in {1..3}; do
        sudo -u postgres psql tutorial -t -c 'set jit = off' -c '\timing' -c "$query" | grep 'Time' | tee --append log
    done;
done;
215
benchmark/timescaledb/log
Normal file
215
benchmark/timescaledb/log
Normal file
@ -0,0 +1,215 @@
|
||||
3
|
||||
SELECT count(*) FROM hits_100m_obfuscated;
|
||||
Time: 3259.733 ms (00:03.260)
|
||||
Time: 3135.484 ms (00:03.135)
|
||||
Time: 3135.579 ms (00:03.136)
|
||||
3
|
||||
SELECT count(*) FROM hits_100m_obfuscated WHERE AdvEngineID != 0;
|
||||
Time: 146854.557 ms (02:26.855)
|
||||
Time: 6921.736 ms (00:06.922)
|
||||
Time: 6619.892 ms (00:06.620)
|
||||
3
|
||||
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_100m_obfuscated;
|
||||
Time: 146568.297 ms (02:26.568)
|
||||
Time: 7481.610 ms (00:07.482)
|
||||
Time: 7258.209 ms (00:07.258)
|
||||
3
|
||||
SELECT sum(UserID) FROM hits_100m_obfuscated;
|
||||
Time: 146864.106 ms (02:26.864)
|
||||
Time: 5690.024 ms (00:05.690)
|
||||
Time: 5381.820 ms (00:05.382)
|
||||
3
|
||||
SELECT COUNT(DISTINCT UserID) FROM hits_100m_obfuscated;
|
||||
Time: 227507.331 ms (03:47.507)
|
||||
Time: 69165.471 ms (01:09.165)
|
||||
Time: 72216.950 ms (01:12.217)
|
||||
3
|
||||
SELECT COUNT(DISTINCT SearchPhrase) FROM hits_100m_obfuscated;
|
||||
Time: 323644.397 ms (05:23.644)
|
||||
Time: 177578.740 ms (02:57.579)
|
||||
Time: 175055.738 ms (02:55.056)
|
||||
3
|
||||
SELECT min(EventDate), max(EventDate) FROM hits_100m_obfuscated;
|
||||
Time: 146147.843 ms (02:26.148)
|
||||
Time: 5735.128 ms (00:05.735)
|
||||
Time: 5428.638 ms (00:05.429)
|
||||
3
|
||||
SELECT AdvEngineID, count(*) FROM hits_100m_obfuscated WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
|
||||
Time: 148658.450 ms (02:28.658)
|
||||
Time: 7014.882 ms (00:07.015)
|
||||
Time: 6599.736 ms (00:06.600)
|
||||
3
|
||||
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
Time: 202423.122 ms (03:22.423)
|
||||
Time: 54439.047 ms (00:54.439)
|
||||
Time: 54800.354 ms (00:54.800)
|
||||
3
|
||||
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits_100m_obfuscated GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
Time: 201152.491 ms (03:21.152)
|
||||
Time: 55875.854 ms (00:55.876)
|
||||
Time: 55200.330 ms (00:55.200)
|
||||
3
|
||||
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
Time: 146042.603 ms (02:26.043)
|
||||
Time: 9931.633 ms (00:09.932)
|
||||
Time: 10037.032 ms (00:10.037)
|
||||
3
|
||||
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
Time: 150811.952 ms (02:30.812)
|
||||
Time: 10320.230 ms (00:10.320)
|
||||
Time: 9993.232 ms (00:09.993)
|
||||
3
|
||||
SELECT SearchPhrase, count(*) AS c FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
Time: 173071.218 ms (02:53.071)
|
||||
Time: 34314.835 ms (00:34.315)
|
||||
Time: 34420.919 ms (00:34.421)
|
||||
3
|
||||
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
Time: 172874.155 ms (02:52.874)
|
||||
Time: 43704.494 ms (00:43.704)
|
||||
Time: 43918.380 ms (00:43.918)
|
||||
3
|
||||
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
Time: 178484.822 ms (02:58.485)
|
||||
Time: 36850.436 ms (00:36.850)
|
||||
Time: 35789.029 ms (00:35.789)
|
||||
3
|
||||
SELECT UserID, count(*) FROM hits_100m_obfuscated GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
|
||||
Time: 169720.759 ms (02:49.721)
|
||||
Time: 24125.730 ms (00:24.126)
|
||||
Time: 23782.745 ms (00:23.783)
|
||||
3
|
||||
SELECT UserID, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
Time: 182335.631 ms (03:02.336)
|
||||
Time: 37324.563 ms (00:37.325)
|
||||
Time: 37124.250 ms (00:37.124)
|
||||
3
|
||||
SELECT UserID, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, SearchPhrase LIMIT 10;
|
||||
Time: 163799.714 ms (02:43.800)
|
||||
Time: 18514.031 ms (00:18.514)
|
||||
Time: 18968.524 ms (00:18.969)
|
||||
3
|
||||
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
Time: 294799.480 ms (04:54.799)
|
||||
Time: 149592.992 ms (02:29.593)
|
||||
Time: 149466.291 ms (02:29.466)
|
||||
3
|
||||
SELECT UserID FROM hits_100m_obfuscated WHERE UserID = -6101065172474983726;
|
||||
Time: 140797.496 ms (02:20.797)
|
||||
Time: 5312.321 ms (00:05.312)
|
||||
Time: 5020.502 ms (00:05.021)
|
||||
3
|
||||
SELECT count(*) FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%';
|
||||
Time: 143092.287 ms (02:23.092)
|
||||
Time: 7893.874 ms (00:07.894)
|
||||
Time: 7661.326 ms (00:07.661)
|
||||
3
|
||||
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 143682.424 ms (02:23.682)
Time: 9249.962 ms (00:09.250)
Time: 9073.876 ms (00:09.074)
3
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits_100m_obfuscated WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 150965.884 ms (02:30.966)
Time: 20350.812 ms (00:20.351)
Time: 20074.939 ms (00:20.075)
3
SELECT * FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
Time: 4674.669 ms (00:04.675)
Time: 4532.389 ms (00:04.532)
Time: 4555.457 ms (00:04.555)
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
Time: 5.177 ms
Time: 5.031 ms
Time: 4.419 ms
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
Time: 141152.210 ms (02:21.152)
Time: 7492.968 ms (00:07.493)
Time: 7300.428 ms (00:07.300)
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
Time: 30.736 ms
Time: 5.018 ms
Time: 5.132 ms
3
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_100m_obfuscated WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
Time: 144034.016 ms (02:24.034)
Time: 10701.672 ms (00:10.702)
Time: 10348.565 ms (00:10.349)
3
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits_100m_obfuscated WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
Time: 191575.080 ms (03:11.575)
Time: 97836.706 ms (01:37.837)
Time: 97673.219 ms (01:37.673)
3
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_100m_obfuscated;
Time: 143652.317 ms (02:23.652)
Time: 22185.656 ms (00:22.186)
Time: 21887.411 ms (00:21.887)
3
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 153481.944 ms (02:33.482)
Time: 17748.628 ms (00:17.749)
Time: 17551.116 ms (00:17.551)
3
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 167448.684 ms (02:47.449)
Time: 25902.961 ms (00:25.903)
Time: 25592.018 ms (00:25.592)
3
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 299183.443 ms (04:59.183)
Time: 145349.772 ms (02:25.350)
Time: 143214.688 ms (02:23.215)
3
SELECT URL, count(*) AS c FROM hits_100m_obfuscated GROUP BY URL ORDER BY c DESC LIMIT 10;
Time: 389851.369 ms (06:29.851)
Time: 228158.639 ms (03:48.159)
Time: 231811.118 ms (03:51.811)
3
SELECT 1, URL, count(*) AS c FROM hits_100m_obfuscated GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
Time: 407458.343 ms (06:47.458)
Time: 230125.530 ms (03:50.126)
Time: 230764.511 ms (03:50.765)
3
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_100m_obfuscated GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
Time: 174098.556 ms (02:54.099)
Time: 23503.975 ms (00:23.504)
Time: 24322.856 ms (00:24.323)
3
SELECT URL, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
Time: 145906.025 ms (02:25.906)
Time: 10824.695 ms (00:10.825)
Time: 10484.885 ms (00:10.485)
3
SELECT Title, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
Time: 144063.711 ms (02:24.064)
Time: 8947.980 ms (00:08.948)
Time: 8608.434 ms (00:08.608)
3
SELECT URL, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
Time: 141883.596 ms (02:21.884)
Time: 7977.257 ms (00:07.977)
Time: 7673.547 ms (00:07.674)
3
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
Time: 147100.084 ms (02:27.100)
Time: 9527.812 ms (00:09.528)
Time: 9457.663 ms (00:09.458)
3
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
Time: 144585.669 ms (02:24.586)
Time: 10815.223 ms (00:10.815)
Time: 10594.707 ms (00:10.595)
3
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
Time: 145738.341 ms (02:25.738)
Time: 10592.979 ms (00:10.593)
Time: 10181.477 ms (00:10.181)
3
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
Time: 145023.796 ms (02:25.024)
Time: 8035.337 ms (00:08.035)
Time: 7865.698 ms (00:07.866)
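The `Time: … ms` lines throughout this log are psql `\timing` output: each query is run three times in a row, so the first figure reflects a cold file-system cache and the next two reflect warm runs (the interleaved `3` is apparently the number of tries). A minimal sketch of a runner that would produce a log of this shape — the database name and psql invocation are assumptions, not taken from this patch:

``` bash
#!/usr/bin/env bash
# Hypothetical replay of the benchmark: three timed tries per query.
TRIES=3
TABLE=hits_100m_obfuscated
while read -r query; do
    [ -z "$query" ] && continue
    echo "${query//\{table\}/$TABLE}"
    for _ in $(seq 1 "$TRIES"); do
        # \timing makes psql print the "Time: ... ms (...)" lines seen above.
        psql -d test -c '\timing' -c "${query//\{table\}/$TABLE}" | grep '^Time:'
    done
    echo "$TRIES"
done < queries.sql
```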
129
benchmark/timescaledb/log_compressed
Normal file
@ -0,0 +1,129 @@
Time: 1784.299 ms (00:01.784)
Time: 1223.461 ms (00:01.223)
Time: 1200.665 ms (00:01.201)
Time: 22730.141 ms (00:22.730)
Time: 1379.227 ms (00:01.379)
Time: 1361.595 ms (00:01.362)
Time: 29888.235 ms (00:29.888)
Time: 3160.611 ms (00:03.161)
Time: 3207.363 ms (00:03.207)
Time: 53922.569 ms (00:53.923)
Time: 2301.456 ms (00:02.301)
Time: 2277.009 ms (00:02.277)
Time: 45363.999 ms (00:45.364)
Time: 43765.848 ms (00:43.766)
Time: 44066.621 ms (00:44.067)
Time: 172945.633 ms (02:52.946)
Time: 136944.098 ms (02:16.944)
Time: 138268.413 ms (02:18.268)
Time: 16764.579 ms (00:16.765)
Time: 2579.907 ms (00:02.580)
Time: 2590.390 ms (00:02.590)
Time: 1498.034 ms (00:01.498)
Time: 1434.534 ms (00:01.435)
Time: 1448.123 ms (00:01.448)
Time: 113533.016 ms (01:53.533)
Time: 78465.335 ms (01:18.465)
Time: 80778.839 ms (01:20.779)
Time: 90456.388 ms (01:30.456)
Time: 87050.166 ms (01:27.050)
Time: 88426.851 ms (01:28.427)
Time: 45021.632 ms (00:45.022)
Time: 12486.342 ms (00:12.486)
Time: 12222.489 ms (00:12.222)
Time: 44246.843 ms (00:44.247)
Time: 15606.856 ms (00:15.607)
Time: 15251.554 ms (00:15.252)
Time: 29654.719 ms (00:29.655)
Time: 29441.858 ms (00:29.442)
Time: 29608.141 ms (00:29.608)
Time: 103547.383 ms (01:43.547)
Time: 104733.648 ms (01:44.734)
Time: 105779.016 ms (01:45.779)
Time: 29695.834 ms (00:29.696)
Time: 15395.447 ms (00:15.395)
Time: 15819.650 ms (00:15.820)
Time: 27841.552 ms (00:27.842)
Time: 29521.849 ms (00:29.522)
Time: 27508.521 ms (00:27.509)
Time: 56665.709 ms (00:56.666)
Time: 56459.321 ms (00:56.459)
Time: 56407.620 ms (00:56.408)
Time: 27488.888 ms (00:27.489)
Time: 25557.427 ms (00:25.557)
Time: 25634.140 ms (00:25.634)
Time: 97376.463 ms (01:37.376)
Time: 96047.902 ms (01:36.048)
Time: 99918.341 ms (01:39.918)
Time: 6294.887 ms (00:06.295)
Time: 6407.262 ms (00:06.407)
Time: 6376.369 ms (00:06.376)
Time: 40787.808 ms (00:40.788)
Time: 11206.256 ms (00:11.206)
Time: 11219.871 ms (00:11.220)
Time: 12420.227 ms (00:12.420)
Time: 12548.301 ms (00:12.548)
Time: 12468.458 ms (00:12.468)
Time: 57679.878 ms (00:57.680)
Time: 35466.123 ms (00:35.466)
Time: 35562.064 ms (00:35.562)
Time: 13551.276 ms (00:13.551)
Time: 13417.313 ms (00:13.417)
Time: 13645.287 ms (00:13.645)
Time: 150.297 ms
Time: 55.995 ms
Time: 55.796 ms
Time: 3059.796 ms (00:03.060)
Time: 3038.246 ms (00:03.038)
Time: 3041.210 ms (00:03.041)
Time: 4461.720 ms (00:04.462)
Time: 4446.691 ms (00:04.447)
Time: 4424.526 ms (00:04.425)
Time: 29275.463 ms (00:29.275)
Time: 17558.747 ms (00:17.559)
Time: 17438.621 ms (00:17.439)
Time: 203316.184 ms (03:23.316)
Time: 190037.946 ms (03:10.038)
Time: 189276.624 ms (03:09.277)
Time: 36921.542 ms (00:36.922)
Time: 36963.771 ms (00:36.964)
Time: 36660.406 ms (00:36.660)
Time: 38307.345 ms (00:38.307)
Time: 17597.355 ms (00:17.597)
Time: 17324.776 ms (00:17.325)
Time: 39857.567 ms (00:39.858)
Time: 26776.411 ms (00:26.776)
Time: 26592.819 ms (00:26.593)
Time: 162782.290 ms (02:42.782)
Time: 160722.582 ms (02:40.723)
Time: 162487.263 ms (02:42.487)
Time: 261494.290 ms (04:21.494)
Time: 263594.014 ms (04:23.594)
Time: 260436.201 ms (04:20.436)
Time: 265758.455 ms (04:25.758)
Time: 270087.523 ms (04:30.088)
Time: 266617.218 ms (04:26.617)
Time: 30677.159 ms (00:30.677)
Time: 28933.542 ms (00:28.934)
Time: 29815.271 ms (00:29.815)
Time: 19754.932 ms (00:19.755)
Time: 16851.157 ms (00:16.851)
Time: 16703.289 ms (00:16.703)
Time: 10379.500 ms (00:10.379)
Time: 10267.336 ms (00:10.267)
Time: 10287.944 ms (00:10.288)
Time: 17320.582 ms (00:17.321)
Time: 9786.410 ms (00:09.786)
Time: 9760.578 ms (00:09.761)
Time: 33487.352 ms (00:33.487)
Time: 26056.528 ms (00:26.057)
Time: 25958.258 ms (00:25.958)
Time: 28020.227 ms (00:28.020)
Time: 5609.725 ms (00:05.610)
Time: 5538.744 ms (00:05.539)
Time: 15119.473 ms (00:15.119)
Time: 5057.455 ms (00:05.057)
Time: 5063.154 ms (00:05.063)
Time: 3627.703 ms (00:03.628)
Time: 3645.232 ms (00:03.645)
Time: 3546.855 ms (00:03.547)
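`log_compressed` keeps only the timings of the same 43 queries, again three tries per query, after compression was applied to the TimescaleDB tables. Reducing such a log to a best-of-three figure per query is a one-liner; a sketch, with the three-lines-per-query grouping inferred from the log shape:

``` bash
awk '/^Time:/ { t = $2 + 0; if (n % 3 == 0 || t < best) best = t; n++;
                if (n % 3 == 0) printf "query %d: %.3f ms\n", n / 3, best }' log_compressed
```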
43
benchmark/timescaledb/queries.sql
Normal file
@ -0,0 +1,43 @@
SELECT count(*) FROM {table};
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
SELECT sum(UserID) FROM {table};
SELECT COUNT(DISTINCT UserID) FROM {table};
SELECT COUNT(DISTINCT SearchPhrase) FROM {table};
SELECT min(EventDate), max(EventDate) FROM {table};
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
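Every query in this file is templated with a `{table}` placeholder; in the log above it has been substituted with `hits_100m_obfuscated`. The substitution itself is trivial, for example:

``` bash
# Expand the placeholder before feeding the file to psql (illustrative only).
sed 's/{table}/hits_100m_obfuscated/g' queries.sql > queries_expanded.sql
```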
1663
benchmark/timescaledb/usability.md
Normal file
File diff suppressed because it is too large
@ -2,11 +2,11 @@

# NOTE: has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54456)
SET(VERSION_REVISION 54457)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 11)
SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7)
SET(VERSION_DESCRIBE v21.11.1.1-prestable)
SET(VERSION_STRING 21.11.1.1)
SET(VERSION_GITHASH 503a418dedf0011e9040c3a1b6913e0b5488be4c)
SET(VERSION_DESCRIBE v21.12.1.1-prestable)
SET(VERSION_STRING 21.12.1.1)
# end of autochange
@ -1,10 +1,3 @@
option (ENABLE_FILELOG "Enable FILELOG" ON)

if (NOT ENABLE_FILELOG)
    message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use StorageFileLog with ENABLE_FILELOG=OFF")
    return()
endif()

# StorageFileLog only supports the Linux platform
if (OS_LINUX)
    set (USE_FILELOG 1)
2
contrib/snappy
vendored
@ -1 +1 @@
Subproject commit 3f194acb57e0487531c96b97af61dcbd025a78a3
Subproject commit fb057edfed820212076239fd32cb2ff23e9016bf
4
debian/changelog
vendored
@ -1,5 +1,5 @@
clickhouse (21.11.1.1) unstable; urgency=low
clickhouse (21.12.1.1) unstable; urgency=low

* Modified source code

-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Sep 2021 12:03:26 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 02 Nov 2021 00:56:42 +0300
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.11.1.*
ARG version=21.12.1.*

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
43
docker/docs/builder/Dockerfile
Normal file
@ -0,0 +1,43 @@
# docker build -t clickhouse/docs-build .
FROM ubuntu:20.04

ENV LANG=C.UTF-8

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list

RUN apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
        python3-setuptools \
        virtualenv \
        wget \
        bash \
        python \
        curl \
        python3-requests \
        sudo \
        git \
        openssl \
        python3-pip \
        software-properties-common \
        language-pack-zh* \
        chinese* \
        fonts-arphic-ukai \
        fonts-arphic-uming \
        fonts-ipafont-mincho \
        fonts-ipafont-gothic \
        fonts-unfonts-core \
        xvfb \
        nodejs \
        npm \
        openjdk-11-jdk \
        ssh-client \
    && pip --no-cache-dir install scipy \
    && apt-get autoremove --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN wget 'https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb'

RUN npm i -g purify-css

RUN pip3 install --ignore-installed --upgrade setuptools pip virtualenv
9
docker/docs/check/Dockerfile
Normal file
@ -0,0 +1,9 @@
# docker build -t clickhouse/docs-check .
FROM clickhouse/docs-builder

COPY run.sh /

ENV REPO_PATH=/repo_path
ENV OUTPUT_PATH=/output_path

CMD ["/bin/bash", "/run.sh"]
9
docker/docs/check/run.sh
Normal file
@ -0,0 +1,9 @@
#!/usr/bin/env bash
set -euo pipefail

cd $REPO_PATH/docs/tools
mkdir venv
virtualenv -p $(which python3) venv
source venv/bin/activate
python3 -m pip install --ignore-installed -r requirements.txt
./build.py --skip-git-log 2>&1 | tee $OUTPUT_PATH/output.log
9
docker/docs/release/Dockerfile
Normal file
@ -0,0 +1,9 @@
# docker build -t clickhouse/docs-release .
FROM clickhouse/docs-builder

COPY run.sh /

ENV REPO_PATH=/repo_path
ENV OUTPUT_PATH=/output_path

CMD ["/bin/bash", "/run.sh"]
10
docker/docs/release/run.sh
Normal file
@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -euo pipefail

cd $REPO_PATH/docs/tools
mkdir venv
virtualenv -p $(which python3) venv
source venv/bin/activate
python3 -m pip install --ignore-installed -r requirements.txt
mkdir -p ~/.ssh && ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
./release.sh 2>&1 | tee $OUTPUT_PATH/output.log
@ -166,5 +166,20 @@
"docker/test/keeper-jepsen": {
    "name": "clickhouse/keeper-jepsen-test",
    "dependent": []
},
"docker/docs/builder": {
    "name": "clickhouse/docs-builder",
    "dependent": [
        "docker/docs/check",
        "docker/docs/release"
    ]
},
"docker/docs/check": {
    "name": "clickhouse/docs-check",
    "dependent": []
},
"docker/docs/release": {
    "name": "clickhouse/docs-release",
    "dependent": []
}
}
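The `dependent` lists in this manifest describe which images must be rebuilt when their base image changes; `clickhouse/docs-check` and `clickhouse/docs-release` both build `FROM clickhouse/docs-builder`, matching the Dockerfiles added above. A sketch of querying that fan-out (illustrative only; the real consumer is presumably among the CI scripts under `tests/ci`):

``` bash
# List the images that must be rebuilt after clickhouse/docs-builder changes.
jq -r '."docker/docs/builder".dependent[]' docker/images.json
# docker/docs/check
# docker/docs/release
```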
@ -1,7 +1,7 @@
FROM ubuntu:20.04

ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.11.1.*
ARG version=21.12.1.*
ARG gosu_ver=1.10

# set non-empty deb_location_url url to create a docker image
@ -86,7 +86,7 @@ done
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
    echo "$0: create new user '$CLICKHOUSE_USER' instead of 'default'"
    cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
    <yandex>
    <clickhouse>
    <!-- Docs: <https://clickhouse.com/docs/en/operations/settings/settings_users/> -->
    <users>
    <!-- Remove default user -->
@ -103,7 +103,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
    <access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
    </${CLICKHOUSE_USER}>
    </users>
    </yandex>
    </clickhouse>
EOT
fi
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.11.1.*
ARG version=21.12.1.*

RUN apt-get update && \
    apt-get install -y apt-transport-https dirmngr && \
@ -264,7 +264,7 @@ function run_tests

set +e
time clickhouse-test --hung-check -j 8 --order=random \
    --fast-tests-only --no-long --testname --shard --zookeeper \
    --fast-tests-only --no-long --testname --shard --zookeeper --check-zookeeper-session \
    -- "$FASTTEST_FOCUS" 2>&1 \
    | ts '%Y-%m-%d %H:%M:%S' \
    | tee "$FASTTEST_OUTPUT/test_result.txt"
@ -1,5 +1,5 @@
#!/bin/bash
# shellcheck disable=SC2086,SC2001
# shellcheck disable=SC2086,SC2001,SC2046

set -eux
set -o pipefail
@ -13,24 +13,48 @@ script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo "$script_dir"
repo_dir=ch
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-13_debug_none_bundled_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}

function clone
{
    # The download() function is dependent on CI binaries anyway, so we can take
    # the repo from the CI as well. For local runs, start directly from the "fuzz"
    # stage.
    rm -rf ch ||:
    mkdir ch ||:
    wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
    tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz
    # For local runs, start directly from the "fuzz" stage.
    rm -rf "$repo_dir" ||:
    mkdir "$repo_dir" ||:

    git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$repo_dir" 2>&1 | ts '%Y-%m-%d %H:%M:%S'
    (
        cd "$repo_dir"
        if [ "$PR_TO_TEST" != "0" ]; then
            if git fetch --depth 1 origin "+refs/pull/$PR_TO_TEST/merge"; then
                git checkout FETCH_HEAD
                echo "Checked out pull/$PR_TO_TEST/merge ($(git rev-parse FETCH_HEAD))"
            else
                git fetch --depth 1 origin "+refs/pull/$PR_TO_TEST/head"
                git checkout "$SHA_TO_TEST"
                echo "Checked out nominal SHA $SHA_TO_TEST for PR $PR_TO_TEST"
            fi
            git diff --name-only master HEAD | tee ci-changed-files.txt
        else
            if [ -v COMMIT_SHA ]; then
                git fetch --depth 2 origin "$SHA_TO_TEST"
                git checkout "$SHA_TO_TEST"
                echo "Checked out nominal SHA $SHA_TO_TEST for master"
            else
                git fetch --depth 2 origin
                echo "Using default repository head $(git rev-parse HEAD)"
            fi
            git diff --name-only HEAD~1 HEAD | tee ci-changed-files.txt
        fi
        cd -
    )

    ls -lath ||:

}

function download
{
    wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse" &
    wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/ci-changed-files.txt" &
    wait
    wget -nv -nd -c "$BINARY_URL_TO_DOWNLOAD"

    chmod +x clickhouse
    ln -s ./clickhouse ./clickhouse-server
@ -113,7 +137,7 @@ function fuzz

    # Obtain the list of newly added tests. They will be fuzzed in a more extreme way than other tests.
    # Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment.
    NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\(\.j2\)\?\)$!ch/\1!p' ci-changed-files.txt | sort -R)"
    NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\(\.j2\)\?\)$!ch/\1!p' $repo_dir/ci-changed-files.txt | sort -R)"
    # ci-changed-files.txt also contains files that have been deleted or renamed; filter them out.
    NEW_TESTS="$(filter_exists_and_template $NEW_TESTS)"
    if [[ -n "$NEW_TESTS" ]]
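The rewritten `clone` above prefers the merge commit that GitHub precomputes for a PR and falls back to the PR head. The same ref layout can be exercised by hand; a sketch with a made-up PR number:

``` bash
# Check out the precomputed merge commit of PR 1234 (hypothetical number).
git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git ch
git -C ch fetch --depth 1 origin '+refs/pull/1234/merge'
git -C ch checkout FETCH_HEAD
```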
@ -33,7 +33,7 @@ RUN apt-get update \
    tzdata \
    vim \
    wget \
    && pip3 --no-cache-dir install 'git+https://github.com/mymarilyn/clickhouse-driver.git' scipy \
    && pip3 --no-cache-dir install 'clickhouse-driver==0.2.1' scipy \
    && apt-get purge --yes python3-dev g++ \
    && apt-get autoremove --yes \
    && apt-get clean \
@ -196,7 +196,6 @@ function run_tests
    test_files=$(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}")
elif [ "$PR_TO_TEST" -ne 0 ] \
    && [ "$(wc -l < changed-test-definitions.txt)" -gt 0 ] \
    && [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ] \
    && [ "$(wc -l < other-changed-files.txt)" -eq 0 ]
then
    # If only the perf tests were changed in the PR, we will run only these
@ -208,15 +207,15 @@ function run_tests
    test_files=$(ls "$test_prefix"/*.xml)
fi

# For PRs w/o changes in test definitions and scripts, test only a subset of
# queries, and run them fewer times. If the corresponding environment variables
# are already set, keep those values.
if [ "$PR_TO_TEST" -ne 0 ] \
    && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ] \
    && [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ]
# For PRs w/o changes in test definitions, test only a subset of queries,
# and run them fewer times. If the corresponding environment variables are
# already set, keep those values.
#
# NOTE: too high CHPC_RUNS/CHPC_MAX_QUERIES may hit internal CI timeout.
if [ "$PR_TO_TEST" -ne 0 ] && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ]
then
    CHPC_RUNS=${CHPC_RUNS:-7}
    CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-20}
    CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-10}
else
    CHPC_RUNS=${CHPC_RUNS:-13}
    CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0}
@ -309,24 +308,19 @@ function get_profiles_watchdog
function get_profiles
{
    # Collect the profiles
    clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
    clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
    clickhouse-client --port $LEFT_SERVER_PORT --query "system flush logs" &

    clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
    clickhouse-client --port $RIGHT_SERVER_PORT --query "system flush logs" &

    wait

    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
    clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &

    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
    clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
@ -635,7 +629,7 @@ create view query_display_names as select * from

create view partial_query_times as select * from
    file('analyze/partial-query-times.tsv', TSVWithNamesAndTypes,
        'test text, query_index int, time_stddev float, time_median float')
        'test text, query_index int, time_stddev float, time_median double')
    ;

-- Report for partial queries that we could only run on the new server (e.g.
@ -5,22 +5,19 @@
<interserver_http_port remove="remove"/>
<tcp_with_proxy_port remove="remove"/>
<keeper_server remove="remove"/>
<zookeeper remove="remove"/>
<listen_host>::</listen_host>

<logger>
    <console>true</console>
</logger>

<text_log remove="remove">
    <table remove="remove"/>
</text_log>

<metric_log replace="replace">
    <database>system</database>
    <table>metric_log</table>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    <collect_interval_milliseconds>1000</collect_interval_milliseconds>
</metric_log>
<text_log remove="remove"/>
<crash_log remove="remove"/>
<query_views_log remove="remove"/>
<part_log remove="remove"/>
<opentelemetry_span_log remove="remove"/>
<session_log remove="remove"/>

<uncompressed_cache_size>1000000000</uncompressed_cache_size>
@ -24,6 +24,13 @@

<!-- Don't fail some prewarm queries too early -->
<timeout_before_checking_execution_speed>60</timeout_before_checking_execution_speed>

<!-- Query profiler enabled only for prewarm queries explicitly (see perf.py)
     This is needed for flamegraphs. -->
<query_profiler_real_time_period_ns>0</query_profiler_real_time_period_ns>
<query_profiler_cpu_time_period_ns>0</query_profiler_cpu_time_period_ns>
<!-- Disable memory profiler too, since due to max_untracked_memory some queries may add a trace entry and some may not -->
<memory_profiler_step>0</memory_profiler_step>
</default>
</profiles>
<users>
@ -102,7 +102,6 @@ then
    base=$(git -C right/ch merge-base pr origin/master)
    git -C right/ch diff --name-only "$base" pr -- . | tee all-changed-files.txt
    git -C right/ch diff --name-only "$base" pr -- tests/performance | tee changed-test-definitions.txt
    git -C right/ch diff --name-only "$base" pr -- docker/test/performance-comparison | tee changed-test-scripts.txt
    git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt
fi
@ -283,8 +283,11 @@ for query_index in queries_to_run:
    # test coverage. We disable profiler for normal runs because
    # it makes the results unstable.
    res = c.execute(q, query_id = prewarm_id,
        settings = {'max_execution_time': args.max_query_seconds,
            'query_profiler_real_time_period_ns': 10000000})
        settings = {
            'max_execution_time': args.max_query_seconds,
            'query_profiler_real_time_period_ns': 10000000,
            'memory_profiler_step': '4Mi',
        })
except clickhouse_driver.errors.Error as e:
    # Add query id to the exception to make debugging easier.
    e.args = (prewarm_id, *e.args)
@ -9,6 +9,7 @@ RUN apt-get update -y \

COPY s3downloader /s3downloader

ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
ENV DATASETS="hits visits"

COPY run.sh /
@ -56,7 +56,7 @@ function start()

start
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --dataset-names $DATASETS
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "SHOW DATABASES"
@ -109,7 +109,7 @@ function run_tests()
    fi

    set +e
    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
    clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
        "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
    set -e
}
@ -97,7 +97,7 @@ function run_tests()
    fi

    set +e
    clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
    clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
        --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt
@ -26,4 +26,6 @@ COPY ./stress /stress
COPY run.sh /

ENV DATASETS="hits visits"
ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"

CMD ["/bin/bash", "/run.sh"]
@ -46,11 +46,11 @@ function configure()
sudo chown root: /var/lib/clickhouse

# Set more frequent update period of asynchronous metrics to more frequently update information about real memory usage (less chance of OOM).
echo "<yandex><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></yandex>" \
echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
    > /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml

# Set maximum memory usage as half of total memory (less chance of OOM).
echo "<yandex><max_server_memory_usage_to_ram_ratio>0.5</max_server_memory_usage_to_ram_ratio></yandex>" \
echo "<clickhouse><max_server_memory_usage_to_ram_ratio>0.5</max_server_memory_usage_to_ram_ratio></clickhouse>" \
    > /etc/clickhouse-server/config.d/max_server_memory_usage_to_ram_ratio.xml
}

@ -112,7 +112,7 @@ configure
start

# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --dataset-names $DATASETS
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
@ -50,7 +50,7 @@ URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
echo
echo "Will download ${URL}"
echo
curl -O "${URL}" && chmod a+x clickhouse &&
curl -O "${URL}" && chmod a+x clickhouse || exit 1
echo
echo "Successfully downloaded the ClickHouse binary, you can run it as:
    ./clickhouse"
@ -23,15 +23,15 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p
- `user` — PostgreSQL user.
- `password` — User password.

## Dynamically adding new tables to replication
## Dynamically adding new tables to replication {#dynamically-adding-table-to-replication}

``` sql
ATTACH TABLE postgres_database.new_table;
```

It will work as well if there is a setting `materialized_postgresql_tables_list`.
When specifying a specific list of tables in the database using the setting [materialized_postgresql_tables_list](../../operations/settings/settings.md#materialized-postgresql-tables-list), it will be updated to the current state, taking into account the tables which were added by the `ATTACH TABLE` query.

## Dynamically removing tables from replication
## Dynamically removing tables from replication {#dynamically-removing-table-from-replication}

``` sql
DETACH TABLE postgres_database.table_to_remove;
@ -58,7 +58,7 @@ SETTINGS materialized_postgresql_max_block_size = 65536,
SELECT * FROM database1.table1;
```

It is also possible to change settings at run time.
The settings can be changed, if necessary, using a DDL query. But it is impossible to change the setting `materialized_postgresql_tables_list`. To update the list of tables in this setting, use the `ATTACH TABLE` query.

``` sql
ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = <new_size>;
@ -787,6 +787,8 @@ Moving data does not interfere with data replication. Therefore, different stora
After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`).
During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space.

A user can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](../../../operations/settings/merge-tree-settings.md#min-bytes-to-rebalance-partition-over-jbod) setting.

## Using S3 for Data Storage {#table_engine-mergetree-s3}

`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
@ -9,31 +9,44 @@ The `Merge` engine (not to be confused with `MergeTree`) does not store data its

Reading is automatically parallelized. Writing to a table is not supported. When reading, the indexes of tables that are actually being read are used, if they exist.

The `Merge` engine accepts parameters: the database name and a regular expression for tables.

## Examples {#examples}

Example 1:
## Creating a Table {#creating-a-table}

``` sql
Merge(hits, '^WatchLog')
CREATE TABLE ... Engine=Merge(db_name, tables_regexp)
```

Data will be read from the tables in the `hits` database that have names that match the regular expression ‘`^WatchLog`’.
**Engine Parameters**

Instead of the database name, you can use a constant expression that returns a string. For example, `currentDatabase()`.
- `db_name` — Possible values:
    - database name,
    - constant expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

- `tables_regexp` — A regular expression to match the table names in the specified DB or DBs.

Regular expressions — [re2](https://github.com/google/re2) (supports a subset of PCRE), case-sensitive.
See the notes about escaping symbols in regular expressions in the “match” section.
See the notes about escaping symbols in regular expressions in the "match" section.

When selecting tables to read, the `Merge` table itself will not be selected, even if it matches the regex. This is to avoid loops.
It is possible to create two `Merge` tables that will endlessly try to read each others’ data, but this is not a good idea.
## Usage {#usage}

When selecting tables to read, the `Merge` table itself is not selected, even if it matches the regex. This is to avoid loops.
It is possible to create two `Merge` tables that will endlessly try to read each others' data, but this is not a good idea.

The typical way to use the `Merge` engine is for working with a large number of `TinyLog` tables as if with a single table.

Example 2:
## Examples {#examples}

Let's say you have an old table (WatchLog_old) and decided to change partitioning without moving data to a new table (WatchLog_new) and you need to see data from both tables.
**Example 1**

Consider two databases `ABC_corporate_site` and `ABC_store`. The `all_visitors` table will contain IDs from the tables `visitors` in both databases.

``` sql
CREATE TABLE all_visitors (id UInt32) ENGINE=Merge(REGEXP('ABC_*'), 'visitors');
```

**Example 2**

Let's say you have an old table `WatchLog_old` and decided to change partitioning without moving data to a new table `WatchLog_new`, and you need to see data from both tables.

``` sql
CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64)
@ -46,8 +59,7 @@ INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);

CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');

SELECT *
FROM WatchLog
SELECT * FROM WatchLog;
```

``` text
@ -68,5 +80,4 @@ FROM WatchLog
**See Also**

- [Virtual columns](../../../engines/table-engines/special/index.md#table_engines-virtual_columns)

[Original article](https://clickhouse.com/docs/en/operations/table_engines/merge/) <!--hide-->
- [merge](../../../sql-reference/table-functions/merge.md) table function
@ -332,7 +332,7 @@ ORDER BY year, count(*) DESC

The following server was used:

Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical kernels total,128 GiB RAM,8x6 TB HD on hardware RAID-5
Two Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz, 16 physical cores total, 128 GiB RAM, 8x6 TB HD on hardware RAID-5

Execution time is the best of three runs. But starting from the second run, queries read data from the file system cache. No further caching occurs: the data is read out and processed in each run.
@ -16,10 +16,13 @@ The supported formats are:
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
| [Template](#format-template) | ✔ | ✔ |
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
| [CSV](#csv) | ✔ | ✔ |
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
@ -33,8 +36,10 @@ The supported formats are:
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
@ -51,6 +56,7 @@ The supported formats are:
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
@ -124,10 +130,17 @@ Only a small set of symbols are escaped. You can easily stumble onto a string va

Arrays are written as a list of comma-separated values in square brackets. Number items in the array are formatted normally. `Date` and `DateTime` types are written in single quotes. Strings are written in single quotes with the same escaping rules as above.

[NULL](../sql-reference/syntax.md) is formatted as `\N`.
[NULL](../sql-reference/syntax.md) is formatted according to the setting [format_tsv_null_representation](../operations/settings/settings.md#settings-format_tsv_null_representation) (default value is `\N`).

If the setting [input_format_tsv_empty_as_default](../operations/settings/settings.md#settings-input_format_tsv_empty_as_default) is enabled,
empty input fields are replaced with default values. For complex default expressions, [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too.

Each element of [Nested](../sql-reference/data-types/nested-data-structures/nested.md) structures is represented as an array.

In input data, ENUM values can be represented as names or as ids. First, we try to match the input value to the ENUM name. If we fail and the input value is a number, we try to match this number to the ENUM id.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_tsv_enum_as_number](../operations/settings/settings.md#settings-input_format_tsv_enum_as_number) to optimize ENUM parsing.

For example:

``` sql
@ -164,17 +177,35 @@ This format is also available under the name `TSVRaw`.
## TabSeparatedWithNames {#tabseparatedwithnames}

Differs from the `TabSeparated` format in that the column names are written in the first row.
During parsing, the first row is expected to contain the column names. You can use column names to determine their position and to check their correctness.
If the setting [input_format_with_names_use_header](../operations/settings/settings.md#settings-input_format_with_names_use_header) is set to 1,
the columns from the input data will be mapped to the columns of the table by their names; columns with unknown names will be skipped if the setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.

This format is also available under the name `TSVWithNames`.
|
||||
|
||||
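As a quick illustration (this query sketch is not from the original document; any simple `SELECT` works), the column-name header is emitted before the data rows:

``` sql
SELECT 'hello' AS s, 42 AS n FORMAT TSVWithNames
```

``` text
s	n
hello	42
```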
## TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes}

Differs from the `TabSeparated` format in that the column names are written to the first row, while the column types are in the second row.
During parsing, the first and second rows are completely ignored.
The first row with names is processed the same way as in the `TabSeparatedWithNames` format.
If the setting [input_format_with_types_use_header](../operations/settings/settings.md#settings-input_format_with_types_use_header) is set to 1, the types from the input data will be compared with the types of the corresponding columns of the table. Otherwise, the second row will be skipped.

This format is also available under the name `TSVWithNamesAndTypes`.

## TabSeparatedRawWithNames {#tabseparatedrawwithnames}

Differs from the `TabSeparatedWithNames` format in that the rows are written without escaping.
When parsing with this format, tabs or line feeds are not allowed in any field.

This format is also available under the name `TSVRawWithNames`.

## TabSeparatedRawWithNamesAndTypes {#tabseparatedrawwithnamesandtypes}

Differs from the `TabSeparatedWithNamesAndTypes` format in that the rows are written without escaping.
When parsing with this format, tabs or line feeds are not allowed in any field.

This format is also available under the name `TSVRawWithNamesAndTypes`.
## Template {#format-template}

This format allows specifying a custom format string with placeholders for values with a specified escaping rule.

@ -195,7 +226,7 @@ where `delimiter_i` is a delimiter between values (`$` symbol can be escaped as

- `Raw` (without escaping, similarly to `TSVRaw`)
- `None` (no escaping rule, see further)

If an escaping rule is omitted, then `None` will be used. `XML` and `Raw` are suitable only for output.
If an escaping rule is omitted, then `None` will be used. `XML` is suitable only for output.

So, for the following format string:

@ -375,17 +406,23 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR
When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, the leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR) types are all supported.

Empty unquoted input values are replaced with default values for the respective columns, if
[input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)
is enabled.
If the setting [input_format_csv_empty_as_default](../operations/settings/settings.md#settings-input_format_csv_empty_as_default) is enabled, empty unquoted input values are replaced with default values. For complex default expressions, [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too.

`NULL` is formatted as `\N` or `NULL` or an empty unquoted string (see settings [input_format_csv_unquoted_null_literal_as_null](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null) and [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)).
`NULL` is formatted according to the setting [format_csv_null_representation](../operations/settings/settings.md#settings-format_csv_null_representation) (the default value is `\N`).

In input data, ENUM values can be represented as names or as ids. First, we try to match the input value to the ENUM name. If we fail and the input value is a number, we try to match this number to the ENUM id.
If the input data contains only ENUM ids, it is recommended to enable the setting [input_format_csv_enum_as_number](../operations/settings/settings.md#settings-input_format_csv_enum_as_number) to optimize ENUM parsing.

The CSV format supports the output of totals and extremes the same way as `TabSeparated`.
## CSVWithNames {#csvwithnames}

Also prints the header row, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
Also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).

## CSVWithNamesAndTypes {#csvwithnamesandtypes}

Also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).

## CustomSeparated {#format-customseparated}

@ -657,10 +694,21 @@ Differs from `JSONEachRow`/`JSONStringsEachRow` in that ClickHouse will also yie
{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}}
|
||||
```
|
||||
|
||||
## JSONCompactEachRowWithNames {#jsoncompacteachrowwithnames}
|
||||
|
||||
Differs from `JSONCompactEachRow` format in that it also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
|
||||
## JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes}
|
||||
|
||||
Differs from `JSONCompactEachRow` format in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
## JSONCompactStringsEachRowWithNames {#jsoncompactstringseachrowwithnames}
|
||||
|
||||
Differs from `JSONCompactStringsEachRow` in that in that it also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
|
||||
## JSONCompactStringsEachRowWithNamesAndTypes {#jsoncompactstringseachrowwithnamesandtypes}
|
||||
|
||||
Differs from `JSONCompactEachRow`/`JSONCompactStringsEachRow` in that the column names and types are written as the first two rows.
|
||||
Differs from `JSONCompactStringsEachRow` in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
```json
|
||||
["'hello'", "multiply(42, number)", "range(5)"]
|
||||
@ -703,7 +751,7 @@ CREATE TABLE IF NOT EXISTS example_table
|
||||
- If `input_format_defaults_for_omitted_fields = 1`, then the default value for `x` equals `0`, but the default value of `a` equals `x * 2`.

!!! note "Warning"
    When inserting data with `insert_sample_with_metadata = 1`, ClickHouse consumes more computational resources, compared to insertion with `insert_sample_with_metadata = 0`.
    When inserting data with `input_format_defaults_for_omitted_fields = 1`, ClickHouse consumes more computational resources, compared to insertion with `input_format_defaults_for_omitted_fields = 0`.
### Selecting Data {#selecting-data}

@ -910,6 +958,13 @@ Array is represented as a varint length (unsigned [LEB128](https://en.wikipedia.

For [NULL](../sql-reference/syntax.md#null-literal) support, an additional byte containing 1 or 0 is added before each [Nullable](../sql-reference/data-types/nullable.md) value. If 1, then the value is `NULL` and this byte is interpreted as a separate value. If 0, the value after the byte is not `NULL`.

## RowBinaryWithNames {#rowbinarywithnames}

Similar to [RowBinary](#rowbinary), but with an added header (a small inspection sketch follows the list):

- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
- N `String`s specifying column names
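A rough way to inspect this header (an illustrative sketch, not from the original document; it assumes `SELECT 1` is inferred as a single `UInt8` column named `x`):

``` bash
# Expected bytes: 0x01 (one column), then 0x01 0x78 (length-prefixed name "x"),
# then 0x01 (the UInt8 value itself).
clickhouse-local --query "SELECT 1 AS x FORMAT RowBinaryWithNames" | xxd
```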
## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}

Similar to [RowBinary](#rowbinary), but with an added header:
@ -29,7 +29,7 @@ toc_title: Adopters

| <a href="https://www.benocs.com/" class="favicon">Benocs</a> | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
| <a href="https://www.bigo.sg/" class="favicon">BIGO</a> | Video | Computing Platform | — | — | [Blog Article, August 2020](https://www.programmersought.com/article/44544895251/) |
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
| <a href="https://www.bloomberg.com/">Bloomberg</a> | Finance, Media | Monitoring | — | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
| <a href="https://www.bloomberg.com/">Bloomberg</a> | Finance, Media | Monitoring | — | — | [Job opening, September 2021](https://careers.bloomberg.com/job/detail/94913), [slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
| <a href="https://bloxy.info" class="favicon">Bloxy</a> | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
| <a href="https://www.bytedance.com" class="favicon">Bytedance</a> | Social platforms | — | — | — | [The ClickHouse Meetup East, October 2020](https://www.youtube.com/watch?v=ckChUkC3Pns) |
| <a href="https://cardsmobile.ru/" class="favicon">CardsMobile</a> | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) |

@ -121,7 +121,7 @@ toc_title: Adopters

| <a href="https://rspamd.com/" class="favicon">Rspamd</a> | Antispam | Analytics | — | — | [Official Website](https://rspamd.com/doc/modules/clickhouse.html) |
| <a href="https://rusiem.com/en" class="favicon">RuSIEM</a> | SIEM | Main Product | — | — | [Official Website](https://rusiem.com/en/products/architecture) |
| <a href="https://www.s7.ru" class="favicon">S7 Airlines</a> | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
| <a href="https://www.sberbank.com/index" class="favicon">Sber</a> | Banking, Fintech, Retail, Cloud, Media | — | — | — | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) |
| <a href="https://www.sberbank.com/index" class="favicon">Sber</a> | Banking, Fintech, Retail, Cloud, Media | — | 128 servers | >1 PB | [Job advertisement, March 2021](https://career.habr.com/vacancies/1000073536) |
| <a href="https://www.scireum.de/" class="favicon">scireum GmbH</a> | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
| <a href="https://segment.com/" class="favicon">Segment</a> | Data processing | Main product | 9 * i3en.3xlarge nodes 7.5TB NVME SSDs, 96GB Memory, 12 vCPUs | — | [Slides, 2019](https://slides.com/abraithwaite/segment-clickhouse) |
| <a href="https://sembot.io/" class="favicon">sembot.io</a> | Shopping Ads | — | — | — | A comment on LinkedIn, 2020 |

@ -170,5 +170,7 @@ toc_title: Adopters

| <a href="https://cft.ru/" class="favicon">ЦФТ</a> | Banking, Financial products, Payments | — | — | — | [Meetup in Russian, April 2020](https://team.cft.ru/events/162) |
| <a href="https://promo.croc.ru/digitalworker" class="favicon">Цифровой Рабочий</a> | Industrial IoT, Analytics | — | — | — | [Blog post in Russian, March 2021](https://habr.com/en/company/croc/blog/548018/) |
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
| <a href="https://www.deepl.com/" class="favicon">Deepl</a> | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) |

[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->
@ -21,7 +21,7 @@ By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (lineari

ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of the ClickHouse server, but in both cases the configuration is almost the same `.xml` file. The main ClickHouse Keeper configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:

- `tcp_port` — Port for a client to connect (default for ZooKeeper is `2181`).
- `tcp_port_secure` — Secure port for a client to connect.
- `tcp_port_secure` — Secure port for an SSL connection between client and keeper-server.
- `server_id` — Unique server id; each participant of the ClickHouse Keeper cluster must have a unique number (1, 2, 3, and so on).
- `log_storage_path` — Path to coordination logs; it is better to store logs on a non-busy device (same as for ZooKeeper).
- `snapshot_storage_path` — Path to coordination snapshots.

@ -50,7 +50,11 @@ Internal coordination settings are located in `<keeper_server>.<coordination_set

- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000).
- `startup_timeout` — If the server does not connect to other quorum participants within the specified timeout, it will terminate (ms) (default: 30000).

Quorum configuration is located in `<keeper_server>.<raft_configuration>` section and contain servers description. The only parameter for the whole quorum is `secure`, which enables encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:
Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains the servers description.

The only parameter for the whole quorum is `secure`, which enables an encrypted connection for communication between quorum participants. The parameter can be set to `true` if an SSL connection is required for internal communication between nodes, or left unspecified otherwise.

The main parameters for each `<server>` are (a configuration sketch follows the list):

- `id` — Server identifier in a quorum.
- `hostname` — Hostname where this server is placed.
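A minimal three-node sketch of such a quorum (the hostnames and the internal Raft `port` value are illustrative assumptions, not taken from this document):

```xml
<keeper_server>
    <raft_configuration>
        <secure>false</secure>
        <server>
            <id>1</id>
            <hostname>keeper1.example.com</hostname>
            <port>9234</port> <!-- assumed internal Raft port -->
        </server>
        <server>
            <id>2</id>
            <hostname>keeper2.example.com</hostname>
            <port>9234</port>
        </server>
        <server>
            <id>3</id>
            <hostname>keeper3.example.com</hostname>
            <port>9234</port>
        </server>
    </raft_configuration>
</keeper_server>
```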
@ -7,7 +7,7 @@ toc_title: Configuration Files

ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have the main configs as `config.xml` and `users.xml` and write additional files in the `config.d` and `users.d` directories in `.yaml`.

All XML files should have the same root element, usually `<yandex>`. As for YAML, `yandex:` should not be present, the parser will insert it automatically.
All XML files should have the same root element, usually `<clickhouse>`. As for YAML, `clickhouse:` should not be present, the parser will insert it automatically.
## Override {#override}

@ -21,13 +21,13 @@ Some settings specified in the main configuration file can be overridden in othe

You can also declare attributes as coming from environment variables by using `from_env="VARIABLE_NAME"`:

```xml
<yandex>
<clickhouse>
    <macros>
        <replica from_env="REPLICA" />
        <layer from_env="LAYER" />
        <shard from_env="SHARD" />
    </macros>
</yandex>
</clickhouse>
```
## Substitution {#substitution}

@ -39,7 +39,7 @@ If you want to replace an entire element with a substitution use `include` as el

XML substitution example:

```xml
<yandex>
<clickhouse>
    <!-- Appends XML subtree found at `/profiles-in-zookeeper` ZK path to `<profiles>` element. -->
    <profiles from_zk="/profiles-in-zookeeper" />

@ -48,7 +48,7 @@ XML substitution example:

        <include from_zk="/users-in-zookeeper" />
        <include from_zk="/other-users-in-zookeeper" />
    </users>
</yandex>
</clickhouse>
```

Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.

@ -72,7 +72,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

```

``` xml
<yandex>
<clickhouse>
    <users>
        <alice>
            <profile>analytics</profile>

@ -83,7 +83,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

            <quota>analytics</quota>
        </alice>
    </users>
</yandex>
</clickhouse>
```

## YAML examples {#example}
@ -23,32 +23,32 @@ To enable Kerberos, one should include `kerberos` section in `config.xml`. This

Example (goes into `config.xml`):

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <kerberos />
</yandex>
</clickhouse>
```

With principal specification:

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <kerberos>
        <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
    </kerberos>
</yandex>
</clickhouse>
```

With filtering by realm:

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <kerberos>
        <realm>EXAMPLE.COM</realm>
    </kerberos>
</yandex>
</clickhouse>
```

!!! warning "Note"

@ -80,7 +80,7 @@ Parameters:

Example (goes into `users.xml`):

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <users>
        <!-- ... -->

@ -91,7 +91,7 @@ Example (goes into `users.xml`):

            </kerberos>
        </my_user>
    </users>
</yandex>
</clickhouse>
```

!!! warning "Warning"
@ -14,7 +14,7 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`.

**Example**

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <ldap_servers>
        <!-- Typical LDAP server. -->

@ -45,7 +45,7 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`.

            <enable_tls>no</enable_tls>
        </my_ad_server>
    </ldap_servers>
</yandex>
</clickhouse>
```

Note that you can define multiple LDAP servers inside the `ldap_servers` section using distinct names.

@ -90,7 +90,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by

**Example**

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <users>
        <!-- ... -->

@ -101,7 +101,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by

            </ldap>
        </my_user>
    </users>
</yandex>
</clickhouse>
```

Note that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously.

@ -125,7 +125,7 @@ At each login attempt, ClickHouse tries to find the user definition locally and

Goes into `config.xml`.

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <user_directories>
        <!-- Typical LDAP server. -->

@ -156,7 +156,7 @@ Goes into `config.xml`.

            </role_mapping>
        </ldap>
    </user_directories>
</yandex>
</clickhouse>
```

Note that `my_ldap_server` referred to in the `ldap` section inside the `user_directories` section must be a previously defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)).
@ -23,7 +23,7 @@ chmod a+x ./hardware.sh

./hardware.sh
```

3. Copy the output and send it to clickhouse-feedback@yandex-team.com
3. Copy the output and send it to feedback@clickhouse.com

All the results are published here: https://clickhouse.com/benchmark/hardware/
@ -69,6 +69,8 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.

</compression>
```

<!--

## encryption {#server-settings-encryption}

Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). The key (or keys) should be written in environment variables or set in the configuration file.

@ -131,7 +133,7 @@ Also, users can add nonce that must be 12 bytes long (by default encryption and

```xml
<encryption_codecs>
    <aes_128_gcm_siv>
        <nonce>0123456789101</nonce>
        <nonce>012345678910</nonce>
    </aes_128_gcm_siv>
</encryption_codecs>
```

@ -148,6 +150,8 @@ Or it can be set in hex:

Everything mentioned above can be applied for `aes_256_gcm_siv` (but the key must be 32 bytes long).

-->

## custom_settings_prefixes {#custom_settings_prefixes}

List of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be separated with commas.

@ -366,7 +370,7 @@ Opens `https://tabix.io/` when accessing `http://localhost: http_port`.

<![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]>
</http_server_default_response>
```

## hsts_max_age
## hsts_max_age {#hsts-max-age}

Expired time for HSTS in seconds. The default value is 0, which means HSTS is disabled. If you set a positive number, HSTS will be enabled and the max-age will be the number you set.
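For example, a minimal sketch enabling HSTS for ten minutes (the value is illustrative):

```xml
<hsts_max_age>600</hsts_max_age>
```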
@ -485,13 +489,9 @@ Usually this value does not need to be changed, since:

- default value is large enough,
- and for accepting client's connections the server has a separate thread.

So even if you have `TcpExtListenOverflows` (from `nstat`) non zero and this
counter grows for ClickHouse server it does not mean that this value need to be
increased, since:
- usually if 4096 is not enough it shows some internal ClickHouse scaling
  issue, so it is better to report an issue.
- and it does not mean that the server can handle more connections later (and
  even if it can, clients can already goes away / disconnect).
So even if you have `TcpExtListenOverflows` (from `nstat`) non zero and this counter grows for the ClickHouse server, it does not mean that this value needs to be increased, since:
- usually if 4096 is not enough it indicates some internal ClickHouse scaling issue, so it is better to report an issue.
- and it does not mean that the server can handle more connections later (and even if it could, by that moment clients may be gone or disconnected).

Examples:
@ -786,14 +786,14 @@ It is enabled by default. If it`s not, you can do this manually.

To manually turn on metrics history collection [`system.metric_log`](../../operations/system-tables/metric_log.md), create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:

``` xml
<yandex>
<clickhouse>
    <metric_log>
        <database>system</database>
        <table>metric_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
    </metric_log>
</yandex>
</clickhouse>
```

**Disabling**

@ -801,9 +801,9 @@ To manually turn on metrics history collection [`system.metric_log`](../../opera

To disable `metric_log` setting, you should create the following file `/etc/clickhouse-server/config.d/disable_metric_log.xml` with the following content:

``` xml
<yandex>
<clickhouse>
    <metric_log remove="1" />
</yandex>
</clickhouse>
```
## replicated_merge_tree {#server_configuration_parameters-replicated_merge_tree}

@ -1039,7 +1039,7 @@ Parameters:

**Example**

```xml
<yandex>
<clickhouse>
    <text_log>
        <level>notice</level>
        <database>system</database>

@ -1048,7 +1048,7 @@ Parameters:

        <!-- <partition_by>event_date</partition_by> -->
        <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
    </text_log>
</yandex>
</clickhouse>
```

@ -1290,6 +1290,7 @@ This section contains the following parameters:

- [Replication](../../engines/table-engines/mergetree-family/replication.md)
- [ZooKeeper Programmer’s Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
- [Optional secured communication between ClickHouse and Zookeeper](../ssl-zookeeper.md#secured-communication-with-zookeeper)

## use_minimalistic_part_header_in_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper}
@ -328,3 +328,18 @@ Possible values:

Default value: `true`.

By default, the ClickHouse server checks at table creation the data type of a column for sampling or the sampling expression. If you already have tables with an incorrect sampling expression and do not want the server to raise an exception during startup, set `check_sample_column_is_correct` to `false`.

## min_bytes_to_rebalance_partition_over_jbod {#min-bytes-to-rebalance-partition-over-jbod}

Sets the minimal amount of bytes to enable balancing when distributing new big parts over volume disks [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures).

Possible values:

- Positive integer.
- 0 — Balancing is disabled.

Default value: `0`.

**Usage**

The value of the `min_bytes_to_rebalance_partition_over_jbod` setting should be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception.
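A configuration sketch (assuming the setting is placed in the `<merge_tree>` section of the server config, as with other MergeTree settings; the 1 GiB threshold is illustrative):

```xml
<merge_tree>
    <!-- Illustrative 1 GiB threshold; tune to your part sizes. -->
    <min_bytes_to_rebalance_partition_over_jbod>1073741824</min_bytes_to_rebalance_partition_over_jbod>
</merge_tree>
```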
@ -399,7 +399,7 @@ Default value: 1.

## input_format_defaults_for_omitted_fields {#session_settings-input_format_defaults_for_omitted_fields}

When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats.
When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv), [TabSeparated](../../interfaces/formats.md#tabseparated) formats and formats with `WithNames`/`WithNamesAndTypes` suffixes.

!!! note "Note"
    When this option is enabled, extended table metadata is sent from the server to the client. It consumes additional computing resources on the server and can reduce performance.

@ -417,14 +417,20 @@ When enabled, replace empty input fields in TSV with default values. For complex

Disabled by default.

## input_format_csv_empty_as_default {#settings-input-format-csv-empty-as-default}

When enabled, replace empty input fields in CSV with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.

Enabled by default.
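A sketch of the effect (the table here is hypothetical, not from the original document):

```sql
CREATE TABLE csv_empty_demo (id Int32, value String DEFAULT 'none') ENGINE = Memory;
SET input_format_csv_empty_as_default = 1;
-- The empty second field below is replaced with the column default 'none'.
INSERT INTO csv_empty_demo FORMAT CSV 1,
SELECT * FROM csv_empty_demo;
```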
## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}

Enables or disables parsing enum values as enum ids for TSV input format.
When enabled, always treat enum values as enum ids for the TSV input format. It is recommended to enable this setting if the data contains only enum ids, to optimize enum parsing.

Possible values:

- 0 — Enum values are parsed as values.
- 1 — Enum values are parsed as enum IDs.
- 0 — Enum values are parsed as values or as enum IDs.
- 1 — Enum values are parsed only as enum IDs.

Default value: 0.
@ -438,10 +444,39 @@ CREATE TABLE table_with_enum_column_for_tsv_insert (Id Int32,Value Enum('first'

When the `input_format_tsv_enum_as_number` setting is enabled:

Query:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102	2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103	1;
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
```

Query:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103	'first';
```

throws an exception.

When the `input_format_tsv_enum_as_number` setting is disabled:

Query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102	2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103	'first';
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

@ -456,15 +491,6 @@ Result:

└─────┴────────┘
```

When the `input_format_tsv_enum_as_number` setting is disabled, the `INSERT` query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102	2;
```

throws an exception.
## input_format_null_as_default {#settings-input-format-null-as-default}

Enables or disables the initialization of [NULL](../../sql-reference/syntax.md#null-literal) fields with [default values](../../sql-reference/statements/create/table.md#create-default-values), if data type of these fields is not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable).

@ -541,7 +567,39 @@ To improve insert performance, we recommend disabling this check if you are sure

Supported formats:

- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
- [CSVWithNamesAndTypes](../../interfaces/formats.md#csvwithnamesandtypes)
- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md#tabseparatedwithnamesandtypes)
- [JSONCompactEachRowWithNames](../../interfaces/formats.md#jsoncompacteachrowwithnames)
- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompacteachrowwithnamesandtypes)
- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md#jsoncompactstringseachrowwithnames)
- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompactstringseachrowwithnamesandtypes)
- [RowBinaryWithNames](../../interfaces/formats.md#rowbinarywithnames-rowbinarywithnames)
- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes)

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.
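A sketch of name-based mapping (hypothetical table; the input columns arrive in a different order than declared):

```sql
CREATE TABLE header_demo (a Int32, b String) ENGINE = Memory;
SET input_format_with_names_use_header = 1;
-- The header row lets the parser map "b" and "a" by name rather than by position.
INSERT INTO header_demo FORMAT CSVWithNames "b","a"
"hello",1
SELECT * FROM header_demo;
```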
## input_format_with_types_use_header {#settings-input-format-with-types-use-header}

Controls whether the format parser should check if data types from the input data match data types of the target table.

Supported formats:

- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
- [CSVWithNamesAndTypes](../../interfaces/formats.md#csvwithnamesandtypes)
- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md#tabseparatedwithnamesandtypes)
- [JSONCompactEachRowWithNames](../../interfaces/formats.md#jsoncompacteachrowwithnames)
- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompacteachrowwithnamesandtypes)
- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md#jsoncompactstringseachrowwithnames)
- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompactstringseachrowwithnamesandtypes)
- [RowBinaryWithNames](../../interfaces/formats.md#rowbinarywithnames-rowbinarywithnames)
- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes)

Possible values:
@ -954,6 +1012,16 @@ Example:

log_query_views=1
```

## log_formatted_queries {#settings-log-formatted-queries}

Allows logging formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table.

Possible values:

- 0 — Formatted queries are not logged in the system table.
- 1 — Formatted queries are logged in the system table.

Default value: `0`.
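A quick sketch of checking the result (it assumes the `formatted_query` column of `system.query_log`, which accompanies this setting):

```sql
SET log_formatted_queries = 1;
SELECT 1;
SYSTEM FLUSH LOGS;
SELECT formatted_query FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 1;
```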
## log_comment {#settings-log-comment}

@ -1397,6 +1465,32 @@ Minimum count of executing same expression before it is get compiled.

Default value: `3`.

## compile_aggregate_expressions {#compile_aggregate_expressions}

Enables or disables JIT compilation of aggregate functions to native code. Enabling this setting can improve performance.

Possible values:

- 0 — Aggregation is done without JIT compilation.
- 1 — Aggregation is done using JIT compilation.

Default value: `1`.

**See Also**

- [min_count_to_compile_aggregate_expression](#min_count_to_compile_aggregate_expression)

## min_count_to_compile_aggregate_expression {#min_count_to_compile_aggregate_expression}

The minimum number of identical aggregate expressions to start JIT compilation. Works only if the [compile_aggregate_expressions](#compile_aggregate_expressions) setting is enabled.

Possible values:

- Positive integer.
- 0 — Identical aggregate expressions are always JIT-compiled.

Default value: `3`.

## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}

Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format.
@ -1512,18 +1606,14 @@ When `output_format_json_quote_denormals = 1`, the query returns:

The character is interpreted as a delimiter in the CSV data. By default, the delimiter is `,`.

## input_format_csv_unquoted_null_literal_as_null {#settings-input_format_csv_unquoted_null_literal_as_null}

For CSV input format enables or disables parsing of unquoted `NULL` as literal (synonym for `\N`).

## input_format_csv_enum_as_number {#settings-input_format_csv_enum_as_number}

Enables or disables parsing enum values as enum ids for CSV input format.
When enabled, always treat enum values as enum ids for the CSV input format. It is recommended to enable this setting if the data contains only enum ids, to optimize enum parsing.

Possible values:

- 0 — Enum values are parsed as values.
- 1 — Enum values are parsed as enum IDs.
- 0 — Enum values are parsed as values or as enum IDs.
- 1 — Enum values are parsed only as enum IDs.

Default value: 0.
@ -1537,29 +1627,52 @@ CREATE TABLE table_with_enum_column_for_csv_insert (Id Int32,Value Enum('first'

When the `input_format_csv_enum_as_number` setting is enabled:

Query:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
```

Query:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 103,'first'
```

throws an exception.

When the `input_format_csv_enum_as_number` setting is disabled:

Query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 103,'first'
SELECT * FROM table_with_enum_column_for_csv_insert;
```

Result:

```text
┌──Id─┬─Value─────┐
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴───────────┘
└─────┴────────┘
┌──Id─┬─Value─┐
│ 103 │ first │
└─────┴───────┘
```

When the `input_format_csv_enum_as_number` setting is disabled, the `INSERT` query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
```

throws an exception.

## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}

Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
@ -1751,9 +1864,11 @@ Do not merge aggregation states from different servers for distributed query pro

Possible values:

- 0 — Disabled (final query processing is done on the initiator node).
- 1 - Do not merge aggregation states from different servers for distributed query processing (query completelly processed on the shard, initiator only proxy the data), can be used in case it is for certain that there are different keys on different shards.
- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possible when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).
- `0` — Disabled (final query processing is done on the initiator node).
- `1` - Do not merge aggregation states from different servers for distributed query processing (the query is completely processed on the shard, the initiator only proxies the data); can be used when it is certain that there are different keys on different shards.
- `2` - Same as `1`, but applies `ORDER BY` and `LIMIT` on the initiator (this is not possible when the query is processed completely on the remote node, as with `distributed_group_by_no_merge=1`); can be used for queries with `ORDER BY` and/or `LIMIT`.

Default value: `0`

**Example**

@ -1784,29 +1899,27 @@ FORMAT PrettyCompactMonoBlock

└───────┘
```

Default value: 0
## distributed_push_down_limit {#distributed-push-down-limit}
## distributed_push_down_limit (#distributed-push-down-limit}

LIMIT will be applied on each shard separatelly.
Enables or disables applying [LIMIT](#limit) on each shard separately.

This will allow to avoid:

- Sending extra rows over network;
- Processing rows behind the limit on the initiator.
- sending extra rows over network,
- processing rows behind the limit on the initiator.

It is possible if at least one of the following conditions met:

- `distributed_group_by_no_merge` > 0
- query **does not have `GROUP BY`/`DISTINCT`/`LIMIT BY`**, but it has `ORDER BY`/`LIMIT`.
- query **has `GROUP BY`/`DISTINCT`/`LIMIT BY`** with `ORDER BY`/`LIMIT` and:
  - `optimize_skip_unused_shards_limit` is enabled
  - `optimize_distributed_group_by_sharding_key` is enabled
Starting from version 21.9 you cannot get inaccurate results anymore, since `distributed_push_down_limit` changes query execution only if at least one of the conditions is met:
- [distributed_group_by_no_merge](#distributed-group-by-no-merge) > 0.
- Query **does not have** `GROUP BY`/`DISTINCT`/`LIMIT BY`, but it has `ORDER BY`/`LIMIT`.
- Query **has** `GROUP BY`/`DISTINCT`/`LIMIT BY` with `ORDER BY`/`LIMIT` and:
  - [optimize_skip_unused_shards](#optimize-skip-unused-shards) is enabled.
  - [optimize_distributed_group_by_sharding_key](#optimize-distributed-group-by-sharding-key) is enabled.

Possible values:

- 0 - Disabled
- 1 - Enabled
- 0 — Disabled.
- 1 — Enabled.

Default value: `1`.

See also:
@ -1920,6 +2033,7 @@ Default value: 0

See also:

- [distributed_group_by_no_merge](#distributed-group-by-no-merge)
- [distributed_push_down_limit](#distributed-push-down-limit)
- [optimize_skip_unused_shards](#optimize-skip-unused-shards)

!!! note "Note"
@ -2875,9 +2989,9 @@ Possible values:

Default value: `1`.

## output_format_csv_null_representation {#output_format_csv_null_representation}
## format_csv_null_representation {#format_csv_null_representation}

Defines the representation of `NULL` for [CSV](../../interfaces/formats.md#csv) output format. User can set any string as a value, for example, `My NULL`.
Defines the representation of `NULL` for the [CSV](../../interfaces/formats.md#csv) output and input formats. The user can set any string as a value, for example, `My NULL`.

Default value: `\N`.

@ -2900,7 +3014,7 @@ Result

Query

```sql
SET output_format_csv_null_representation = 'My NULL';
SET format_csv_null_representation = 'My NULL';
SELECT * FROM csv_custom_null FORMAT CSV;
```

@ -2912,9 +3026,9 @@ My NULL

My NULL
```

## output_format_tsv_null_representation {#output_format_tsv_null_representation}
## format_tsv_null_representation {#format_tsv_null_representation}

Defines the representation of `NULL` for [TSV](../../interfaces/formats.md#tabseparated) output format. User can set any string as a value, for example, `My NULL`.
Defines the representation of `NULL` for the [TSV](../../interfaces/formats.md#tabseparated) output and input formats. The user can set any string as a value, for example, `My NULL`.

Default value: `\N`.

@ -2937,7 +3051,7 @@ Result

Query

```sql
SET output_format_tsv_null_representation = 'My NULL';
SET format_tsv_null_representation = 'My NULL';
SELECT * FROM tsv_custom_null FORMAT TSV;
```
@ -3831,6 +3945,21 @@ Default value: `0`.

- [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting

## describe_include_subcolumns {#describe_include_subcolumns}

Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query, for example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md#finding-null) or [Array](../../sql-reference/data-types/array.md#array-size) data type.

Possible values:

- 0 — Subcolumns are not included in `DESCRIBE` queries.
- 1 — Subcolumns are included in `DESCRIBE` queries.

Default value: `0`.

**Example**

See an example for the [DESCRIBE](../../sql-reference/statements/describe-table.md) statement.
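A minimal sketch (hypothetical table):

```sql
SET describe_include_subcolumns = 1;
CREATE TABLE describe_demo (t Tuple(a Int32, b String), n Nullable(Int32)) ENGINE = Memory;
-- With the setting enabled, subcolumns such as t.a, t.b and n.null are listed too.
DESCRIBE TABLE describe_demo;
```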
## async_insert {#async-insert}

Enables or disables asynchronous inserts. This makes sense only for insertion over the HTTP protocol. Note that deduplication does not work for such inserts.
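A rough sketch over HTTP (the host, the table, and the companion setting `wait_for_async_insert` are assumptions here, not taken from this document):

```bash
# Queue the insert asynchronously; wait_for_async_insert=1 (assumed) blocks
# until the buffered data is actually flushed to the table.
curl -sS "http://localhost:8123/?async_insert=1&wait_for_async_insert=1" \
    --data-binary "INSERT INTO demo_events FORMAT JSONEachRow {\"id\": 1}"
```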
74
docs/en/operations/ssl-zookeeper.md
Normal file

@ -0,0 +1,74 @@
---
toc_priority: 45
toc_title: Secured communication with Zookeeper
---

# Optional secured communication between ClickHouse and Zookeeper {#secured-communication-with-zookeeper}

You should specify `ssl.keyStore.location`, `ssl.keyStore.password` and `ssl.trustStore.location`, `ssl.trustStore.password` for communication with the ClickHouse client over SSL. These options are available from Zookeeper version 3.5.2.

You can add `zookeeper.crt` to trusted certificates.

``` bash
sudo cp zookeeper.crt /usr/local/share/ca-certificates/zookeeper.crt
sudo update-ca-certificates
```

The client section in `config.xml` will look like:

``` xml
<client>
    <certificateFile>/etc/clickhouse-server/client.crt</certificateFile>
    <privateKeyFile>/etc/clickhouse-server/client.key</privateKeyFile>
    <loadDefaultCAFile>true</loadDefaultCAFile>
    <cacheSessions>true</cacheSessions>
    <disableProtocols>sslv2,sslv3</disableProtocols>
    <preferServerCiphers>true</preferServerCiphers>
    <invalidCertificateHandler>
        <name>RejectCertificateHandler</name>
    </invalidCertificateHandler>
</client>
```

Add Zookeeper to the ClickHouse config with some cluster and macros:

``` xml
<yandex>
    <zookeeper>
        <node>
            <host>localhost</host>
            <port>2281</port>
            <secure>1</secure>
        </node>
    </zookeeper>
</yandex>
```

Start `clickhouse-server`. In the logs you should see:

```text
<Trace> ZooKeeper: initialized, hosts: secure://localhost:2281
```

The prefix `secure://` indicates that the connection is secured by SSL.

To ensure traffic is encrypted, run `tcpdump` on the secured port:

```bash
tcpdump -i any dst port 2281 -nnXS
```

And query in `clickhouse-client`:

```sql
SELECT * FROM system.zookeeper WHERE path = '/';
```

On an unencrypted connection you will see something like this in the `tcpdump` output:

```text
..../zookeeper/q
uota.
```

On an encrypted connection you should not see this.
@ -22,7 +22,7 @@ ClickHouse supports zero-copy replication for `S3` and `HDFS` disks, which means

Configuration markup:

``` xml
<yandex>
<clickhouse>
    <storage_configuration>
        <disks>
            <hdfs>

@ -44,7 +44,7 @@ Configuration markup:

        <merge_tree>
            <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
        </merge_tree>
</yandex>
</clickhouse>
```

Required parameters:

@ -96,7 +96,7 @@ Optional parameters:

Example of disk configuration:

``` xml
<yandex>
<clickhouse>
    <storage_configuration>
        <disks>
            <disk_s3>

@ -113,7 +113,7 @@ Example of disk configuration:

            </disk_s3_encrypted>
        </disks>
    </storage_configuration>
</yandex>
</clickhouse>
```

## Storing Data on Web Server {#storing-data-on-webserver}

@ -127,7 +127,7 @@ Web server storage is supported only for the [MergeTree](../engines/table-engine

A ready test case. You need to add this configuration to the config:

``` xml
<yandex>
<clickhouse>
    <storage_configuration>
        <disks>
            <web>

@ -145,7 +145,7 @@ A ready test case. You need to add this configuration to config:

            </web>
        </policies>
    </storage_configuration>
</yandex>
</clickhouse>
```

And then execute this query:
@ -24,6 +24,11 @@ Columns:

- `is_in_primary_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the column is in the primary key expression.
- `is_in_sampling_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the column is in the sampling key expression.
- `compression_codec` ([String](../../sql-reference/data-types/string.md)) — Compression codec name.
- `character_octet_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum length in bytes for binary data, character data, or text data and images. In ClickHouse it makes sense only for the `FixedString` data type. Otherwise, the `NULL` value is returned.
- `numeric_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Accuracy of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it is the bit width for integer types and the decimal precision for `Decimal` types. Otherwise, the `NULL` value is returned.
- `numeric_precision_radix` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The base of the number system for the accuracy of approximate numeric data, exact numeric data, integer data or monetary data. In ClickHouse it is 2 for integer types and 10 for `Decimal` types. Otherwise, the `NULL` value is returned.
- `numeric_scale` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The scale of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it makes sense only for `Decimal` types. Otherwise, the `NULL` value is returned.
- `datetime_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Decimal precision of the `DateTime64` data type. For other data types, the `NULL` value is returned.

**Example**
@ -34,10 +39,11 @@ SELECT * FROM system.columns LIMIT 2 FORMAT Vertical;

```text
Row 1:
──────
database: system
table: aggregate_function_combinators
name: name
database: INFORMATION_SCHEMA
table: COLUMNS
name: table_catalog
type: String
position: 1
default_kind:
default_expression:
data_compressed_bytes: 0

@ -49,13 +55,19 @@ is_in_sorting_key: 0

is_in_primary_key: 0
is_in_sampling_key: 0
compression_codec:
character_octet_length: ᴺᵁᴸᴸ
numeric_precision: ᴺᵁᴸᴸ
numeric_precision_radix: ᴺᵁᴸᴸ
numeric_scale: ᴺᵁᴸᴸ
datetime_precision: ᴺᵁᴸᴸ

Row 2:
──────
database: system
table: aggregate_function_combinators
name: is_internal
type: UInt8
database: INFORMATION_SCHEMA
table: COLUMNS
name: table_schema
type: String
position: 2
default_kind:
default_expression:
data_compressed_bytes: 0

@ -67,6 +79,11 @@ is_in_sorting_key: 0

is_in_primary_key: 0
is_in_sampling_key: 0
compression_codec:
character_octet_length: ᴺᵁᴸᴸ
numeric_precision: ᴺᵁᴸᴸ
numeric_precision_radix: ᴺᵁᴸᴸ
numeric_scale: ᴺᵁᴸᴸ
datetime_precision: ᴺᵁᴸᴸ
```

The `system.columns` table contains the following columns (the column type is shown in brackets):
@ -9,6 +9,7 @@ Columns:

- `data_path` ([String](../../sql-reference/data-types/string.md)) — Data path.
- `metadata_path` ([String](../../sql-reference/data-types/string.md)) — Metadata path.
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Database UUID.
- `comment` ([String](../../sql-reference/data-types/string.md)) — Database comment.

The `name` column from this system table is used for implementing the `SHOW DATABASES` query.
@ -17,22 +18,20 @@ The `name` column from this system table is used for implementing the `SHOW DATA

Create a database.

``` sql
CREATE DATABASE test
CREATE DATABASE test;
```

Check all of the available databases to the user.

``` sql
SELECT * FROM system.databases
SELECT * FROM system.databases;
```

``` text
┌─name───────────────────────────┬─engine─┬─data_path──────────────────┬─metadata_path───────────────────────────────────────────────────────┬─────────────────────────────────uuid─┐
│ _temporary_and_external_tables │ Memory │ /var/lib/clickhouse/       │                                                                     │ 00000000-0000-0000-0000-000000000000 │
│ default                        │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/d31/d317b4bd-3595-4386-81ee-c2334694128a/ │ d317b4bd-3595-4386-81ee-c2334694128a │
│ test                           │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/39b/39bf0cc5-4c06-4717-87fe-c75ff3bd8ebb/ │ 39bf0cc5-4c06-4717-87fe-c75ff3bd8ebb │
│ system                         │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/1d1/1d1c869d-e465-4b1b-a51f-be033436ebf9/ │ 1d1c869d-e465-4b1b-a51f-be033436ebf9 │
└────────────────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┘
┌─name───────────────┬─engine─┬─data_path──────────────────┬─metadata_path───────────────────────────────────────────────────────┬─uuid─────────────────────────────────┬─comment─┐
│ INFORMATION_SCHEMA │ Memory │ /var/lib/clickhouse/       │                                                                     │ 00000000-0000-0000-0000-000000000000 │         │
│ default            │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/d31/d317b4bd-3595-4386-81ee-c2334694128a/ │ 24363899-31d7-42a0-a436-389931d752a0 │         │
│ information_schema │ Memory │ /var/lib/clickhouse/       │                                                                     │ 00000000-0000-0000-0000-000000000000 │         │
│ system             │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/1d1/1d1c869d-e465-4b1b-a51f-be033436ebf9/ │ 03e9f3d1-cc88-4a49-83e9-f3d1cc881a49 │         │
└────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┴─────────┘
```

[Original article](https://clickhouse.com/docs/en/operations/system-tables/databases) <!--hide-->
@ -34,7 +34,7 @@ System log tables can be customized by creating a config file with the same name
|
||||
An example:
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<database>system</database>
|
||||
<table>query_log</table>
|
||||
@ -45,7 +45,7 @@ An example:
|
||||
-->
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</query_log>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
By default, table growth is unlimited. To control a size of a table, you can use [TTL](../../sql-reference/statements/alter/ttl.md#manipulations-with-table-ttl) settings for removing outdated log records. Also you can use the partitioning feature of `MergeTree`-engine tables.
|
||||
|
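
As a minimal sketch (assuming the default `system.query_log` table, which has an `event_date` column; the 30-day interval is illustrative), outdated records could be expired like this:

``` sql
ALTER TABLE system.query_log MODIFY TTL event_date + INTERVAL 30 DAY; -- drop log records older than 30 days
```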
210
docs/en/operations/system-tables/information_schema.md
Normal file
@ -0,0 +1,210 @@
# INFORMATION_SCHEMA {#information-schema}

`INFORMATION_SCHEMA` (`information_schema`) is a system database that contains views. Using these views, you can get information about the metadata of database objects. These views read data from the columns of the [system.columns](../../operations/system-tables/columns.md), [system.databases](../../operations/system-tables/databases.md) and [system.tables](../../operations/system-tables/tables.md) system tables.

The structure and composition of system tables may change in different versions of the product, but the support of `information_schema` makes it possible to change the structure of system tables without changing the method of access to metadata. Metadata requests do not depend on the DBMS used.

``` sql
SHOW TABLES FROM INFORMATION_SCHEMA;
```

``` text
┌─name─────┐
│ COLUMNS  │
│ SCHEMATA │
│ TABLES   │
│ VIEWS    │
└──────────┘
```

`INFORMATION_SCHEMA` contains the following views:

- [COLUMNS](#columns)
- [SCHEMATA](#schemata)
- [TABLES](#tables)
- [VIEWS](#views)
## COLUMNS {#columns}

Contains columns read from the [system.columns](../../operations/system-tables/columns.md) system table and columns that are not supported in ClickHouse or do not make sense (always `NULL`), but are required by the standard.

Columns:

- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `column_name` ([String](../../sql-reference/data-types/string.md)) — Column name.
- `ordinal_position` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Ordinal position of a column in a table, starting with 1.
- `column_default` ([String](../../sql-reference/data-types/string.md)) — Expression for the default value, or an empty string if it is not defined.
- `is_nullable` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the column type is `Nullable`.
- `data_type` ([String](../../sql-reference/data-types/string.md)) — Column type.
- `character_maximum_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum length in bytes for binary data, character data, or text data and images. In ClickHouse it makes sense only for the `FixedString` data type. Otherwise, the `NULL` value is returned.
- `character_octet_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum length in bytes for binary data, character data, or text data and images. In ClickHouse it makes sense only for the `FixedString` data type. Otherwise, the `NULL` value is returned.
- `numeric_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Accuracy of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it is the bit width for integer types and the decimal precision for `Decimal` types. Otherwise, the `NULL` value is returned.
- `numeric_precision_radix` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The base of the number system for the accuracy of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it's 2 for integer types and 10 for `Decimal` types. Otherwise, the `NULL` value is returned.
- `numeric_scale` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The scale of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it makes sense only for `Decimal` types. Otherwise, the `NULL` value is returned.
- `datetime_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Decimal precision of the `DateTime64` data type. For other data types, the `NULL` value is returned.
- `character_set_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `character_set_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `character_set_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `collation_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `collation_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `collation_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `domain_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `domain_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `domain_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.

**Example**

Query:

``` sql
SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE (table_schema=currentDatabase() OR table_schema='') AND table_name NOT LIKE '%inner%' LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
table_catalog:            default
table_schema:             default
table_name:               describe_example
column_name:              id
ordinal_position:         1
column_default:
is_nullable:              0
data_type:                UInt64
character_maximum_length: ᴺᵁᴸᴸ
character_octet_length:   ᴺᵁᴸᴸ
numeric_precision:        64
numeric_precision_radix:  2
numeric_scale:            0
datetime_precision:       ᴺᵁᴸᴸ
character_set_catalog:    ᴺᵁᴸᴸ
character_set_schema:     ᴺᵁᴸᴸ
character_set_name:       ᴺᵁᴸᴸ
collation_catalog:        ᴺᵁᴸᴸ
collation_schema:         ᴺᵁᴸᴸ
collation_name:           ᴺᵁᴸᴸ
domain_catalog:           ᴺᵁᴸᴸ
domain_schema:            ᴺᵁᴸᴸ
domain_name:              ᴺᵁᴸᴸ
```

## SCHEMATA {#schemata}

Contains columns read from the [system.databases](../../operations/system-tables/databases.md) system table and columns that are not supported in ClickHouse or do not make sense (always `NULL`), but are required by the standard.

Columns:

- `catalog_name` ([String](../../sql-reference/data-types/string.md)) — The name of the database.
- `schema_name` ([String](../../sql-reference/data-types/string.md)) — The name of the database.
- `schema_owner` ([String](../../sql-reference/data-types/string.md)) — Schema owner name, always `'default'`.
- `default_character_set_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `default_character_set_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `default_character_set_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
- `sql_path` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.

**Example**

Query:

``` sql
SELECT * FROM information_schema.schemata WHERE schema_name ILIKE 'information_schema' LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
catalog_name:                  INFORMATION_SCHEMA
schema_name:                   INFORMATION_SCHEMA
schema_owner:                  default
default_character_set_catalog: ᴺᵁᴸᴸ
default_character_set_schema:  ᴺᵁᴸᴸ
default_character_set_name:    ᴺᵁᴸᴸ
sql_path:                      ᴺᵁᴸᴸ
```

## TABLES {#tables}

Contains columns read from the [system.tables](../../operations/system-tables/tables.md) system table.

Columns:

- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `table_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Table type. Possible values:
    - `BASE TABLE`
    - `VIEW`
    - `FOREIGN TABLE`
    - `LOCAL TEMPORARY`
    - `SYSTEM VIEW`

**Example**

Query:

``` sql
SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (table_schema = currentDatabase() OR table_schema = '') AND table_name NOT LIKE '%inner%' LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
table_catalog: default
table_schema:  default
table_name:    describe_example
table_type:    BASE TABLE
```

## VIEWS {#views}

Contains columns read from the [system.tables](../../operations/system-tables/tables.md) system table, when the table engine [View](../../engines/table-engines/special/view.md) is used.

Columns:

- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `view_definition` ([String](../../sql-reference/data-types/string.md)) — `SELECT` query for the view.
- `check_option` ([String](../../sql-reference/data-types/string.md)) — `NONE`, no checking.
- `is_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the view is not updated.
- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — Shows whether the created view is [materialized](../../sql-reference/statements/create/view.md#materialized). Possible values:
    - `NO` — The created view is not materialized.
    - `YES` — The created view is materialized.
- `is_trigger_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the trigger is not updated.
- `is_trigger_deletable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the trigger is not deleted.
- `is_trigger_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, no data is inserted into the trigger.

**Example**

Query:

``` sql
CREATE VIEW v (n Nullable(Int32), f Float64) AS SELECT n, f FROM t;
CREATE MATERIALIZED VIEW mv ENGINE = Null AS SELECT * FROM system.one;
SELECT * FROM information_schema.views WHERE table_schema = currentDatabase() LIMIT 1 FORMAT Vertical;
```

Result:

``` text
Row 1:
──────
table_catalog:              default
table_schema:               default
table_name:                 mv
view_definition:            SELECT * FROM system.one
check_option:               NONE
is_updatable:               NO
is_insertable_into:         YES
is_trigger_updatable:       NO
is_trigger_deletable:       NO
is_trigger_insertable_into: NO
```
@ -26,6 +26,8 @@ Each query creates one or two rows in the `query_log` table, depending on the st

You can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting to reduce the number of queries registered in the `query_log` table.

You can use the [log_formatted_queries](../../operations/settings/settings.md#settings-log-formatted-queries) setting to log formatted queries to the `formatted_query` column.
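
As a minimal sketch, both settings (documented at the links above) can be enabled per session; the values here are illustrative:

``` sql
SET log_queries_probability = 0.5; -- register roughly half of the queries
SET log_formatted_queries = 1;     -- also fill the formatted_query column
SELECT 1;                          -- this query may now be registered in system.query_log
```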

Columns:

- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
@ -48,6 +50,7 @@ Columns:
- `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
- `current_database` ([String](../../sql-reference/data-types/string.md)) — Name of the current database.
- `query` ([String](../../sql-reference/data-types/string.md)) — Query string.
- `formatted_query` ([String](../../sql-reference/data-types/string.md)) — Formatted query string.
- `normalized_query_hash` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — A hash value that is identical for queries that differ only in the values of literals.
- `query_kind` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Type of the query.
- `databases` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — Names of the databases present in the query.
@ -114,68 +117,68 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' ORDER BY query_start_t

Row 1:
──────
type: QueryFinish
event_date: 2021-07-28
event_time: 2021-07-28 13:46:56
event_time_microseconds: 2021-07-28 13:46:56.719791
query_start_time: 2021-07-28 13:46:56
query_start_time_microseconds: 2021-07-28 13:46:56.704542
query_duration_ms: 14
read_rows: 8393
read_bytes: 374325
event_date: 2021-11-03
event_time: 2021-11-03 16:13:54
event_time_microseconds: 2021-11-03 16:13:54.953024
query_start_time: 2021-11-03 16:13:54
query_start_time_microseconds: 2021-11-03 16:13:54.952325
query_duration_ms: 0
read_rows: 69
read_bytes: 6187
written_rows: 0
written_bytes: 0
result_rows: 4201
result_bytes: 153024
memory_usage: 4714038
result_rows: 69
result_bytes: 48256
memory_usage: 0
current_database: default
query: SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)
normalized_query_hash: 6666026786019643712
query_kind: Select
databases: ['system']
tables: ['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']
columns: ['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']
query: DESCRIBE TABLE system.query_log
formatted_query:
normalized_query_hash: 8274064835331539124
query_kind:
databases: []
tables: []
columns: []
projections: []
views: []
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
query_id: 7c28bbbb-753b-4eba-98b1-efcbe2b9bdf6
address: ::ffff:127.0.0.1
port: 51006
port: 40452
initial_user: default
initial_query_id: a3361f6e-a1fd-4d54-9f6f-f93a08bab0bf
initial_query_id: 7c28bbbb-753b-4eba-98b1-efcbe2b9bdf6
initial_address: ::ffff:127.0.0.1
initial_port: 51006
initial_query_start_time: 2021-07-28 13:46:56
initial_query_start_time_microseconds: 2021-07-28 13:46:56.704542
initial_port: 40452
initial_query_start_time: 2021-11-03 16:13:54
initial_query_start_time_microseconds: 2021-11-03 16:13:54.952325
interface: 1
os_user:
client_hostname:
client_name: ClickHouse client
os_user: sevirov
client_hostname: clickhouse.ru-central1.internal
client_name: ClickHouse
client_revision: 54449
client_version_major: 21
client_version_minor: 8
client_version_patch: 0
client_version_minor: 10
client_version_patch: 1
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54453
revision: 54456
log_comment:
thread_ids: [5058,22097,22110,22094]
ProfileEvents.Names: ['Query','SelectQuery','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSWriteChars']
ProfileEvents.Values: [1,1,39,352256,64,360,8393,374325,412,440,34480,13108,4723,671,19,17828,8192,10240]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
thread_ids: [30776,31174]
ProfileEvents: {'Query':1,'NetworkSendElapsedMicroseconds':59,'NetworkSendBytes':2643,'SelectedRows':69,'SelectedBytes':6187,'ContextLock':9,'RWLockAcquiredReadLocks':1,'RealTimeMicroseconds':817,'UserTimeMicroseconds':427,'SystemTimeMicroseconds':212,'OSCPUVirtualTimeMicroseconds':639,'OSReadChars':894,'OSWriteChars':319}
Settings: {'load_balancing':'random','max_memory_usage':'10000000000'}
used_aggregate_functions: []
used_aggregate_function_combinators: []
used_database_engines: []
used_data_type_families: ['UInt64','UInt8','Nullable','String','date']
used_data_type_families: []
used_dictionaries: []
used_formats: []
used_functions: ['concat','notEmpty','extractAll']
used_functions: []
used_storages: []
used_table_functions: []
```
@ -183,6 +186,3 @@ used_table_functions: []

**See Also**

- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread.
- [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) — This table contains information about each view executed during a query.

[Original article](https://clickhouse.com/docs/en/operations/system-tables/query_log) <!--hide-->
@ -30,6 +30,8 @@ Columns:

- `engine_full` ([String](../../sql-reference/data-types/string.md)) — Parameters of the table engine.

- `as_select` ([String](../../sql-reference/data-types/string.md)) — `SELECT` query for the view.

- `partition_key` ([String](../../sql-reference/data-types/string.md)) — The partition key expression specified in the table.

- `sorting_key` ([String](../../sql-reference/data-types/string.md)) — The sorting key expression specified in the table.

@ -56,6 +58,7 @@ Columns:

- `comment` ([String](../../sql-reference/data-types/string.md)) — The comment for the table.

- `has_own_data` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the table itself stores some data on disk or only accesses some other source.

The `system.tables` table is used in the `SHOW TABLES` query implementation.
@ -80,6 +83,7 @@ dependencies_database: []

dependencies_table: []
create_table_query: CREATE TABLE base.t1 (`n` UInt64) ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 8192
engine_full: MergeTree ORDER BY n SETTINGS index_granularity = 8192
as_select: SELECT database AS table_catalog
partition_key:
sorting_key: n
primary_key: n
@ -90,6 +94,7 @@ total_bytes: 99
lifetime_rows: ᴺᵁᴸᴸ
lifetime_bytes: ᴺᵁᴸᴸ
comment:
has_own_data: 0

Row 2:
──────
@ -105,6 +110,7 @@ dependencies_database: []
dependencies_table: []
create_table_query: CREATE TABLE default.`53r93yleapyears` (`id` Int8, `febdays` Int8) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192
engine_full: MergeTree ORDER BY id SETTINGS index_granularity = 8192
as_select: SELECT name AS catalog_name
partition_key:
sorting_key: id
primary_key: id
@ -115,6 +121,5 @@ total_bytes: 155
lifetime_rows: ᴺᵁᴸᴸ
lifetime_bytes: ᴺᵁᴸᴸ
comment:
has_own_data: 0
```

[Original article](https://clickhouse.com/docs/en/operations/system-tables/tables) <!--hide-->
@ -1,3 +1,4 @@
---
toc_priority: 58
toc_title: Usage Recommendations
---
@ -71,8 +72,8 @@ For HDD, enable the write cache.

## File System {#file-system}

Ext4 is the most reliable option. Set the `noatime` mount option.
XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse.
Most other file systems should also work fine. File systems with delayed allocation work better.
XFS should be avoided. It works mostly fine but there are some reports about lower performance.
Most other file systems should also work fine.

## Linux Kernel {#linux-kernel}

@ -47,7 +47,7 @@ Parameters:

## Format of Zookeeper.xml {#format-of-zookeeper-xml}

``` xml
<yandex>
<clickhouse>
    <logger>
        <level>trace</level>
        <size>100M</size>
@ -60,13 +60,13 @@ Parameters:
            <port>2181</port>
        </node>
    </zookeeper>
</yandex>
</clickhouse>
```

## Configuration of Copying Tasks {#configuration-of-copying-tasks}

``` xml
<yandex>
<clickhouse>
    <!-- Configuration of clusters as in an ordinary server config -->
    <remote_servers>
        <source_cluster>
@ -179,7 +179,7 @@ Parameters:
        </table_visits>
        ...
    </tables>
</yandex>
</clickhouse>
```

`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change.
@ -25,6 +25,12 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a

-If and -Array can be combined. However, ‘Array’ must come first, then ‘If’. Examples: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Due to this order, the ‘cond’ argument won’t be an array.
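
A quick sketch of the combination (the values below are illustrative):

``` sql
SELECT uniqArrayIf(arr, cond)
FROM (SELECT [1, 2, 2] AS arr, 1 AS cond);
-- counts unique array elements only in rows where cond is non-zero; returns 2
```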
## -Map {#agg-functions-combinator-map}

The -Map suffix can be appended to any aggregate function. This creates an aggregate function which takes a `Map` type as an argument and aggregates the values of each key of the map separately using the specified aggregate function. The result is also of a `Map` type.

Examples: `sumMap(map(1,1))`, `avgMap(map('a', 1))`.
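
A minimal sketch of aggregating maps across rows (the literal maps are illustrative):

``` sql
SELECT sumMap(m) AS res
FROM (SELECT map('a', 1, 'b', 2) AS m UNION ALL SELECT map('a', 3) AS m);
-- res = {'a':4,'b':2}: values are summed separately per key
```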
## -SimpleState {#agg-functions-combinator-simplestate}

If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables.
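
A minimal sketch of the resulting type (the query itself is only an illustration):

``` sql
SELECT toTypeName(sumSimpleState(number)) FROM numbers(3);
-- SimpleAggregateFunction(sum, UInt64)
```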
@ -26,7 +26,7 @@ You can view the list of external dictionaries and their statuses in the `system

The configuration looks like this:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        ...
        <layout>
@ -36,7 +36,7 @@ The configuration looks like this:
        </layout>
        ...
    </dictionary>
</yandex>
</clickhouse>
```

Corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md):
@ -289,7 +289,7 @@ Details of the algorithm:

Configuration example:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        ...

@ -317,7 +317,7 @@ Configuration example:
        </structure>

    </dictionary>
</yandex>
</clickhouse>
```

or
@ -10,7 +10,7 @@ An external dictionary can be connected from many different sources.

If the dictionary is configured using an xml file, the configuration looks like this:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        ...
        <source>
@ -21,7 +21,7 @@ If dictionary is configured using xml-file, the configuration looks like this:
        ...
    </dictionary>
    ...
</yandex>
</clickhouse>
```

In case of a [DDL-query](../../../sql-reference/statements/create/dictionary.md), the equivalent configuration will look like:
@ -311,7 +311,7 @@ Configuring `/etc/odbc.ini` (or `~/.odbc.ini` if you signed in under a user that

The dictionary configuration in ClickHouse:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>table_name</name>
        <source>
@ -340,7 +340,7 @@ The dictionary configuration in ClickHouse:
            </attribute>
        </structure>
    </dictionary>
</yandex>
</clickhouse>
```

or
@ -416,7 +416,7 @@ Remarks:

Configuring the dictionary in ClickHouse:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>test</name>
        <source>
@ -446,7 +446,7 @@ Configuring the dictionary in ClickHouse:
            </attribute>
        </structure>
    </dictionary>
</yandex>
</clickhouse>
```

or
@ -26,7 +26,7 @@ The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tabl

The dictionary configuration file has the following format:

``` xml
<yandex>
<clickhouse>
    <comment>An optional element with any content. Ignored by the ClickHouse server.</comment>

    <!--Optional element. File name with substitutions-->
@ -38,7 +38,7 @@ The dictionary configuration file has the following format:
    <!-- There can be any number of <dictionary> sections in the configuration file. -->
    </dictionary>

</yandex>
</clickhouse>
```

You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.

@ -53,7 +53,7 @@ The first column is `id`, the second column is `c1`.

Configure the external dictionary:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>ext-dict-test</name>
        <source>
@ -77,7 +77,7 @@ Configure the external dictionary:
        </structure>
        <lifetime>0</lifetime>
    </dictionary>
</yandex>
</clickhouse>
```

Perform the query:
@ -113,7 +113,7 @@ The first column is `id`, the second is `c1`, the third is `c2`.

Configure the external dictionary:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>ext-dict-mult</name>
        <source>
@ -142,7 +142,7 @@ Configure the external dictionary:
        </structure>
        <lifetime>0</lifetime>
    </dictionary>
</yandex>
</clickhouse>
```

Perform the query:
@ -2,13 +2,13 @@

toc_title: S2 Geometry
---

# Functions for Working with S2 Index {#s2Index}
# Functions for Working with S2 Index {#s2index}

[S2](https://s2geometry.io/) is a geographical indexing system where all geographical data is represented on a three-dimensional sphere (similar to a globe).

In the S2 library points are represented as unit length vectors called S2 point indices (points on the surface of a three dimensional unit sphere) as opposed to traditional (latitude, longitude) pairs.
In the S2 library points are represented as the S2 Index, a specific number which internally encodes a point on the surface of a unit sphere, unlike traditional (latitude, longitude) pairs. To get the S2 point index for a given point specified in the format (latitude, longitude) use the [geoToS2](#geotos2) function. Also, you can use the [s2ToGeo](#s2togeo) function for getting geographical coordinates corresponding to the specified S2 point index.

## geoToS2 {#geoToS2}
## geoToS2 {#geotos2}

Returns [S2](#s2index) point index corresponding to the provided coordinates `(longitude, latitude)`.

@ -34,7 +34,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

Query:

``` sql
SELECT geoToS2(37.79506683, 55.71290588) as s2Index;
SELECT geoToS2(37.79506683, 55.71290588) AS s2Index;
```

Result:
@ -45,7 +45,7 @@ Result:
└─────────────────────┘
```

## s2ToGeo {#s2ToGeo}
## s2ToGeo {#s2togeo}

Returns geo coordinates `(longitude, latitude)` corresponding to the provided [S2](#s2index) point index.

@ -57,20 +57,20 @@ s2ToGeo(s2index)

**Arguments**

- `s2Index` — S2 Index. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2index` — S2 Index. [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- A tuple consisting of two values: `tuple(lon,lat)`.

Type: `lon` - [Float64](../../../sql-reference/data-types/float.md). `lat` — [Float64](../../../sql-reference/data-types/float.md).
Type: `lon` — [Float64](../../../sql-reference/data-types/float.md). `lat` — [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT s2ToGeo(4704772434919038107) as s2Coodrinates;
SELECT s2ToGeo(4704772434919038107) AS s2Coordinates;
```

Result:
@ -81,9 +81,9 @@ Result:
└──────────────────────────────────────┘
```
## s2GetNeighbors {#s2GetNeighbors}
## s2GetNeighbors {#s2getneighbors}

Returns S2 neighbor indices corresponding to the provided [S2](#s2index)). Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors.
Returns S2 neighbor indexes corresponding to the provided [S2](#s2index). Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors.

**Syntax**

@ -97,16 +97,16 @@ s2GetNeighbors(s2index)

**Returned values**

- An array consisting of the 4 neighbor indices: `array[s2index1, s2index3, s2index2, s2index4]`.
- An array consisting of 4 neighbor indexes: `array[s2index1, s2index3, s2index2, s2index4]`.

Type: Each S2 index is [UInt64](../../../sql-reference/data-types/int-uint.md).
Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
select s2GetNeighbors(5074766849661468672) AS s2Neighbors;
SELECT s2GetNeighbors(5074766849661468672) AS s2Neighbors;
```

Result:
@ -117,9 +117,9 @@ Result:
└───────────────────────────────────────────────────────────────────────────────────┘
```

## s2CellsIntersect {#s2CellsIntersect}
## s2CellsIntersect {#s2cellsintersect}

Determines if the two provided [S2](#s2index)) cell indices intersect or not.
Determines if the two provided [S2](#s2index) cells intersect or not.

**Syntax**

@ -133,8 +133,8 @@ s2CellsIntersect(s2index1, s2index2)

**Returned values**

- 1 — If the S2 cell indices intersect.
- 0 — If the S2 cell indices don't intersect.
- 1 — If the cells intersect.
- 0 — If the cells don't intersect.

Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

@ -143,7 +143,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

Query:

``` sql
select s2CellsIntersect(9926595209846587392, 9926594385212866560) as intersect;
SELECT s2CellsIntersect(9926595209846587392, 9926594385212866560) AS intersect;
```

Result:
@ -154,11 +154,9 @@ Result:
└───────────┘
```
## s2CapContains {#s2CapContains}
## s2CapContains {#s2capcontains}

A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.

Determines if a cap contains a s2 point index.
Determines if a cap contains a S2 point. A cap represents a part of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.

**Syntax**

@ -168,9 +166,9 @@ s2CapContains(center, degrees, point)

**Arguments**

- `center` - S2 point index corresponding to the cap. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `degrees` - Radius of the cap in degrees. [Float64](../../../sql-reference/data-types/float.md).
- `point` - S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `center` — S2 point index corresponding to the cap. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `degrees` — Radius of the cap in degrees. [Float64](../../../sql-reference/data-types/float.md).
- `point` — S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

@ -184,7 +182,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

Query:

``` sql
select s2CapContains(1157339245694594829, 1.0, 1157347770437378819) as capContains;
SELECT s2CapContains(1157339245694594829, 1.0, 1157347770437378819) AS capContains;
```

Result:
@ -195,11 +193,9 @@ Result:
└─────────────┘
```

## s2CapUnion {#s2CapUnion}
## s2CapUnion {#s2capunion}

A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.

Determines the smallest cap that contains the given two input caps.
Determines the smallest cap that contains the given two input caps. A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.

**Syntax**

@ -209,13 +205,13 @@ s2CapUnion(center1, radius1, center2, radius2)

**Arguments**

- `center1`, `center2` - S2 point indices corresponding to the two input caps. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `radius1`, `radius2` - Radii of the two input caps in degrees. [Float64](../../../sql-reference/data-types/float.md).
- `center1`, `center2` — S2 point indexes corresponding to the two input caps. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `radius1`, `radius2` — Radii of the two input caps in degrees. [Float64](../../../sql-reference/data-types/float.md).

**Returned values**

- `center` - S2 point index corresponding the center of the smallest cap containing the two input caps. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `radius` - Radius of the smallest cap containing the two input caps. Type: [Float64](../../../sql-reference/data-types/float.md).
- `center` — S2 point index corresponding to the center of the smallest cap containing the two input caps. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `radius` — Radius of the smallest cap containing the two input caps. Type: [Float64](../../../sql-reference/data-types/float.md).

**Example**

@ -233,11 +229,9 @@ Result:
└────────────────────────────────────────┘
```

## s2RectAdd{#s2RectAdd}
## s2RectAdd {#s2rectadd}

In the S2 system, a rectangle is represented by a type of S2Region called a S2LatLngRect that represents a rectangle in latitude-longitude space.

Increases the size of the bounding rectangle to include the given S2 point index.
Increases the size of the bounding rectangle to include the given S2 point. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.

**Syntax**

@ -247,21 +241,21 @@ s2RectAdd(s2pointLow, s2pointHigh, s2Point)

**Arguments**

- `s2PointLow` - Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointHigh` - High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Point` - Target S2 point index that the bound rectangle should be grown to include. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointLow` — Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointHigh` — High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Point` — Target S2 point index that the bound rectangle should be grown to include. [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- `s2PointLow` - Low S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointHigh` - Hight S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/float.md).
- `s2PointLow` — Low S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointHigh` — High S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) as rectAdd;
SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) AS rectAdd;
```

Result:
@ -272,11 +266,9 @@ Result:
└───────────────────────────────────────────┘
```

## s2RectContains{#s2RectContains}
## s2RectContains {#s2rectcontains}

In the S2 system, a rectangle is represented by a type of S2Region called a S2LatLngRect that represents a rectangle in latitude-longitude space.

Determines if a given rectangle contains a S2 point index.
Determines if a given rectangle contains a S2 point. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.

**Syntax**

@ -286,9 +278,9 @@ s2RectContains(s2PointLow, s2PointHi, s2Point)

**Arguments**

- `s2PointLow` - Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointHigh` - High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Point` - Target S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointLow` — Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2PointHigh` — High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Point` — Target S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

@ -300,7 +292,7 @@ s2RectContains(s2PointLow, s2PointHi, s2Point)

Query:

``` sql
SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187297) AS rectContains
SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187297) AS rectContains;
```

Result:
@ -311,11 +303,9 @@ Result:
└──────────────┘
```
## s2RectUinion{#s2RectUnion}
## s2RectUnion {#s2rectunion}

In the S2 system, a rectangle is represented by a type of S2Region called a S2LatLngRect that represents a rectangle in latitude-longitude space.

Returns the smallest rectangle containing the union of this rectangle and the given rectangle.
Returns the smallest rectangle containing the union of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.

**Syntax**

@ -325,20 +315,20 @@ s2RectUnion(s2Rect1PointLow, s2Rect1PointHi, s2Rect2PointLow, s2Rect2PointHi)

**Arguments**

- `s2Rect1PointLow`, `s2Rect1PointHi` - Low and High S2 point indices corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Rect2PointLow`, `s2Rect2PointHi` - Low and High S2 point indices corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Rect1PointLow`, `s2Rect1PointHi` — Low and High S2 point indexes corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Rect2PointLow`, `s2Rect2PointHi` — Low and High S2 point indexes corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- `s2UnionRect2PointLow` - Low S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2UnionRect2PointHi` - High S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2UnionRect2PointLow` — Low S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2UnionRect2PointHi` — High S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectUnion
SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectUnion;
```

Result:
@ -349,9 +339,9 @@ Result:
└───────────────────────────────────────────┘
```

## s2RectIntersection{#s2RectIntersection}
## s2RectIntersection {#s2rectintersection}

Returns the smallest Rectangle containing the intersection of this rectangle and the given rectangle.
Returns the smallest rectangle containing the intersection of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.

**Syntax**

@ -361,20 +351,20 @@ s2RectIntersection(s2Rect1PointLow, s2Rect1PointHi, s2Rect2PointLow, s2Rect2Poin

**Arguments**

- `s2Rect1PointLow`, `s2Rect1PointHi` - Low and High S2 point indices corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Rect2PointLow`, `s2Rect2PointHi` - Low and High S2 point indices corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Rect1PointLow`, `s2Rect1PointHi` — Low and High S2 point indexes corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2Rect2PointLow`, `s2Rect2PointHi` — Low and High S2 point indexes corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- `s2UnionRect2PointLow` - Low S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2UnionRect2PointHi` - Hi S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2UnionRect2PointLow` — Low S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
- `s2UnionRect2PointHi` — High S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT s2RectIntersection(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectIntersection
SELECT s2RectIntersection(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectIntersection;
```

Result:
@ -16,81 +16,3 @@ The [stochasticLinearRegression](../../sql-reference/aggregate-functions/referen

## stochasticLogisticRegression {#stochastic-logistic-regression}

The [stochasticLogisticRegression](../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#agg_functions-stochasticlogisticregression) aggregate function implements the stochastic gradient descent method for the binary classification problem. Uses `evalMLMethod` to predict on new data.
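
A minimal sketch of the train-then-predict pattern (the table and column names here are hypothetical; the parameters are learning rate, l2 regularization, mini-batch size and method):

``` sql
-- train: the state aggregate takes (target, feature1, feature2, ...)
CREATE TABLE your_model ENGINE = Memory AS
SELECT stochasticLogisticRegressionState(0.1, 0.0, 5, 'SGD')(target, f1, f2) AS state
FROM train_data;

-- predict on new data with evalMLMethod
WITH (SELECT state FROM your_model) AS model
SELECT evalMLMethod(model, f1, f2) AS prediction
FROM test_data;
```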
## bayesAB {#bayesab}

Compares test groups (variants) and calculates for each group the probability to be the best one. The first group is used as a control group.

**Syntax**

``` sql
bayesAB(distribution_name, higher_is_better, variant_names, x, y)
```

**Arguments**

- `distribution_name` — Name of the probability distribution. [String](../../sql-reference/data-types/string.md). Possible values:

    - `beta` for [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution)
    - `gamma` for [Gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution)

- `higher_is_better` — Boolean flag. [Boolean](../../sql-reference/data-types/boolean.md). Possible values:

    - `0` — lower values are considered to be better than higher
    - `1` — higher values are considered to be better than lower

- `variant_names` — Variant names. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

- `x` — Numbers of tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)).

- `y` — Numbers of successful tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)).

!!! note "Note"
    All three arrays must have the same size. All `x` and `y` values must be non-negative constant numbers. `y` cannot be larger than `x`.

**Returned values**

For each variant the function calculates:
- `beats_control` — long-term probability to out-perform the first (control) variant
- `to_be_best` — long-term probability to out-perform all other variants

Type: JSON.

**Example**

Query:

``` sql
SELECT bayesAB('beta', 1, ['Control', 'A', 'B'], [3000., 3000., 3000.], [100., 90., 110.]) FORMAT PrettySpace;
```

Result:

``` text
{
    "data":[
        {
            "variant_name":"Control",
            "x":3000,
            "y":100,
            "beats_control":0,
            "to_be_best":0.22619
        },
        {
            "variant_name":"A",
            "x":3000,
            "y":90,
            "beats_control":0.23469,
            "to_be_best":0.04671
        },
        {
            "variant_name":"B",
            "x":3000,
            "y":110,
            "beats_control":0.7580899999999999,
            "to_be_best":0.7271
        }
    ]
}
```
@ -213,7 +213,7 @@ SELECT splitByNonAlpha(' 1! a, b. ');

## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}

Concatenates the strings (values of type String or Nullable(String)) listed in the array with the separator. ’separator’ is an optional parameter: a constant string, set to an empty string by default.
Concatenates string representations of values listed in the array with the separator. `separator` is an optional parameter: a constant string, set to an empty string by default.
Returns the string.
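
A quick sketch of both forms (with and without the separator):

``` sql
SELECT arrayStringConcat(['a', 'b', 'c'], '-') AS with_sep,
       arrayStringConcat(['a', 'b', 'c']) AS no_sep;
-- with_sep = 'a-b-c', no_sep = 'abc'
```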

## alphaTokens(s) {#alphatokenss}

@ -307,3 +307,33 @@ Result:
│ ['Cli','lic','ick','ckH','kHo','Hou','ous','use'] │
└───────────────────────────────────────────────────┘
```

## tokens {#tokens}

Splits a string into tokens using non-alphanumeric ASCII characters as separators.

**Arguments**

- `input_string` — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object.

**Returned value**

- The resulting array of tokens from input string.

Type: [Array](../data-types/array.md).

**Example**

Query:

``` sql
SELECT tokens('test1,;\\ test2,;\\ test3,;\\ test4') AS tokens;
```

Result:

``` text
┌─tokens────────────────────────────┐
│ ['test1','test2','test3','test4'] │
└───────────────────────────────────┘
```
@ -313,32 +313,6 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b');

└───────────────────────┘
```

## tokens {#tokens}

Split string into tokens using non-alpha numeric ASCII characters as separators.

**Arguments**

- `input_string` — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object.

**Returned value**

- The resulting array of tokens from input string.

Type: [Array](../data-types/array.md).

**Example**

``` sql
SELECT tokens('test1,;\\ test2,;\\ test3,;\\ test4') AS tokens;
```

``` text
┌─tokens────────────────────────────┐
│ ['test1','test2','test3','test4'] │
└───────────────────────────────────┘
```

## repeat {#repeat}

Repeats a string as many times as specified and concatenates the replicated values as a single string.
@ -166,6 +166,80 @@ Result:

└─────────────────┘
```

## tupleToNameValuePairs {#tupletonamevaluepairs}

Turns a named tuple into an array of (name, value) pairs. For a `Tuple(a T, b T, ..., c T)` it returns `Array(Tuple(String, T), ...)`
in which the `String` values represent the named fields of the tuple and `T` are the values associated with those names. All values in the tuple should be of the same type.

**Syntax**

``` sql
tupleToNameValuePairs(tuple)
```

**Arguments**

- `tuple` — Named tuple. [Tuple](../../sql-reference/data-types/tuple.md) with any types of values.

**Returned value**

- An array with (name, value) pairs.

Type: [Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md), ...)).

**Example**

Query:

``` sql
CREATE TABLE tupletest (`col` Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;

INSERT INTO tupletest VALUES (tuple(100, 2502)), (tuple(1, 100));

SELECT tupleToNameValuePairs(col) FROM tupletest;
```

Result:

``` text
┌─tupleToNameValuePairs(col)────────────┐
│ [('user_ID',100),('session_ID',2502)] │
│ [('user_ID',1),('session_ID',100)]    │
└───────────────────────────────────────┘
```

It is possible to transform columns to rows using this function:

``` sql
CREATE TABLE tupletest (`col` Tuple(CPU Float64, Memory Float64, Disk Float64)) ENGINE = Memory;

INSERT INTO tupletest VALUES (tuple(3.3, 5.5, 6.6));

SELECT arrayJoin(tupleToNameValuePairs(col)) FROM tupletest;
```

Result:

``` text
┌─arrayJoin(tupleToNameValuePairs(col))─┐
│ ('CPU',3.3)                           │
│ ('Memory',5.5)                        │
│ ('Disk',6.6)                          │
└───────────────────────────────────────┘
```

If you pass a simple tuple to the function, ClickHouse uses the indexes of the values as their names:

``` sql
SELECT tupleToNameValuePairs(tuple(3, 2, 1));
```

Result:

``` text
┌─tupleToNameValuePairs(tuple(3, 2, 1))─┐
│ [('1',3),('2',2),('3',1)]             │
└───────────────────────────────────────┘
```

## tuplePlus {#tupleplus}

Calculates the sum of corresponding values of two tuples of the same size.
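
A quick sketch of the behavior:

``` sql
SELECT tuplePlus((1, 2), (2, 3)) AS res;
-- res = (3,5): elements are summed position by position
```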
@ -895,7 +969,6 @@ Result:

Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `Lp` space (using the [p-norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)).

**Syntax**

```sql
@ -165,9 +165,6 @@ Result:

## mapPopulateSeries {#function-mappopulateseries}

Fills missing keys in the maps (key and value array pair), where keys are integers. Also, it supports specifying the max key, which is used to extend the keys array.
Arguments are [maps](../../sql-reference/data-types/map.md) or two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the first array represent keys, and the second array contains values for the each key.

For array arguments the number of elements in `keys` and `values` must be the same for each row.

**Syntax**

@ -178,12 +175,17 @@ mapPopulateSeries(map[, max])

Generates a map (a tuple with two arrays or a value of `Map` type, depending on the arguments), where keys are a series of numbers, from the minimum to the maximum key (or the `max` argument if it is specified) taken from the map with a step size of one, together with the corresponding values. If the value is not specified for a key, the default value is used in the resulting map. For repeated keys, only the first value (in order of appearance) gets associated with the key.

For array arguments the number of elements in `keys` and `values` must be the same for each row.

**Arguments**

Arguments are [maps](../../sql-reference/data-types/map.md) or two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the first array represents keys, and the second array contains values for each key.

Mapped arrays:

- `keys` — Array of keys. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#uint-ranges)).
- `values` — Array of values. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#uint-ranges)).
- `max` — Maximum key value. Optional. [Int8, Int16, Int32, Int64, Int128, Int256](../../sql-reference/data-types/int-uint.md#int-ranges).

or
@ -198,7 +200,7 @@ or

Query with mapped arrays:

```sql
select mapPopulateSeries([1,2,4], [11,22,44], 5) as res, toTypeName(res) as type;
SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type;
```

Result:
@ -390,5 +392,43 @@ Result:
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
## mapExtractKeyLike {#mapExtractKeyLike}

Returns a map with the elements whose keys match the specified `LIKE` pattern.

**Syntax**

```sql
mapExtractKeyLike(map, pattern)
```

**Parameters**

- `map` — Map. [Map](../../sql-reference/data-types/map.md).
- `pattern` — String pattern to match.

**Returned value**

- A map containing the elements whose keys match the specified pattern. If no elements match the pattern, an empty map is returned.

**Example**

Query:

```sql
CREATE TABLE test (a Map(String,String)) ENGINE = Memory;

INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});

SELECT mapExtractKeyLike(a, 'a%') FROM test;
```

Result:

```text
┌─mapExtractKeyLike(a, 'a%')─┐
│ {'abc':'abc'}              │
│ {}                         │
└────────────────────────────┘
```
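The function can also be applied to a map literal directly; a minimal sketch (the result in the comment follows from the description above):

```sql
SELECT mapExtractKeyLike(map('abc', '1', 'hij', '2'), 'a%') AS res;
-- res = {'abc':'1'}
```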
[Original article](https://clickhouse.com/docs/en/sql-reference/functions/tuple-map-functions/) <!--hide-->

@ -155,7 +155,7 @@ ALTER TABLE visits CLEAR COLUMN hour in PARTITION 201902

## FREEZE PARTITION {#alter_freeze-partition}

``` sql
ALTER TABLE table_name FREEZE [PARTITION partition_expr]
ALTER TABLE table_name FREEZE [PARTITION partition_expr] [WITH NAME 'backup_name']
```

This query creates a local backup of a specified partition. If the `PARTITION` clause is omitted, the query creates the backup of all partitions at once.

@ -169,6 +169,7 @@ At the time of execution, for a data snapshot, the query creates hardlinks to a

- `/var/lib/clickhouse/` is the working ClickHouse directory specified in the config.
- `N` is the incremental number of the backup.
- If the `WITH NAME` parameter is specified, then the value of the `'backup_name'` parameter is used instead of the incremental number.

!!! note "Note"
    If you use [a set of disks for data storage in a table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing data parts that match the `PARTITION` expression.
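A hypothetical invocation of the new `WITH NAME` clause (table, partition, and backup name are illustrative):

``` sql
ALTER TABLE visits FREEZE PARTITION 201902 WITH NAME 'visits_201902_backup';
```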
@ -7,7 +7,7 @@ toc_title: PROJECTION

The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:

- `ALTER TABLE [db].name ADD PROJECTION name AS SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]` - Adds projection description to the table's metadata.
- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to the table's metadata.

- `ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from the table's metadata and deletes projection files from disk.
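For illustration, a hypothetical projection that pre-aggregates visits per user, using the new parenthesized syntax (table and column names are illustrative):

``` sql
ALTER TABLE visits ADD PROJECTION user_totals (SELECT UserID, count() GROUP BY UserID);
ALTER TABLE visits DROP PROJECTION user_totals;
```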
@ -8,7 +8,7 @@ toc_title: DATABASE

Creates a new database.

``` sql
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)]
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(...)] [COMMENT 'Comment']
```

## Clauses {#clauses}

@ -26,4 +26,33 @@ ClickHouse creates the `db_name` database on all the servers of a specified clus
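As a sketch of the `ON CLUSTER` clause referenced above (the cluster name is illustrative):

``` sql
CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER company_cluster;
```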
### ENGINE {#engine}

[MySQL](../../../engines/database-engines/mysql.md) allows you to retrieve data from the remote MySQL server. By default, ClickHouse uses its own [database engine](../../../engines/database-engines/index.md). There’s also a [lazy](../../../engines/database-engines/lazy.md) engine.
By default, ClickHouse uses its own [Atomic](../../../engines/database-engines/atomic.md) database engine. There are also [Lazy](../../../engines/database-engines/lazy.md), [MySQL](../../../engines/database-engines/mysql.md), [PostgreSQL](../../../engines/database-engines/postgresql.md), [MaterializedMySQL](../../../engines/database-engines/materialized-mysql.md), [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md), [Replicated](../../../engines/database-engines/replicated.md), [SQLite](../../../engines/database-engines/sqlite.md).

### COMMENT {#comment}

You can add a comment to the database when creating it.

The comment is supported for all database engines.

**Syntax**

``` sql
CREATE DATABASE db_name ENGINE = engine(...) COMMENT 'Comment'
```

**Example**

Query:

``` sql
CREATE DATABASE db_comment ENGINE = Memory COMMENT 'The temporary database';
SELECT name, comment FROM system.databases WHERE name = 'db_comment';
```

Result:

```text
┌─name───────┬─comment────────────────┐
│ db_comment │ The temporary database │
└────────────┴────────────────────────┘
```
@ -3,18 +3,67 @@ toc_priority: 42
toc_title: DESCRIBE
---

# DESCRIBE TABLE Statement {#misc-describe-table}
# DESCRIBE TABLE {#misc-describe-table}

Returns information about table columns.

**Syntax**

``` sql
DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
```

Returns the following `String` type columns:
The `DESCRIBE` statement returns a row for each table column with the following [String](../../sql-reference/data-types/string.md) values:

- `name` — Column name.
- `type` — Column type.
- `default_type` — Clause that is used in the [default expression](../../sql-reference/statements/create/table.md#create-default-values) (`DEFAULT`, `MATERIALIZED` or `ALIAS`). The column contains an empty string if the default expression isn’t specified.
- `default_expression` — Value specified in the `DEFAULT` clause.
- `comment_expression` — Comment text.
- `name` — A column name.
- `type` — A column type.
- `default_type` — A clause that is used in the column [default expression](../../sql-reference/statements/create/table.md#create-default-values): `DEFAULT`, `MATERIALIZED` or `ALIAS`. If there is no default expression, then an empty string is returned.
- `default_expression` — An expression specified after the `DEFAULT` clause.
- `comment` — A [column comment](../../sql-reference/statements/alter/column.md#alter_comment-column).
- `codec_expression` — A [codec](../../sql-reference/statements/create/table.md#codecs) that is applied to the column.
- `ttl_expression` — A [TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) expression.
- `is_subcolumn` — A flag that equals `1` for internal subcolumns. It is included in the result only if subcolumn description is enabled by the [describe_include_subcolumns](../../operations/settings/settings.md#describe_include_subcolumns) setting.

Nested data structures are output in “expanded” format. Each column is shown separately, with the name after a dot.
All columns in [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) data structures are described separately. The name of each column is prefixed with a parent column name and a dot.

To show internal subcolumns of other data types, use the [describe_include_subcolumns](../../operations/settings/settings.md#describe_include_subcolumns) setting.

**Example**

Query:

``` sql
CREATE TABLE describe_example (
    id UInt64, text String DEFAULT 'unknown' CODEC(ZSTD),
    user Tuple (name String, age UInt8)
) ENGINE = MergeTree() ORDER BY id;

DESCRIBE TABLE describe_example;
DESCRIBE TABLE describe_example SETTINGS describe_include_subcolumns=1;
```

Result:

``` text
┌─name─┬─type──────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ id   │ UInt64                        │              │                    │         │                  │                │
│ text │ String                        │ DEFAULT      │ 'unknown'          │         │ ZSTD(1)          │                │
│ user │ Tuple(name String, age UInt8) │              │                    │         │                  │                │
└──────┴───────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

The second query additionally shows subcolumns:

``` text
┌─name──────┬─type──────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┬─is_subcolumn─┐
│ id        │ UInt64                        │              │                    │         │                  │                │            0 │
│ text      │ String                        │ DEFAULT      │ 'unknown'          │         │ ZSTD(1)          │                │            0 │
│ user      │ Tuple(name String, age UInt8) │              │                    │         │                  │                │            0 │
│ user.name │ String                        │              │                    │         │                  │                │            1 │
│ user.age  │ UInt8                         │              │                    │         │                  │                │            1 │
└───────────┴───────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┴──────────────┘
```

**See Also**

- [describe_include_subcolumns](../../operations/settings/settings.md#describe_include_subcolumns) setting.
@ -5,7 +5,23 @@ toc_title: merge

# merge {#merge}

`merge(db_name, 'tables_regexp')` – Creates a temporary Merge table. For more information, see the section “Table engines, Merge”.
Creates a temporary [Merge](../../engines/table-engines/special/merge.md) table. The table structure is taken from the first table encountered that matches the regular expression.

The table structure is taken from the first table encountered that matches the regular expression.
**Syntax**

```sql
merge('db_name', 'tables_regexp')
```

**Arguments**

- `db_name` — Possible values:
    - database name,
    - constant expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

- `tables_regexp` — A regular expression to match the table names in the specified DB or DBs.

**See Also**

- [Merge](../../engines/table-engines/special/merge.md) table engine
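A usage sketch (the table-name pattern is illustrative; `currentDatabase()` is one of the `db_name` forms listed above):

```sql
SELECT * FROM merge(currentDatabase(), '^WatchLog');
```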
@ -559,7 +559,7 @@ CREATE TABLE IF NOT EXISTS example_table

- If `input_format_defaults_for_omitted_fields = 1`, the default value for `x` equals `0`, but the default value for `a` equals `x * 2`.

!!! note "Warning"
    When inserting data with `insert_sample_with_metadata = 1`, ClickHouse consumes more computational resources compared to insertion with `insert_sample_with_metadata = 0`.
    When inserting data with `input_format_defaults_for_omitted_fields = 1`, ClickHouse consumes more computational resources compared to insertion with `input_format_defaults_for_omitted_fields = 0`.
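A minimal sketch, assuming the `example_table` definition from the elided context above (a column `x` and a column `a` defined as `DEFAULT x * 2`):

``` sql
SET input_format_defaults_for_omitted_fields = 1;
-- "a" is omitted from the inserted row, so it is computed from its default expression x * 2
INSERT INTO example_table FORMAT JSONEachRow {"x":1};
```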
### Selecting Data {#selecting-data}
@ -10,7 +10,7 @@ toc_title: "\u8A2D\u5B9A\u30D5\u30A1\u30A4\u30EB"

ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml`. The other files must be in the `/etc/clickhouse-server/config.d` directory.

!!! note "Note"
    All the configuration files should be in XML format. Also, they should usually have the same root element, `<yandex>`.
    All the configuration files should be in XML format. Also, they should usually have the same root element, `<clickhouse>`.

Some settings specified in the main configuration file can be overridden in other configuration files. The `replace` or `remove` attributes can be specified for the elements of these configuration files.

@ -36,7 +36,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
```

``` xml
<yandex>
<clickhouse>
    <users>
        <alice>
            <profile>analytics</profile>
@ -47,7 +47,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
            <quota>analytics</quota>
        </alice>
    </users>
</yandex>
</clickhouse>
```

For each configuration file, the server also generates `file-preprocessed.xml` files at startup. These files contain all the completed substitutions and overrides and are intended for informational purposes. If ZooKeeper substitutions were used in the configuration files but ZooKeeper is unavailable at server startup, the server loads the configuration from the preprocessed file.
@ -335,14 +335,14 @@ SELECT * FROM system.metrics LIMIT 10

To enable metric history collection in `system.metric_log`, create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:

``` xml
<yandex>
<clickhouse>
    <metric_log>
        <database>system</database>
        <table>metric_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
    </metric_log>
</yandex>
</clickhouse>
```

**Example**
@ -46,7 +46,7 @@ $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/pat

## Format of zookeeper.xml {#format-of-zookeeper-xml}

``` xml
<yandex>
<clickhouse>
    <logger>
        <level>trace</level>
        <size>100M</size>
@ -59,13 +59,13 @@ $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/pat
            <port>2181</port>
        </node>
    </zookeeper>
</yandex>
</clickhouse>
```

## Configuration of Copying Tasks {#configuration-of-copying-tasks}

``` xml
<yandex>
<clickhouse>
    <!-- Configuration of clusters as in an ordinary server config -->
    <remote_servers>
        <source_cluster>
@ -168,7 +168,7 @@ $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/pat
        </table_visits>
        ...
    </tables>
</yandex>
</clickhouse>
```

`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For example, if you change the value of `max_workers`, the number of processes running tasks will also change.
@ -28,7 +28,7 @@ ClickHouseは、辞書のエラーに対して例外を生成します。 エラ

The configuration looks like this:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        ...
        <layout>
@ -38,7 +38,7 @@ ClickHouseは、辞書のエラーに対して例外を生成します。 エラ
        </layout>
        ...
    </dictionary>
</yandex>
</clickhouse>
```

The corresponding [DDL-query](../../statements/create.md#create-dictionary-query):

@ -208,7 +208,7 @@ dictGetT('dict_name', 'attr_name', id, date)

Configuration example:

``` xml
<yandex>
<clickhouse>
    <dictionary>

        ...
@ -237,7 +237,7 @@ dictGetT('dict_name', 'attr_name', id, date)
        </structure>

    </dictionary>
</yandex>
</clickhouse>
```

or
@ -12,7 +12,7 @@ toc_title: "\u5916\u90E8\u8F9E\u66F8\u306E\u30BD\u30FC\u30B9"

If the dictionary is configured using an xml-file, the configuration looks like this:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        ...
        <source>
@ -23,7 +23,7 @@ toc_title: "\u5916\u90E8\u8F9E\u66F8\u306E\u30BD\u30FC\u30B9"
        ...
    </dictionary>
    ...
</yandex>
</clickhouse>
```

In the case of a [DDL-query](../../statements/create.md#create-dictionary-query), the equivalent configuration looks like this:

@ -272,7 +272,7 @@ $ sudo apt-get install -y unixodbc odbcinst odbc-postgresql

Dictionary configuration in ClickHouse:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>table_name</name>
        <source>
@ -301,7 +301,7 @@ ClickHouseの辞書構成:
            </attribute>
        </structure>
    </dictionary>
</yandex>
</clickhouse>
```

or
@ -367,7 +367,7 @@ $ sudo apt-get install tdsodbc freetds-bin sqsh

Configuring the dictionary in ClickHouse:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>test</name>
        <source>
@ -397,7 +397,7 @@ ClickHouseでの辞書の構成:
            </attribute>
        </structure>
    </dictionary>
</yandex>
</clickhouse>
```

or
@ -28,7 +28,7 @@ toc_title: "\u4E00\u822C\u7684\u306A\u8AAC\u660E"

The dictionary configuration file has the following format:

``` xml
<yandex>
<clickhouse>
    <comment>An optional element with any content. Ignored by the ClickHouse server.</comment>

    <!--Optional element. File name with substitutions-->
@ -40,7 +40,7 @@ toc_title: "\u4E00\u822C\u7684\u306A\u8AAC\u660E"
    <!-- There can be any number of <dictionary> sections in the configuration file. -->
    </dictionary>

</yandex>
</clickhouse>
```

You can [configure](external-dicts-dict.md) any number of dictionaries in the same file.
@ -50,7 +50,7 @@ ClickHouseは、属性の値を解析できない場合、または値が属性

Configuration of the external dictionary:

``` xml
<yandex>
<clickhouse>
    <dictionary>
        <name>ext-dict-test</name>
        <source>
@ -74,7 +74,7 @@ ClickHouseは、属性の値を解析できない場合、または値が属性
        </structure>
        <lifetime>0</lifetime>
    </dictionary>
</yandex>
</clickhouse>
```

Executing the query:
@ -23,6 +23,20 @@ ENGINE = MaterializedPostgreSQL('host:port', ['database' | database], 'user', 'p

- `user` — PostgreSQL user.
- `password` — User password.

## Dynamically Adding New Tables to Replication {#dynamically-adding-table-to-replication}

``` sql
ATTACH TABLE postgres_database.new_table;
```

When a specific list of tables in the database is set with the [materialized_postgresql_tables_list](../../operations/settings/settings.md#materialized-postgresql-tables-list) setting, it will be updated (in the `.sql` metadata) to the current state, taking into account the tables added with the `ATTACH TABLE` query.

## Dynamically Removing Tables from Replication {#dynamically-removing-table-from-replication}

``` sql
DETACH TABLE postgres_database.table_to_remove;
```

## Settings {#settings}

- [materialized_postgresql_max_block_size](../../operations/settings/settings.md#materialized-postgresql-max-block-size)

@ -44,6 +58,12 @@ SETTINGS materialized_postgresql_max_block_size = 65536,

SELECT * FROM database1.table1;
```

The settings can be changed with a DDL query if necessary, but this cannot be used to change the `materialized_postgresql_tables_list` setting; to update the list of tables in that setting, use the `ATTACH TABLE` query.

``` sql
ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_block_size = <new_size>;
```

## Requirements {#requirements}

1. The [wal_level](https://postgrespro.ru/docs/postgrespro/10/runtime-config-wal) setting must have the value `logical`, and the `max_replication_slots` parameter must be at least `2` in the PostgreSQL configuration file.
@ -30,6 +30,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

    [rabbitmq_skip_broken_messages = N,]
    [rabbitmq_max_block_size = N,]
    [rabbitmq_flush_interval_ms = N]
    [rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish']
```

Required parameters:

@ -51,6 +52,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

- `rabbitmq_skip_broken_messages` – the maximum number of broken messages per block. If `rabbitmq_skip_broken_messages = N`, the engine skips `N` messages that could not be parsed. One message corresponds to exactly one record (row). The default value is 0.
- `rabbitmq_max_block_size`
- `rabbitmq_flush_interval_ms`
- `rabbitmq_queue_settings_list` – allows setting RabbitMQ settings manually when the queue is created. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting for the queue is enabled automatically.

Data format settings can also be added to the list of RabbitMQ settings.
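A hypothetical table definition using `rabbitmq_queue_settings_list` (host, exchange, and queue settings are illustrative):

``` sql
CREATE TABLE rabbitmq_queue (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_queue_settings_list = 'x-max-length=10,x-overflow=reject-publish';
```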
@ -775,6 +775,8 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'

After background merges or mutations are executed, the old parts are not deleted immediately, but only after some time (the `old_parts_lifetime` table setting). They are not moved to other volumes or disks either, so until they are deleted they are still counted when the occupied disk space is calculated.

The user can evenly distribute new large data parts across the different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume by using the [min_bytes_to_rebalance_partition_over_jbod](../../../operations/settings/merge-tree-settings.md#min-bytes-to-rebalance-partition-over-jbod) setting.

## Using the S3 Service for Data Storage {#table_engine-mergetree-s3}

Tables of the `MergeTree` family can store data in the [S3](https://aws.amazon.com/s3/) service by using a disk of type `s3`.
@ -7,27 +7,41 @@ toc_title: Merge

The `Merge` engine (not to be confused with `MergeTree`) does not store data itself, but allows reading from any number of other tables simultaneously.
Reading is automatically parallelized. Writing to the table is not supported. When reading, the indexes of the tables that are actually being read are used, if they exist.
The `Merge` engine accepts parameters: the database name and a regular expression for tables.

Example:
## Creating a Table {#creating-a-table}

``` sql
Merge(hits, '^WatchLog')
CREATE TABLE ... Engine=Merge(db_name, tables_regexp)
```

Data will be read from the tables in the `hits` database whose names match the regular expression '`^WatchLog`'.
**Engine Parameters**

Instead of the database name, you can use a constant expression that returns a string, for example, `currentDatabase()`.
- `db_name` — Possible values:
    - a database name,
    - an expression that returns a string with a database name, for example, `currentDatabase()`,
    - `REGEXP(expression)`, where `expression` is a regular expression for selecting databases.

- `tables_regexp` — a regular expression for table names in the specified database or databases.

## Usage {#usage}

Regular expressions are [re2](https://github.com/google/re2) (which supports a subset of PCRE) and case-sensitive.
See the note on escaping in regular expressions in the "match" section.

When selecting tables to read, the `Merge` table itself is not selected, even if it matches the regular expression, so that cycles do not occur.
It is, however, possible to create two `Merge` tables that will endlessly try to read each other's data, but you should not do this.
When selecting tables to read, the `Merge` table itself is not selected, even if it matches the regular expression, so that cycles do not occur.
It is, however, possible to create two `Merge` tables that will endlessly try to read each other's data, but this is not recommended.

A typical use of the `Merge` engine is working with a large number of `TinyLog` tables as if they were a single table.

Example 2:
**Example 1**

Suppose there are two databases, `ABC_corporate_site` and `ABC_store`. The `all_visitors` table will contain IDs from the `visitors` tables in both databases.

``` sql
CREATE TABLE all_visitors (id UInt32) ENGINE=Merge(REGEXP('ABC_*'), 'visitors');
```

**Example 2**

Suppose there is an old table `WatchLog_old`. You need to change the partitioning without moving the data to a new table `WatchLog_new`, and data from both tables must be visible in the result.

@ -42,8 +56,7 @@ INSERT INTO WatchLog_new VALUES ('2018-01-02', 2, 'hit', 3);

CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog');

SELECT *
FROM WatchLog
SELECT * FROM WatchLog;
```

``` text
@ -61,7 +74,7 @@ FROM WatchLog

In the `WHERE/PREWHERE` clause, you can set a constant condition on the `_table` column (for example, `WHERE _table='xyz'`). In this case, read operations are performed only for the tables that satisfy the condition on the `_table` value, so the `_table` column works like an index.
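For instance, to read only from the old table in the example above (a sketch based on the `WatchLog` tables defined earlier):

``` sql
SELECT * FROM WatchLog WHERE _table = 'WatchLog_old';
```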
**See also**

- [Virtual columns](index.md#table_engines-virtual_columns)

- The [merge](../../../sql-reference/table-functions/merge.md) table function
@ -30,11 +30,13 @@ toc_title: "Отличительные возможности ClickHouse"

Almost none of the columnar DBMSs listed earlier support distributed query processing.
In ClickHouse, data can reside on different shards. Each shard can be a group of replicas used for fault tolerance. A query is executed on all shards in parallel. This is transparent to the user.

## SQL Support {#podderzhka-sql}
## SQL Support {#sql-support}

ClickHouse supports a declarative query language based on SQL that in many cases matches the SQL standard.
GROUP BY, ORDER BY, subqueries in the FROM, IN, and JOIN clauses, and scalar subqueries are supported.
Correlated subqueries and window functions are not supported.
ClickHouse supports a [declarative query language based on SQL](../sql-reference/index.md) that in [many cases](../sql-reference/ansi.md) matches the SQL standard.

[GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in the [FROM](../sql-reference/statements/select/from.md), [IN](../sql-reference/operators/in.md), and [JOIN](../sql-reference/statements/select/join.md) clauses, [window functions](../sql-reference/window-functions/index.md), and scalar subqueries are supported.

Correlated subqueries are not supported, but may become available in the future.

## Vector Engine {#vektornyi-dvizhok}
@ -8,7 +8,7 @@ toc_title: "Конфигурационные файлы"

ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. The other files must be located in the `/etc/clickhouse-server/config.d` directory. Note that configuration files can be written in XML or YAML format, but mixing the two formats in one file is not supported. For example, you can keep the main configuration files as `config.xml` and `users.xml` and write additional files in the `config.d` and `users.d` directories in `.yaml` format.

All XML files must have the same root element, usually `<yandex>`. For YAML, the `yandex:` element must be absent, since the parser inserts it automatically.
All XML files must have the same root element, usually `<clickhouse>`. For YAML, the `clickhouse:` element must be absent, since the parser inserts it automatically.

## Overriding {#override}

@ -22,13 +22,13 @@ ClickHouse поддерживает многофайловое управлен

It is also possible to specify attributes as environment variables by using `from_env="VARIABLE_NAME"`:

```xml
<yandex>
<clickhouse>
    <macros>
        <replica from_env="REPLICA" />
        <layer from_env="LAYER" />
        <shard from_env="SHARD" />
    </macros>
</yandex>
</clickhouse>
```

## Substitutions {#substitution}

@ -40,7 +40,7 @@ ClickHouse поддерживает многофайловое управлен

Example of an XML substitution:

```xml
<yandex>
<clickhouse>
    <!-- Appends XML subtree found at `/profiles-in-zookeeper` ZK path to `<profiles>` element. -->
    <profiles from_zk="/profiles-in-zookeeper" />

@ -49,7 +49,7 @@ ClickHouse поддерживает многофайловое управлен
        <include from_zk="/users-in-zookeeper" />
        <include from_zk="/other-users-in-zookeeper" />
    </users>
</yandex>
</clickhouse>
```

Substitutions can also be performed from ZooKeeper. To do this, specify the `from_zk = "/path/to/node"` attribute for an element. The element value is replaced with the contents of the `/path/to/node` node in ZooKeeper. You can also put a whole XML subtree into a ZooKeeper node, and it will be inserted in full into the source element.

@ -66,7 +66,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

``` xml
<yandex>
<clickhouse>
    <users>
        <alice>
            <profile>analytics</profile>
@ -77,7 +77,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
            <quota>analytics</quota>
        </alice>
    </users>
</yandex>
</clickhouse>
```

For each configuration file, the server also generates `file-preprocessed.xml` files at startup. These files contain all the completed substitutions and overrides and are intended for informational purposes. If ZooKeeper substitutions were used in the configuration files but ZooKeeper is unavailable at server startup, the server loads the configuration from the preprocessed file.
@ -24,32 +24,32 @@ ClickHouse предоставляет возможность аутентифи

Examples of what the `config.xml` file should look like:

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <kerberos />
</yandex>
</clickhouse>
```

Or, with a principal specified:

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <kerberos>
        <principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
    </kerberos>
</yandex>
</clickhouse>
```

Or, with filtering by realm:

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <kerberos>
        <realm>EXAMPLE.COM</realm>
    </kerberos>
</yandex>
</clickhouse>
```

!!! warning "Important"
@ -81,7 +81,7 @@ ClickHouse предоставляет возможность аутентифи

An example of what the Kerberos configuration looks like in `users.xml`:

```xml
<yandex>
<clickhouse>
    <!-- ... -->
    <users>
        <!-- ... -->
@ -92,7 +92,7 @@ ClickHouse предоставляет возможность аутентифи
            </kerberos>
        </my_user>
    </users>
</yandex>
</clickhouse>
```