Merge branch 'master' into fix-async-inserts
commit fa8f9671f4

.gitattributes (2 changes)
@@ -1,4 +1,2 @@
contrib/* linguist-vendored
*.h linguist-language=C++
# to avoid frequent conflicts
tests/queries/0_stateless/arcadia_skip_list.txt text merge=union

.github/workflows/main.yml (38 changes)
@@ -857,7 +857,7 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan:
  FunctionalStatelessTestAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
@@ -874,6 +874,39 @@ jobs:
          CHECK_NAME: 'Stateless tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
          RUN_BY_HASH_NUM: 0
          RUN_BY_HASH_TOTAL: 2
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan1:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
          RUN_BY_HASH_NUM: 1
          RUN_BY_HASH_TOTAL: 2
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
@@ -2143,7 +2176,8 @@ jobs:
      - FunctionalStatelessTestRelease
      - FunctionalStatelessTestReleaseDatabaseReplicated
      - FunctionalStatelessTestReleaseWideParts
      - FunctionalStatelessTestAsan
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
      - FunctionalStatelessTestTsan0
      - FunctionalStatelessTestTsan1
      - FunctionalStatelessTestTsan2
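The change above splits `FunctionalStatelessTestAsan` into `FunctionalStatelessTestAsan0` and `FunctionalStatelessTestAsan1`, which differ only in `RUN_BY_HASH_NUM`; `RUN_BY_HASH_TOTAL` tells each job how many buckets exist. The idea is plain hash-based sharding of the test set. A minimal sketch in Python (illustrative only; the actual selection logic lives in `tests/ci/functional_test_check.py` and the test runner it invokes, not in this exact form):

```python
# Hash-based sharding: each CI job keeps only the tests that fall into its bucket.
import hashlib
import os


def shard_tests(test_names, run_by_hash_num, run_by_hash_total):
    """Return the subset of tests this job should run."""
    selected = []
    for name in test_names:
        digest = int(hashlib.sha1(name.encode()).hexdigest(), 16)
        if digest % run_by_hash_total == run_by_hash_num:
            selected.append(name)
    return selected


if __name__ == "__main__":
    tests = ["00001_select_1", "00002_system_numbers", "02100_replace_regexp"]
    num = int(os.environ.get("RUN_BY_HASH_NUM", "0"))
    total = int(os.environ.get("RUN_BY_HASH_TOTAL", "1"))
    print(shard_tests(tests, num, total))
```

Running the script with `RUN_BY_HASH_NUM=0` and `RUN_BY_HASH_NUM=1` (and `RUN_BY_HASH_TOTAL=2`) yields two disjoint subsets that together cover every test, which is what the two jobs above rely on.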

.github/workflows/master.yml (66 changes)
@@ -770,7 +770,7 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan:
  FunctionalStatelessTestAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
@@ -787,6 +787,39 @@ jobs:
          CHECK_NAME: 'Stateless tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
          RUN_BY_HASH_NUM: 0
          RUN_BY_HASH_TOTAL: 2
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan1:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (address, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          KILL_TIMEOUT: 10800
          RUN_BY_HASH_NUM: 1
          RUN_BY_HASH_TOTAL: 2
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
@@ -1729,34 +1762,6 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  IntegrationTestsFlakyCheck:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Integration test
        env:
          TEMP_PATH: ${{runner.temp}}/integration_tests_asan_flaky_check
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Integration tests flaky check (asan, actions)'
          REPO_COPY: ${{runner.temp}}/integration_tests_asan_flaky_check/ClickHouse
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 integration_test_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
##############################################################################################
##################################### AST FUZZERS ############################################
##############################################################################################
@@ -2052,7 +2057,8 @@ jobs:
      - FunctionalStatelessTestDebug2
      - FunctionalStatelessTestRelease
      - FunctionalStatelessTestReleaseDatabaseOrdinary
      - FunctionalStatelessTestAsan
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
      - FunctionalStatelessTestTsan0
      - FunctionalStatelessTestTsan1
      - FunctionalStatelessTestTsan2

CHANGELOG.md (178 changes)
@@ -1,3 +1,181 @@
### ClickHouse release v21.12, 2021-12-13

#### Backward Incompatible Change

* *A fix for a feature that previously had unwanted behaviour.* Do not allow direct select for Kafka/RabbitMQ/FileLog. It can be enabled by the setting `stream_like_engine_allow_direct_select`. Direct select will not be allowed even if enabled by the setting, in case there is an attached materialized view. For Kafka and RabbitMQ, direct select, if allowed, will not commit messages by default. To enable commits with direct select, the user must use the storage-level setting `kafka{rabbitmq}_commit_on_select=1` (default `0`); a usage sketch follows at the end of this list. [#31053](https://github.com/ClickHouse/ClickHouse/pull/31053) ([Kseniia Sumarokova](https://github.com/kssenii)).
* *A slight change in behaviour of a new function.* Return unquoted string in JSON_VALUE. Closes [#27965](https://github.com/ClickHouse/ClickHouse/issues/27965). [#31008](https://github.com/ClickHouse/ClickHouse/pull/31008) ([Kseniia Sumarokova](https://github.com/kssenii)).
* *Setting rename.* Add custom null representation support for TSV/CSV input formats. Fix deserializing Nullable(String) in TSV/CSV/JSONCompactStringsEachRow/JSONStringsEachRow input formats. Rename `output_format_csv_null_representation` and `output_format_tsv_null_representation` to `format_csv_null_representation` and `format_tsv_null_representation` accordingly. [#30497](https://github.com/ClickHouse/ClickHouse/pull/30497) ([Kruglov Pavel](https://github.com/Avogar)).
* *Further deprecation of already unused code.* This is relevant only for users of ClickHouse versions older than 20.6. A "leader election" mechanism is removed from `ReplicatedMergeTree`, because multiple leaders are supported since 20.6. If you are upgrading from an older version and some replica with an old version is a leader, then the server will fail to start after the upgrade. Stop replicas with the old version to let the new version start. After that it will not be possible to downgrade to a version older than 20.6. [#32140](https://github.com/ClickHouse/ClickHouse/pull/32140) ([tavplubix](https://github.com/tavplubix)).
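The first item in this list introduces `stream_like_engine_allow_direct_select` and the storage-level `kafka_commit_on_select` / `rabbitmq_commit_on_select` settings. A usage sketch, assuming the third-party `clickhouse-driver` package, a local server and a local Kafka broker (all assumptions for illustration, not part of the changelog):

```python
# Sketch: direct SELECT from a Kafka table, opting in to offset commits on SELECT.
from clickhouse_driver import Client  # assumed client library

client = Client("localhost")

# kafka_commit_on_select is a table-level (storage) setting from this release.
client.execute("""
    CREATE TABLE IF NOT EXISTS queue
    (
        message String
    )
    ENGINE = Kafka
    SETTINGS kafka_broker_list = 'localhost:9092',
             kafka_topic_list = 'events',
             kafka_group_name = 'example_group',
             kafka_format = 'JSONEachRow',
             kafka_commit_on_select = 1
""")

# stream_like_engine_allow_direct_select is a query-level setting; without it the
# direct SELECT is rejected (and it is rejected anyway if a materialized view is attached).
rows = client.execute(
    "SELECT message FROM queue LIMIT 10",
    settings={"stream_like_engine_allow_direct_select": 1},
)
print(rows)
```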

#### New Feature

* Implemented more of the ZooKeeper Four Letter Words commands in clickhouse-keeper: https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands. [#28981](https://github.com/ClickHouse/ClickHouse/pull/28981) ([JackyWoo](https://github.com/JackyWoo)). Now `clickhouse-keeper` is feature complete.
* Support for `Bool` data type. [#31072](https://github.com/ClickHouse/ClickHouse/pull/31072) ([kevin wan](https://github.com/MaxWk)).
* Support for `PARTITION BY` in File, URL, HDFS storages and with `INSERT INTO` table function. Closes [#30273](https://github.com/ClickHouse/ClickHouse/issues/30273). [#30690](https://github.com/ClickHouse/ClickHouse/pull/30690) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added `CONSTRAINT ... ASSUME ...` (without checking during `INSERT`). Added query transformation to CNF (https://github.com/ClickHouse/ClickHouse/issues/11749) for more convenient optimization. Added simple query rewriting using constraints (only simple matching now, will be improved to support <,=,>... later). Added ability to replace heavy columns with light columns if it's possible. [#18787](https://github.com/ClickHouse/ClickHouse/pull/18787) ([Nikita Vasilev](https://github.com/nikvas0)).
* Basic access authentication for http/url functions. [#31648](https://github.com/ClickHouse/ClickHouse/pull/31648) ([michael1589](https://github.com/michael1589)).
* Support `INTERVAL` type in the `STEP` clause of the `WITH FILL` modifier (a small example follows at the end of this list). [#30927](https://github.com/ClickHouse/ClickHouse/pull/30927) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for parallel reading from multiple files and support globs in `FROM INFILE` clause. [#30135](https://github.com/ClickHouse/ClickHouse/pull/30135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add support for `Identifier` table and database query parameters. Closes [#27226](https://github.com/ClickHouse/ClickHouse/issues/27226). [#28668](https://github.com/ClickHouse/ClickHouse/pull/28668) ([Nikolay Degterinsky](https://github.com/evillique)).
* *TLDR: Major improvements of completeness and consistency of text formats.* Refactor formats `TSV`, `TSVRaw`, `CSV` and `JSONCompactEachRow`, `JSONCompactStringsEachRow`, remove code duplication, add base interface for formats with `-WithNames` and `-WithNamesAndTypes` suffixes. Add formats `CSVWithNamesAndTypes`, `TSVRawWithNames`, `TSVRawWithNamesAndTypes`, `JSONCompactEachRowWithNames`, `JSONCompactStringsEachRowWithNames`, `RowBinaryWithNames`. Support parallel parsing for formats `TSVWithNamesAndTypes`, `TSVRaw(WithNames/WithNamesAndTypes)`, `CSVWithNamesAndTypes`, `JSONCompactEachRow(WithNames/WithNamesAndTypes)`, `JSONCompactStringsEachRow(WithNames/WithNamesAndTypes)`. Support columns mapping and types checking for `RowBinaryWithNamesAndTypes` format. Add setting `input_format_with_types_use_header` which specifies whether we should check that types written in the <format_name>`WithNamesAndTypes` format match the table structure. Add setting `input_format_csv_empty_as_default` and use it in CSV format instead of `input_format_defaults_for_omitted_fields` (because this setting should not control `csv_empty_as_default`). Fix usage of setting `input_format_defaults_for_omitted_fields` (it was used only as `csv_empty_as_default`, but it should control calculation of default expressions for omitted fields). Fix Nullable input/output in `TSVRaw` format, make this format fully compatible with inserting into TSV. Fix inserting NULLs in `LowCardinality(Nullable)` when `input_format_null_as_default` is enabled (previously default values were inserted instead of actual NULLs). Fix strings deserialization in `JSONStringsEachRow`/`JSONCompactStringsEachRow` formats (strings were parsed just until first '\n' or '\t'). Add ability to use `Raw` escaping rule in Template input format. Add diagnostic info for JSONCompactEachRow(WithNames/WithNamesAndTypes) input format. Fix bug with parallel parsing of `-WithNames` formats in case when setting `min_chunk_bytes_for_parallel_parsing` is less than bytes in a single row. [#30178](https://github.com/ClickHouse/ClickHouse/pull/30178) ([Kruglov Pavel](https://github.com/Avogar)). Allow to print/parse names and types of columns in `CustomSeparated` input/output format. Add formats `CustomSeparatedWithNames/WithNamesAndTypes` similar to `TSVWithNames/WithNamesAndTypes`. [#31434](https://github.com/ClickHouse/ClickHouse/pull/31434) ([Kruglov Pavel](https://github.com/Avogar)).
* Aliyun OSS Storage support. [#31286](https://github.com/ClickHouse/ClickHouse/pull/31286) ([cfcz48](https://github.com/cfcz48)).
* Exposes all settings of the global thread pool in the configuration file. [#31285](https://github.com/ClickHouse/ClickHouse/pull/31285) ([Tomáš Hromada](https://github.com/gyfis)).
* Introduced window functions `exponentialTimeDecayedSum`, `exponentialTimeDecayedMax`, `exponentialTimeDecayedCount` and `exponentialTimeDecayedAvg` which are more effective than `exponentialMovingAverage` for bigger windows. Also more use-cases were covered. [#29799](https://github.com/ClickHouse/ClickHouse/pull/29799) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Add option to compress logs before writing them to a file using LZ4. Closes [#23860](https://github.com/ClickHouse/ClickHouse/issues/23860). [#29219](https://github.com/ClickHouse/ClickHouse/pull/29219) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support `JOIN ON 1 = 1`, which has CROSS JOIN semantics. This closes [#25578](https://github.com/ClickHouse/ClickHouse/issues/25578). [#25894](https://github.com/ClickHouse/ClickHouse/pull/25894) ([Vladimir C](https://github.com/vdimir)).
* Add Map combinator for `Map` type. - Rename old `sum-, min-, max- Map` for mapped arrays to `sum-, min-, max- MappedArrays`. [#24539](https://github.com/ClickHouse/ClickHouse/pull/24539) ([Ildus Kurbangaliev](https://github.com/ildus)).
* Make reading from HTTP retriable. Closes [#29696](https://github.com/ClickHouse/ClickHouse/issues/29696). [#29894](https://github.com/ClickHouse/ClickHouse/pull/29894) ([Kseniia Sumarokova](https://github.com/kssenii)).
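To make the `WITH FILL ... STEP INTERVAL` item above concrete, here is a small sketch (assuming the third-party `clickhouse-driver` package and a local server; any client would do):

```python
# Sketch: fill gaps between dates with a 1-day step using the new INTERVAL support
# in the STEP clause of WITH FILL.
from clickhouse_driver import Client  # assumed client library

client = Client("localhost")

rows = client.execute("""
    SELECT toDate('2021-12-01') + number * 3 AS d, number AS v
    FROM numbers(3)
    ORDER BY d WITH FILL STEP INTERVAL 1 DAY
""")
for d, v in rows:
    print(d, v)  # missing dates appear with v = 0
```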

#### Experimental Feature

* `WINDOW VIEW` to enable stream processing in ClickHouse. [#8331](https://github.com/ClickHouse/ClickHouse/pull/8331) ([vxider](https://github.com/Vxider)).
* Drop support for using Ordinary databases with `MaterializedMySQL`. [#31292](https://github.com/ClickHouse/ClickHouse/pull/31292) ([Stig Bakken](https://github.com/stigsb)).
* Implement the commands BACKUP and RESTORE for the Log family. This feature is under development. [#30688](https://github.com/ClickHouse/ClickHouse/pull/30688) ([Vitaly Baranov](https://github.com/vitlibar)).

#### Performance Improvement

* Reduce memory usage when reading with `s3` / `url` / `hdfs` formats `Parquet`, `ORC`, `Arrow` (controlled by setting `input_format_allow_seeks`, enabled by default). Also add setting `remote_read_min_bytes_for_seek` to control seeks. Closes [#10461](https://github.com/ClickHouse/ClickHouse/issues/10461). Closes [#16857](https://github.com/ClickHouse/ClickHouse/issues/16857). [#30936](https://github.com/ClickHouse/ClickHouse/pull/30936) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add optimizations for constant conditions in JOIN ON, ref [#26928](https://github.com/ClickHouse/ClickHouse/issues/26928). [#27021](https://github.com/ClickHouse/ClickHouse/pull/27021) ([Vladimir C](https://github.com/vdimir)).
* Support parallel formatting for all text formats, except `JSONEachRowWithProgress` and `PrettyCompactMonoBlock`. [#31489](https://github.com/ClickHouse/ClickHouse/pull/31489) ([Kruglov Pavel](https://github.com/Avogar)).
* Speed up count over nullable columns. [#31806](https://github.com/ClickHouse/ClickHouse/pull/31806) ([Raúl Marín](https://github.com/Algunenano)).
* Speed up `avg` and `sumCount` aggregate functions. [#31694](https://github.com/ClickHouse/ClickHouse/pull/31694) ([Raúl Marín](https://github.com/Algunenano)).
* Improve performance of JSON and XML output formats. [#31673](https://github.com/ClickHouse/ClickHouse/pull/31673) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of syncing data to block device. This closes [#31181](https://github.com/ClickHouse/ClickHouse/issues/31181). [#31229](https://github.com/ClickHouse/ClickHouse/pull/31229) ([zhanglistar](https://github.com/zhanglistar)).
* Fixing query performance issue in `LiveView` tables. Fixes [#30831](https://github.com/ClickHouse/ClickHouse/issues/30831). [#31006](https://github.com/ClickHouse/ClickHouse/pull/31006) ([vzakaznikov](https://github.com/vzakaznikov)).
* Speed up query parsing. [#31949](https://github.com/ClickHouse/ClickHouse/pull/31949) ([Raúl Marín](https://github.com/Algunenano)).
* Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional `rule_type` field). [#25122](https://github.com/ClickHouse/ClickHouse/pull/25122) ([Michail Safronov](https://github.com/msaf1980)).
* Remove excessive `DESC TABLE` requests for `remote()` (in case of `remote('127.1', system.one)` (i.e. identifier as the db.table instead of string) there was excessive `DESC TABLE` request). [#32019](https://github.com/ClickHouse/ClickHouse/pull/32019) ([Azat Khuzhin](https://github.com/azat)).
* Optimize function `tupleElement` to read a subcolumn when the setting `optimize_functions_to_subcolumns` is enabled. [#31261](https://github.com/ClickHouse/ClickHouse/pull/31261) ([Anton Popov](https://github.com/CurtizJ)).
* Optimize function `mapContains` to read only the `key` subcolumn when the setting `optimize_functions_to_subcolumns` is enabled (see the sketch after this list). [#31218](https://github.com/ClickHouse/ClickHouse/pull/31218) ([Anton Popov](https://github.com/CurtizJ)).
* Add settings `merge_tree_min_rows_for_concurrent_read_for_remote_filesystem` and `merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem`. [#30970](https://github.com/ClickHouse/ClickHouse/pull/30970) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Skipping mutations of different partitions in `StorageMergeTree`. [#21326](https://github.com/ClickHouse/ClickHouse/pull/21326) ([Vladimir Chebotarev](https://github.com/excitoon)).
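For the `tupleElement`/`mapContains` items above: with `optimize_functions_to_subcolumns` enabled, such calls can be served from a subcolumn (for example `m.keys`) instead of reading the whole `Map`. One hedged way to observe this is to compare the rewritten query text; the exact rewritten form may differ between versions, and the table and client below are assumptions for illustration:

```python
# Sketch: inspect how the query is rewritten with and without
# optimize_functions_to_subcolumns.
from clickhouse_driver import Client  # assumed client library

client = Client("localhost")
client.execute("""
    CREATE TABLE IF NOT EXISTS kv (m Map(String, UInt64))
    ENGINE = MergeTree ORDER BY tuple()
""")

for flag in (0, 1):
    plan = client.execute(
        "EXPLAIN SYNTAX SELECT count() FROM kv WHERE mapContains(m, 'answer')",
        settings={"optimize_functions_to_subcolumns": flag},
    )
    print(f"optimize_functions_to_subcolumns={flag}")
    for (line,) in plan:
        print(" ", line)
```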

#### Improvement

* Do not allow to drop a table or dictionary if some tables or dictionaries depend on it. [#30977](https://github.com/ClickHouse/ClickHouse/pull/30977) ([tavplubix](https://github.com/tavplubix)).
* Allow versioning of aggregate function states. Now we can introduce backward compatible changes in serialization format of aggregate function states. Closes [#12552](https://github.com/ClickHouse/ClickHouse/issues/12552). [#24820](https://github.com/ClickHouse/ClickHouse/pull/24820) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support PostgreSQL style `ALTER MODIFY COLUMN` syntax. [#32003](https://github.com/ClickHouse/ClickHouse/pull/32003) ([SuperDJY](https://github.com/cmsxbc)).
* Added `update_field` support for `RangeHashedDictionary`, `ComplexKeyRangeHashedDictionary`. [#32185](https://github.com/ClickHouse/ClickHouse/pull/32185) ([Maksim Kita](https://github.com/kitaisreal)).
* The `murmurHash3_128` and `sipHash128` functions now accept an arbitrary number of arguments (see the short example after this list). This closes [#28774](https://github.com/ClickHouse/ClickHouse/issues/28774). [#28965](https://github.com/ClickHouse/ClickHouse/pull/28965) ([小路](https://github.com/nicelulu)).
* Support default expression for `HDFS` storage and optimize fetching when source is column oriented. [#32256](https://github.com/ClickHouse/ClickHouse/pull/32256) ([李扬](https://github.com/taiyang-li)).
* Improve the operation name of an opentelemetry span. [#32234](https://github.com/ClickHouse/ClickHouse/pull/32234) ([Frank Chen](https://github.com/FrankChen021)).
* Use `Content-Type: application/x-ndjson` (http://ndjson.org/) for output format `JSONEachRow`. [#32223](https://github.com/ClickHouse/ClickHouse/pull/32223) ([Dmitriy Dorofeev](https://github.com/deem0n)).
* Improve skipping unknown fields with quoted escaping rule in Template/CustomSeparated formats. Previously you could skip only quoted strings, now you can skip values with any type. [#32204](https://github.com/ClickHouse/ClickHouse/pull/32204) ([Kruglov Pavel](https://github.com/Avogar)).
* Now `clickhouse-keeper` refuses to start or apply configuration changes when they contain duplicated IDs or endpoints. Fixes [#31339](https://github.com/ClickHouse/ClickHouse/issues/31339). [#32121](https://github.com/ClickHouse/ClickHouse/pull/32121) ([alesapin](https://github.com/alesapin)).
* Set Content-Type in HTTP packets issued from URL engine. [#32113](https://github.com/ClickHouse/ClickHouse/pull/32113) ([Frank Chen](https://github.com/FrankChen021)).
* Return Content-Type as 'application/json' for `JSONEachRow` format if `output_format_json_array_of_rows` is enabled. [#32112](https://github.com/ClickHouse/ClickHouse/pull/32112) ([Frank Chen](https://github.com/FrankChen021)).
* Allow to parse `+` before `Float32`/`Float64` values. [#32079](https://github.com/ClickHouse/ClickHouse/pull/32079) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow a user configured `hdfs_replication` parameter for `DiskHDFS` and `StorageHDFS`. Closes [#32039](https://github.com/ClickHouse/ClickHouse/issues/32039). [#32049](https://github.com/ClickHouse/ClickHouse/pull/32049) ([leosunli](https://github.com/leosunli)).
* Added ClickHouse `exception` and `exception_code` fields to opentelemetry span log. [#32040](https://github.com/ClickHouse/ClickHouse/pull/32040) ([Frank Chen](https://github.com/FrankChen021)).
* Improve opentelemetry span log duration - it was zero at the query level if there was a query exception. [#32038](https://github.com/ClickHouse/ClickHouse/pull/32038) ([Frank Chen](https://github.com/FrankChen021)).
* Fix the issue that `LowCardinality` of `Int256` cannot be created. [#31832](https://github.com/ClickHouse/ClickHouse/pull/31832) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Recreate `system.*_log` tables in case of different engine/partition_by. [#31824](https://github.com/ClickHouse/ClickHouse/pull/31824) ([Azat Khuzhin](https://github.com/azat)).
* `MaterializedMySQL`: Fix issue with table named 'table'. [#31781](https://github.com/ClickHouse/ClickHouse/pull/31781) ([Håvard Kvålen](https://github.com/havardk)).
* ClickHouse dictionary source: support named collections. Closes [#31705](https://github.com/ClickHouse/ClickHouse/issues/31705). [#31749](https://github.com/ClickHouse/ClickHouse/pull/31749) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow to use named collections configuration for Kafka and RabbitMQ engines (the same way as for other integration table engines). [#31691](https://github.com/ClickHouse/ClickHouse/pull/31691) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Always re-render prompt while navigating history in clickhouse-client. This will improve usability of manipulating very long queries that don't fit on screen. [#31675](https://github.com/ClickHouse/ClickHouse/pull/31675) ([alexey-milovidov](https://github.com/alexey-milovidov)) (author: Amos Bird).
* Add key bindings for navigating through history (instead of lines/history). [#31641](https://github.com/ClickHouse/ClickHouse/pull/31641) ([Azat Khuzhin](https://github.com/azat)).
* Improve the `max_execution_time` checks. Fixed some cases when timeout checks do not happen and query could run too long. [#31636](https://github.com/ClickHouse/ClickHouse/pull/31636) ([Raúl Marín](https://github.com/Algunenano)).
* Better exception message when `users.xml` cannot be loaded due to bad password hash. This closes [#24126](https://github.com/ClickHouse/ClickHouse/issues/24126). [#31557](https://github.com/ClickHouse/ClickHouse/pull/31557) ([Vitaly Baranov](https://github.com/vitlibar)).
* Use shard and replica name from `Replicated` database arguments when expanding macros in `ReplicatedMergeTree` arguments if these macros are not defined in config. Closes [#31471](https://github.com/ClickHouse/ClickHouse/issues/31471). [#31488](https://github.com/ClickHouse/ClickHouse/pull/31488) ([tavplubix](https://github.com/tavplubix)).
* Better analysis for `min/max/count` projection. Now, with enabled `allow_experimental_projection_optimization`, virtual `min/max/count` projection can be used together with columns from partition key. [#31474](https://github.com/ClickHouse/ClickHouse/pull/31474) ([Amos Bird](https://github.com/amosbird)).
* Add `--pager` support for `clickhouse-local`. [#31457](https://github.com/ClickHouse/ClickHouse/pull/31457) ([Azat Khuzhin](https://github.com/azat)).
* Fix waiting of the editor during interactive query edition (`waitpid()` returns -1 on `SIGWINCH` and `EDITOR` and `clickhouse-local`/`clickhouse-client` works concurrently). [#31456](https://github.com/ClickHouse/ClickHouse/pull/31456) ([Azat Khuzhin](https://github.com/azat)).
* Throw an exception if there is some garbage after field in `JSONCompactStrings(EachRow)` format. [#31455](https://github.com/ClickHouse/ClickHouse/pull/31455) ([Kruglov Pavel](https://github.com/Avogar)).
* Default value of `http_send_timeout` and `http_receive_timeout` settings changed from 1800 (30 minutes) to 180 (3 minutes). [#31450](https://github.com/ClickHouse/ClickHouse/pull/31450) ([tavplubix](https://github.com/tavplubix)).
* `MaterializedMySQL` now handles `CREATE TABLE ... LIKE ...` DDL queries. [#31410](https://github.com/ClickHouse/ClickHouse/pull/31410) ([Stig Bakken](https://github.com/stigsb)).
* Return artificial create query when executing `show create table` on system's tables. [#31391](https://github.com/ClickHouse/ClickHouse/pull/31391) ([SuperDJY](https://github.com/cmsxbc)).
* Previously progress was shown only for `numbers` table function. Now for `numbers_mt` it is also shown. [#31318](https://github.com/ClickHouse/ClickHouse/pull/31318) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Initial user's roles are used now to find row policies, see [#31080](https://github.com/ClickHouse/ClickHouse/issues/31080). [#31262](https://github.com/ClickHouse/ClickHouse/pull/31262) ([Vitaly Baranov](https://github.com/vitlibar)).
* If some obsolete setting is changed - show warning in `system.warnings`. [#31252](https://github.com/ClickHouse/ClickHouse/pull/31252) ([tavplubix](https://github.com/tavplubix)).
* Improved backoff for background cleanup tasks in `MergeTree`. Settings `merge_tree_clear_old_temporary_directories_interval_seconds` and `merge_tree_clear_old_parts_interval_seconds` moved from users settings to merge tree settings. [#31180](https://github.com/ClickHouse/ClickHouse/pull/31180) ([tavplubix](https://github.com/tavplubix)).
* Now every replica will send to client only incremental information about profile events counters. [#31155](https://github.com/ClickHouse/ClickHouse/pull/31155) ([Dmitry Novik](https://github.com/novikd)). This makes `--hardware_utilization` option in `clickhouse-client` usable.
* Enable multiline editing in clickhouse-client by default. This addresses [#31121](https://github.com/ClickHouse/ClickHouse/issues/31121) . [#31123](https://github.com/ClickHouse/ClickHouse/pull/31123) ([Amos Bird](https://github.com/amosbird)).
* Function name normalization for `ALTER` queries. This helps avoid metadata mismatch between creating table with indices/projections and adding indices/projections via alter commands. This is a follow-up PR of https://github.com/ClickHouse/ClickHouse/pull/20174. Marked as an improvement as there are no bug reports and the scenario is somewhat rare. [#31095](https://github.com/ClickHouse/ClickHouse/pull/31095) ([Amos Bird](https://github.com/amosbird)).
* Support `IF EXISTS` modifier for `RENAME DATABASE`/`TABLE`/`DICTIONARY` query. If this directive is used, one will not get an error if the DATABASE/TABLE/DICTIONARY to be renamed doesn't exist. [#31081](https://github.com/ClickHouse/ClickHouse/pull/31081) ([victorgao](https://github.com/kafka1991)).
* Cancel vertical merges when partition is dropped. This is a follow-up of https://github.com/ClickHouse/ClickHouse/pull/25684 and https://github.com/ClickHouse/ClickHouse/pull/30996. [#31057](https://github.com/ClickHouse/ClickHouse/pull/31057) ([Amos Bird](https://github.com/amosbird)).
* The local session inside a Clickhouse dictionary source won't send its events to the session log anymore. This fixes a possible deadlock (tsan alert) on shutdown. Also this PR fixes flaky `test_dictionaries_dependency_xml/`. [#31013](https://github.com/ClickHouse/ClickHouse/pull/31013) ([Vitaly Baranov](https://github.com/vitlibar)).
* Less locking in ALTER command. [#31010](https://github.com/ClickHouse/ClickHouse/pull/31010) ([Amos Bird](https://github.com/amosbird)).
* Fix `--verbose` option in clickhouse-local interactive mode and allow logging into file. [#30881](https://github.com/ClickHouse/ClickHouse/pull/30881) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added `\l`, `\d`, `\c` commands in `clickhouse-client` like in MySQL and PostgreSQL. [#30876](https://github.com/ClickHouse/ClickHouse/pull/30876) ([Pavel Medvedev](https://github.com/pmed)).
* For clickhouse-local or clickhouse-client: if there is `--interactive` option with `--query` or `--queries-file`, then first execute them like in non-interactive and then start interactive mode. [#30851](https://github.com/ClickHouse/ClickHouse/pull/30851) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix possible "The local set of parts of X doesn't look like the set of parts in ZooKeeper" error (if DROP fails during removing znodes from zookeeper). [#30826](https://github.com/ClickHouse/ClickHouse/pull/30826) ([Azat Khuzhin](https://github.com/azat)).
* Avro format works against Kafka. Setting `output_format_avro_rows_in_file` added. [#30351](https://github.com/ClickHouse/ClickHouse/pull/30351) ([Ilya Golshtein](https://github.com/ilejn)).
* Allow to specify one or any number of PostgreSQL schemas for one `MaterializedPostgreSQL` database. Closes [#28901](https://github.com/ClickHouse/ClickHouse/issues/28901). Closes [#29324](https://github.com/ClickHouse/ClickHouse/issues/29324). [#28933](https://github.com/ClickHouse/ClickHouse/pull/28933) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Replaced default ports for clickhouse-keeper internal communication from 44444 to 9234. Fixes [#30879](https://github.com/ClickHouse/ClickHouse/issues/30879). [#31799](https://github.com/ClickHouse/ClickHouse/pull/31799) ([alesapin](https://github.com/alesapin)).
* Implement function transform with Decimal arguments. [#31839](https://github.com/ClickHouse/ClickHouse/pull/31839) ([李帅](https://github.com/loneylee)).
* Fix abort in debug server and `DB::Exception: std::out_of_range: basic_string` error in release server in case of bad hdfs url by adding additional check of hdfs url structure. [#31042](https://github.com/ClickHouse/ClickHouse/pull/31042) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible assert in `hdfs` table function/engine, add test. [#31036](https://github.com/ClickHouse/ClickHouse/pull/31036) ([Kruglov Pavel](https://github.com/Avogar)).
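A quick illustration of the variadic `murmurHash3_128` / `sipHash128` change mentioned earlier in this list (sketch; the `clickhouse-driver` package and a local server are assumptions):

```python
# Sketch: hash a compound key directly, without manual concatenation.
from clickhouse_driver import Client  # assumed client library

client = Client("localhost")

rows = client.execute("""
    SELECT
        hex(sipHash128('user', 42, today())) AS sip,
        hex(murmurHash3_128('user', 42, today())) AS murmur
""")
print(rows)
```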

#### Bug Fixes

* Fix group by / order by / limit by aliases with positional arguments enabled (a short example of positional arguments follows at the end of this list). Closes [#31173](https://github.com/ClickHouse/ClickHouse/issues/31173). [#31741](https://github.com/ClickHouse/ClickHouse/pull/31741) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix usage of `Buffer` table engine with type `Map`. Fixes [#30546](https://github.com/ClickHouse/ClickHouse/issues/30546). [#31742](https://github.com/ClickHouse/ClickHouse/pull/31742) ([Anton Popov](https://github.com/CurtizJ)).
* Fix reading from `MergeTree` tables with enabled `use_uncompressed_cache`. [#31826](https://github.com/ClickHouse/ClickHouse/pull/31826) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed the behavior when mutations that have nothing to do are stuck (with enabled setting `empty_result_for_aggregation_by_empty_set`). [#32358](https://github.com/ClickHouse/ClickHouse/pull/32358) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix skipping columns while writing protobuf. This PR fixes [#31160](https://github.com/ClickHouse/ClickHouse/issues/31160), see the comment [#31160](https://github.com/ClickHouse/ClickHouse/issues/31160)#issuecomment-980595318. [#31988](https://github.com/ClickHouse/ClickHouse/pull/31988) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a bug when removing unneeded columns in a subquery. If there is an aggregation function in a query without group by, do not remove it even if it looks unneeded. [#32289](https://github.com/ClickHouse/ClickHouse/pull/32289) ([dongyifeng](https://github.com/dyf6372)).
* Fix quotas: a quota could be reported as exceeded even though its limit was not actually reached. This PR fixes [#31174](https://github.com/ClickHouse/ClickHouse/issues/31174). [#31337](https://github.com/ClickHouse/ClickHouse/pull/31337) ([sunny](https://github.com/sunny19930321)).
* Fix SHOW GRANTS when partial revokes are used. This PR fixes [#31138](https://github.com/ClickHouse/ClickHouse/issues/31138). [#31249](https://github.com/ClickHouse/ClickHouse/pull/31249) ([Vitaly Baranov](https://github.com/vitlibar)).
* Memory amount was incorrectly estimated when ClickHouse is run in containers with cgroup limits. [#31157](https://github.com/ClickHouse/ClickHouse/pull/31157) ([Pavel Medvedev](https://github.com/pmed)).
* Fix `ALTER ... MATERIALIZE COLUMN ...` queries in case when data type of default expression is not equal to the data type of column. [#32348](https://github.com/ClickHouse/ClickHouse/pull/32348) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed crash with SIGFPE in aggregate function `avgWeighted` with `Decimal` argument. Fixes [#32053](https://github.com/ClickHouse/ClickHouse/issues/32053). [#32303](https://github.com/ClickHouse/ClickHouse/pull/32303) ([tavplubix](https://github.com/tavplubix)).
* Server might fail to start with `Cannot attach 1 tables due to cyclic dependencies` error if `Dictionary` table looks at XML-dictionary with the same name, it's fixed. Fixes [#31315](https://github.com/ClickHouse/ClickHouse/issues/31315). [#32288](https://github.com/ClickHouse/ClickHouse/pull/32288) ([tavplubix](https://github.com/tavplubix)).
* Fix parsing error while deserializing NaN for `Nullable(Float)` with the `Quoted` escaping rule. [#32190](https://github.com/ClickHouse/ClickHouse/pull/32190) ([Kruglov Pavel](https://github.com/Avogar)).
* XML dictionaries: identifiers, used in table create query, can be qualified to `default_database` during upgrade to newer version. Closes [#31963](https://github.com/ClickHouse/ClickHouse/issues/31963). [#32187](https://github.com/ClickHouse/ClickHouse/pull/32187) ([Maksim Kita](https://github.com/kitaisreal)).
* Number of active replicas might be determined incorrectly when inserting with quorum if setting `replicated_can_become_leader` is disabled on some replicas. It's fixed. [#32157](https://github.com/ClickHouse/ClickHouse/pull/32157) ([tavplubix](https://github.com/tavplubix)).
* Dictionaries: fix cases when `{condition}` does not work for custom database queries. [#32117](https://github.com/ClickHouse/ClickHouse/pull/32117) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix `CAST` from `Nullable` with `cast_keep_nullable` (`PARAMETER_OUT_OF_BOUND` error before for i.e. `toUInt32OrDefault(toNullable(toUInt32(1)))`). [#32080](https://github.com/ClickHouse/ClickHouse/pull/32080) ([Azat Khuzhin](https://github.com/azat)).
* Fix CREATE TABLE of Join Storage in some obscure cases. Close [#31680](https://github.com/ClickHouse/ClickHouse/issues/31680). [#32066](https://github.com/ClickHouse/ClickHouse/pull/32066) ([SuperDJY](https://github.com/cmsxbc)).
* Fixed `Directory ... already exists and is not empty` error when detaching part. [#32063](https://github.com/ClickHouse/ClickHouse/pull/32063) ([tavplubix](https://github.com/tavplubix)).
* `MaterializedMySQL` (experimental feature): Fix misinterpretation of `DECIMAL` data from MySQL. [#31990](https://github.com/ClickHouse/ClickHouse/pull/31990) ([Håvard Kvålen](https://github.com/havardk)).
* `FileLog` (experimental feature): the engine unnecessarily created a metadata directory when table creation failed. Fix [#31962](https://github.com/ClickHouse/ClickHouse/issues/31962). [#31967](https://github.com/ClickHouse/ClickHouse/pull/31967) ([flynn](https://github.com/ucasfl)).
* Some `GET_PART` entry might hang in replication queue if part is lost on all replicas and there are no other parts in the same partition. It's fixed in cases when partition key contains only columns of integer types or `Date[Time]`. Fixes [#31485](https://github.com/ClickHouse/ClickHouse/issues/31485). [#31887](https://github.com/ClickHouse/ClickHouse/pull/31887) ([tavplubix](https://github.com/tavplubix)).
* Fix functions `empty` and `notEmpty` with arguments of `UUID` type. Fixes [#31819](https://github.com/ClickHouse/ClickHouse/issues/31819). [#31883](https://github.com/ClickHouse/ClickHouse/pull/31883) ([Anton Popov](https://github.com/CurtizJ)).
* Change configuration path from `keeper_server.session_timeout_ms` to `keeper_server.coordination_settings.session_timeout_ms` when constructing a `KeeperTCPHandler`. Same with `operation_timeout`. [#31859](https://github.com/ClickHouse/ClickHouse/pull/31859) ([JackyWoo](https://github.com/JackyWoo)).
* Fix invalid cast of Nullable type when nullable primary key is used. (Nullable primary key is a discouraged feature - please do not use). This fixes [#31075](https://github.com/ClickHouse/ClickHouse/issues/31075). [#31823](https://github.com/ClickHouse/ClickHouse/pull/31823) ([Amos Bird](https://github.com/amosbird)).
* Fix crash in recursive UDF in SQL. Closes [#30856](https://github.com/ClickHouse/ClickHouse/issues/30856). [#31820](https://github.com/ClickHouse/ClickHouse/pull/31820) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix crash when function `dictGet` with a specified type is used for a dictionary attribute whose type is `Nullable`. Fixes [#30980](https://github.com/ClickHouse/ClickHouse/issues/30980). [#31800](https://github.com/ClickHouse/ClickHouse/pull/31800) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix crash with empty result of ODBC query (with some ODBC drivers). Closes [#31465](https://github.com/ClickHouse/ClickHouse/issues/31465). [#31766](https://github.com/ClickHouse/ClickHouse/pull/31766) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix disabling query profiler (in case of `query_profiler_real_time_period_ns>0`/`query_profiler_cpu_time_period_ns>0` the query profiler could stay enabled even after the query finished). [#31740](https://github.com/ClickHouse/ClickHouse/pull/31740) ([Azat Khuzhin](https://github.com/azat)).
* Fixed rare segfault on concurrent `ATTACH PARTITION` queries. [#31738](https://github.com/ClickHouse/ClickHouse/pull/31738) ([tavplubix](https://github.com/tavplubix)).
* Fix race in JSONEachRowWithProgress output format when data and lines with progress are mixed in output. [#31736](https://github.com/ClickHouse/ClickHouse/pull/31736) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed `there are no such cluster here` error on execution of `ON CLUSTER` query if specified cluster name is name of `Replicated` database. [#31723](https://github.com/ClickHouse/ClickHouse/pull/31723) ([tavplubix](https://github.com/tavplubix)).
* Fix exception on some of the applications of `decrypt` function on Nullable columns. This closes [#31662](https://github.com/ClickHouse/ClickHouse/issues/31662). This closes [#31426](https://github.com/ClickHouse/ClickHouse/issues/31426). [#31707](https://github.com/ClickHouse/ClickHouse/pull/31707) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed function ngrams when string contains UTF-8 characters. [#31706](https://github.com/ClickHouse/ClickHouse/pull/31706) ([yandd](https://github.com/yandd)).
* Settings `input_format_allow_errors_num` and `input_format_allow_errors_ratio` did not work for parsing of domain types, such as `IPv4`, it's fixed. Fixes [#31686](https://github.com/ClickHouse/ClickHouse/issues/31686). [#31697](https://github.com/ClickHouse/ClickHouse/pull/31697) ([tavplubix](https://github.com/tavplubix)).
* Fixed null pointer exception in `MATERIALIZE COLUMN`. [#31679](https://github.com/ClickHouse/ClickHouse/pull/31679) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* `RENAME TABLE` query worked incorrectly on attempt to rename a DDL dictionary in an `Ordinary` database, it's fixed. [#31638](https://github.com/ClickHouse/ClickHouse/pull/31638) ([tavplubix](https://github.com/tavplubix)).
* Implement `sparkbar` aggregate function as it was intended, see: [#26175](https://github.com/ClickHouse/ClickHouse/issues/26175)#issuecomment-960353867, [comment](https://github.com/ClickHouse/ClickHouse/issues/26175#issuecomment-961155065). [#31624](https://github.com/ClickHouse/ClickHouse/pull/31624) ([小路](https://github.com/nicelulu)).
* Fix invalid generated JSON when only column names contain invalid UTF-8 sequences. [#31534](https://github.com/ClickHouse/ClickHouse/pull/31534) ([Kevin Michel](https://github.com/kmichel-aiven)).
* Disable `partial_merge_join_left_table_buffer_bytes` until the bug in this optimization is fixed (see [#31009](https://github.com/ClickHouse/ClickHouse/issues/31009)). Remove redundant option `partial_merge_join_optimizations`. [#31528](https://github.com/ClickHouse/ClickHouse/pull/31528) ([Vladimir C](https://github.com/vdimir)).
* Fix progress for short `INSERT SELECT` queries. [#31510](https://github.com/ClickHouse/ClickHouse/pull/31510) ([Azat Khuzhin](https://github.com/azat)).
* Fix wrong behavior with group by and positional arguments. Closes [#31280](https://github.com/ClickHouse/ClickHouse/issues/31280)#issuecomment-968696186. [#31420](https://github.com/ClickHouse/ClickHouse/pull/31420) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Resolve `nullptr` in STS credentials provider for S3. [#31409](https://github.com/ClickHouse/ClickHouse/pull/31409) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Remove `notLike` function from index analysis, because it was wrong. [#31169](https://github.com/ClickHouse/ClickHouse/pull/31169) ([sundyli](https://github.com/sundy-li)).
* Fix bug in Keeper which could lead to inability to start when some coordination logs were lost and there is a more recent snapshot than the latest log. [#31150](https://github.com/ClickHouse/ClickHouse/pull/31150) ([alesapin](https://github.com/alesapin)).
* Rewrite right distributed table in local join. Solves [#25809](https://github.com/ClickHouse/ClickHouse/issues/25809). [#31105](https://github.com/ClickHouse/ClickHouse/pull/31105) ([abel-cheng](https://github.com/abel-cheng)).
* Fix `Merge` table with aliases and where (it did not work before at all). Closes [#28802](https://github.com/ClickHouse/ClickHouse/issues/28802). [#31044](https://github.com/ClickHouse/ClickHouse/pull/31044) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix JSON_VALUE/JSON_QUERY with quoted identifiers. This allows to have spaces in json path. Closes [#30971](https://github.com/ClickHouse/ClickHouse/issues/30971). [#31003](https://github.com/ClickHouse/ClickHouse/pull/31003) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Using `formatRow` function with not row-oriented formats led to segfault. Don't allow to use this function with such formats (because it doesn't make sense). [#31001](https://github.com/ClickHouse/ClickHouse/pull/31001) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix bug which broke select queries if they happened after dropping materialized view. Found in [#30691](https://github.com/ClickHouse/ClickHouse/issues/30691). [#30997](https://github.com/ClickHouse/ClickHouse/pull/30997) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Skip the `max_partition_size_to_drop` check in case of ATTACH PARTITION ... FROM and MOVE PARTITION ... [#30995](https://github.com/ClickHouse/ClickHouse/pull/30995) ([Amr Alaa](https://github.com/amralaa-MSFT)).
* Fix some corner cases with `INTERSECT` and `EXCEPT` operators. Closes [#30803](https://github.com/ClickHouse/ClickHouse/issues/30803). [#30965](https://github.com/ClickHouse/ClickHouse/pull/30965) ([Kseniia Sumarokova](https://github.com/kssenii)).
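Several of the fixes above involve positional arguments (`GROUP BY 1`, `ORDER BY 1`, and so on). A short sketch of the feature being hardened, assuming the third-party `clickhouse-driver` package and a local server:

```python
# Sketch: group and order by column position with enable_positional_arguments.
from clickhouse_driver import Client  # assumed client library

client = Client("localhost")

rows = client.execute(
    """
    SELECT number % 3 AS bucket, count() AS cnt
    FROM numbers(100)
    GROUP BY 1
    ORDER BY 1
    """,
    settings={"enable_positional_arguments": 1},
)
print(rows)  # [(0, 34), (1, 33), (2, 33)]
```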

#### Build/Testing/Packaging Improvement

* Fix incorrect filtering result on non-x86 builds. This closes [#31417](https://github.com/ClickHouse/ClickHouse/issues/31417). This closes [#31524](https://github.com/ClickHouse/ClickHouse/issues/31524). [#31574](https://github.com/ClickHouse/ClickHouse/pull/31574) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Make ClickHouse build fully reproducible (byte identical on different machines). This closes [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#31899](https://github.com/ClickHouse/ClickHouse/pull/31899) ([alexey-milovidov](https://github.com/alexey-milovidov)). Remove filesystem path to the build directory from binaries to enable reproducible builds. This is needed for [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#31838](https://github.com/ClickHouse/ClickHouse/pull/31838) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Use our own CMakeLists for `zlib-ng`, `cassandra`, `mariadb-connector-c` and `xz`, `re2`, `sentry`, `gsasl`, `arrow`, `protobuf`. This is needed for [#20151](https://github.com/ClickHouse/ClickHouse/issues/20151). Part of [#9226](https://github.com/ClickHouse/ClickHouse/issues/9226). A small step towards removal of annoying trash from the build system. [#30599](https://github.com/ClickHouse/ClickHouse/pull/30599) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Hermetic builds: use a fixed version of libc and make sure that no source or binary files from the host OS are used during build. This closes [#27133](https://github.com/ClickHouse/ClickHouse/issues/27133). This closes [#21435](https://github.com/ClickHouse/ClickHouse/issues/21435). This closes [#30462](https://github.com/ClickHouse/ClickHouse/issues/30462). [#30011](https://github.com/ClickHouse/ClickHouse/pull/30011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Adding function `getFuzzerData()` to easily fuzz particular functions. This closes [#23227](https://github.com/ClickHouse/ClickHouse/issues/23227). [#27526](https://github.com/ClickHouse/ClickHouse/pull/27526) ([Alexey Boykov](https://github.com/mathalex)).
* More correct setting up capabilities inside Docker. [#31802](https://github.com/ClickHouse/ClickHouse/pull/31802) ([Constantine Peresypkin](https://github.com/pkit)).
* Enable clang `-fstrict-vtable-pointers`, `-fwhole-program-vtables` compile options. [#20151](https://github.com/ClickHouse/ClickHouse/pull/20151) ([Maksim Kita](https://github.com/kitaisreal)).
* Avoid downloading toolchain tarballs for cross-compiling for FreeBSD. [#31672](https://github.com/ClickHouse/ClickHouse/pull/31672) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Initial support for risc-v. See development/build-cross-riscv for quirks and build command that was tested. [#31309](https://github.com/ClickHouse/ClickHouse/pull/31309) ([Vladimir Smirnov](https://github.com/Civil)).
* Support compile in arm machine with parameter "-DENABLE_TESTS=OFF". [#31007](https://github.com/ClickHouse/ClickHouse/pull/31007) ([zhanghuajie](https://github.com/zhanghuajieHIT)).

### ClickHouse release v21.11, 2021-11-09

#### Backward Incompatible Change

@@ -24,8 +24,6 @@ set (SRCS

if (ENABLE_REPLXX)
    list (APPEND SRCS ReplxxLineReader.cpp)
elseif (ENABLE_READLINE)
    list (APPEND SRCS ReadlineLineReader.cpp)
endif ()

if (USE_DEBUG_HELPERS)
@@ -52,28 +50,6 @@ if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
    target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
endif()

# Allow explicit fallback to readline
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
    message (STATUS "Attempt to fallback to readline explicitly")
    set (READLINE_PATHS "/usr/local/opt/readline/lib")
    # First try find custom lib for macos users (default lib without history support)
    find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS} NO_DEFAULT_PATH)
    if (NOT READLINE_LIB)
        find_library (READLINE_LIB NAMES readline PATHS ${READLINE_PATHS})
    endif ()

    set(READLINE_INCLUDE_PATHS "/usr/local/opt/readline/include")
    find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS} NO_DEFAULT_PATH)
    if (NOT READLINE_INCLUDE_DIR)
        find_path (READLINE_INCLUDE_DIR NAMES readline/readline.h PATHS ${READLINE_INCLUDE_PATHS})
    endif ()
    if (READLINE_INCLUDE_DIR AND READLINE_LIB)
        target_link_libraries(common PUBLIC ${READLINE_LIB})
        target_compile_definitions(common PUBLIC USE_READLINE=1)
        message (STATUS "Using readline: ${READLINE_INCLUDE_DIR} : ${READLINE_LIB}")
    endif ()
endif ()

target_link_libraries (common
    PUBLIC
        ${CITYHASH_LIBRARIES}
@@ -10,16 +10,6 @@
#include <sys/types.h>


#ifdef OS_LINUX
/// We can detect if code is linked with one or another readline variants or open the library dynamically.
# include <dlfcn.h>
extern "C"
{
    char * readline(const char *) __attribute__((__weak__));
    char * (*readline_ptr)(const char *) = readline;
}
#endif

#ifdef HAS_RESERVED_IDENTIFIER
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
@@ -152,33 +142,6 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt)
{
    input.clear();

#ifdef OS_LINUX
    if (!readline_ptr)
    {
        for (const auto * name : {"libreadline.so", "libreadline.so.0", "libeditline.so", "libeditline.so.0"})
        {
            void * dl_handle = dlopen(name, RTLD_LAZY);
            if (dl_handle)
            {
                readline_ptr = reinterpret_cast<char * (*)(const char *)>(dlsym(dl_handle, "readline"));
                if (readline_ptr)
                {
                    break;
                }
            }
        }
    }

    /// Minimal support for readline
    if (readline_ptr)
    {
        char * line_read = (*readline_ptr)(prompt.c_str());
        if (!line_read)
            return ABORT;
        input = line_read;
    }
    else
#endif
    {
        std::cout << prompt;
        std::getline(std::cin, input);
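The block removed above lazily loads `readline` via `dlopen`/`dlsym` and falls back to plain `std::getline` when no readline-like library is found. For readers unfamiliar with that pattern, a rough Python equivalent using `ctypes` (purely illustrative; whether a system libreadline is present is an assumption):

```python
# Sketch of the removed fallback: try to load readline at runtime, else use plain input().
import ctypes
import ctypes.util


def read_one_line(prompt: str) -> str:
    lib_name = ctypes.util.find_library("readline")
    if lib_name:
        try:
            lib = ctypes.CDLL(lib_name)
            lib.readline.restype = ctypes.c_char_p
            lib.readline.argtypes = [ctypes.c_char_p]
            line = lib.readline(prompt.encode())
            # NULL from readline() means EOF, similar to ABORT in the C++ version.
            return "" if line is None else line.decode()
        except (OSError, AttributeError):
            pass  # no usable readline symbol: fall through to the plain path
    return input(prompt)


if __name__ == "__main__":
    print(read_one_line("query> "))
```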
@@ -1,187 +0,0 @@
#include <base/ReadlineLineReader.h>
#include <base/errnoToString.h>
#include <base/scope_guard.h>

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

#include <iostream>

namespace
{

/// Trim ending whitespace inplace
void trim(String & s)
{
    s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}

}

static const LineReader::Suggest * suggest;

/// Points to current word to suggest.
static LineReader::Suggest::Words::const_iterator pos;
/// Points after the last possible match.
static LineReader::Suggest::Words::const_iterator end;

/// Set iterators to the matched range of words if any.
static void findRange(const char * prefix, size_t prefix_length)
{
    std::string prefix_str(prefix);
    if (auto completions = suggest->getCompletions(prefix_str, prefix_length))
        std::tie(pos, end) = *completions;
}

/// Iterates through matched range.
static char * nextMatch()
{
    if (pos >= end)
        return nullptr;

    /// readline will free memory by itself.
    char * word = strdup(pos->c_str());
    ++pos;
    return word;
}

static char * generate(const char * text, int state)
{
    if (!suggest->ready)
        return nullptr;
    if (state == 0)
        findRange(text, strlen(text));

    /// Do not append whitespace after word. For unknown reason, rl_completion_append_character = '\0' does not work.
    rl_completion_suppress_append = 1;

    return nextMatch();
};

ReadlineLineReader::ReadlineLineReader(
    const Suggest & suggest_, const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)
    : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_))
{
    suggest = &suggest_;

    if (!history_file_path.empty())
    {
        int res = read_history(history_file_path.c_str());
        if (res)
            std::cerr << "Cannot read history from file " + history_file_path + ": "+ errnoToString(errno) << std::endl;
    }

    /// Added '.' to the default list. Because it is used to separate database and table.
    rl_basic_word_break_characters = word_break_characters;

    /// Not append whitespace after single suggestion. Because whitespace after function name is meaningless.
    rl_completion_append_character = '\0';

    rl_completion_entry_function = generate;

    /// Install Ctrl+C signal handler that will be used in interactive mode.

    if (rl_initialize())
        throw std::runtime_error("Cannot initialize readline");

    auto clear_prompt_or_exit = [](int)
    {
        /// This is signal safe.
        ssize_t res = write(STDOUT_FILENO, "\n", 1);

        /// Allow to quit client while query is in progress by pressing Ctrl+C twice.
        /// (First press to Ctrl+C will try to cancel query by InterruptListener).
        if (res == 1 && rl_line_buffer[0] && !RL_ISSTATE(RL_STATE_DONE))
        {
            rl_replace_line("", 0);
            if (rl_forced_update_display())
                _exit(0);
        }
        else
        {
            /// A little dirty, but we struggle to find better way to correctly
            /// force readline to exit after returning from the signal handler.
            _exit(0);
        }
    };

    if (signal(SIGINT, clear_prompt_or_exit) == SIG_ERR)
        throw std::runtime_error(std::string("Cannot set signal handler for readline: ") + errnoToString(errno));

    rl_variable_bind("completion-ignore-case", "on");
    // TODO: it doesn't work
    // history_write_timestamps = 1;
}

ReadlineLineReader::~ReadlineLineReader()
{
}

LineReader::InputStatus ReadlineLineReader::readOneLine(const String & prompt)
{
    input.clear();

    const char* cinput = readline(prompt.c_str());
    if (cinput == nullptr)
        return (errno != EAGAIN) ? ABORT : RESET_LINE;
    input = cinput;

    trim(input);
    return INPUT_LINE;
}

void ReadlineLineReader::addToHistory(const String & line)
{
    add_history(line.c_str());

    // Flush changes to the disk
    // NOTE readline builds a buffer of all the lines to write, and write them in one syscall.
    // Thus there is no need to lock the history file here.
    write_history(history_file_path.c_str());
}

#if RL_VERSION_MAJOR >= 7

#define BRACK_PASTE_PREF "\033[200~"
#define BRACK_PASTE_SUFF "\033[201~"

#define BRACK_PASTE_LAST '~'
#define BRACK_PASTE_SLEN 6

/// This handler bypasses some unused macro/event checkings and remove trailing newlines before insertion.
static int clickhouse_rl_bracketed_paste_begin(int /* count */, int /* key */)
{
    std::string buf;
    buf.reserve(128);

    RL_SETSTATE(RL_STATE_MOREINPUT);
    SCOPE_EXIT(RL_UNSETSTATE(RL_STATE_MOREINPUT));
    int c;
    while ((c = rl_read_key()) >= 0)
    {
        if (c == '\r')
            c = '\n';
        buf.push_back(c);
        if (buf.size() >= BRACK_PASTE_SLEN && c == BRACK_PASTE_LAST && buf.substr(buf.size() - BRACK_PASTE_SLEN) == BRACK_PASTE_SUFF)
        {
            buf.resize(buf.size() - BRACK_PASTE_SLEN);
            break;
        }
    }
    trim(buf);
    return static_cast<size_t>(rl_insert_text(buf.c_str())) == buf.size() ? 0 : 1;
}

#endif

void ReadlineLineReader::enableBracketedPaste()
{
#if RL_VERSION_MAJOR >= 7
    rl_variable_bind("enable-bracketed-paste", "on");

    /// Use our bracketed paste handler to get better user experience. See comments above.
    rl_bind_keyseq(BRACK_PASTE_PREF, clickhouse_rl_bracketed_paste_begin);
#endif
};
@ -1,19 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "LineReader.h"
|
||||
|
||||
#include <readline/readline.h>
|
||||
#include <readline/history.h>
|
||||
|
||||
class ReadlineLineReader : public LineReader
|
||||
{
|
||||
public:
|
||||
ReadlineLineReader(const Suggest & suggest, const String & history_file_path, bool multiline, Patterns extenders_, Patterns delimiters_);
|
||||
~ReadlineLineReader() override;
|
||||
|
||||
void enableBracketedPaste() override;
|
||||
|
||||
private:
|
||||
InputStatus readOneLine(const String & prompt) override;
|
||||
void addToHistory(const String & line) override;
|
||||
};
|
2
contrib/poco
vendored
2
contrib/poco
vendored
@ -1 +1 @@
|
||||
Subproject commit 258b9ba6cd245ff88e9346f75c43464c403f329d
|
||||
Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1
|
@ -8,7 +8,7 @@ if (NOT ENABLE_REPLXX)
|
||||
add_library(replxx INTERFACE)
|
||||
target_compile_definitions(replxx INTERFACE USE_REPLXX=0)
|
||||
|
||||
message (STATUS "Not using replxx (Beware! Runtime fallback to readline is possible!)")
|
||||
message (STATUS "Not using replxx")
|
||||
return()
|
||||
endif()
|
||||
|
||||
|
@ -202,10 +202,10 @@
|
||||
#define HAVE_READDIR 1
|
||||
|
||||
/* Add readline support */
|
||||
#define HAVE_READLINE 1
|
||||
/* #undef HAVE_READLINE */
|
||||
|
||||
/* Define to 1 if you have the <readline/history.h> header file. */
|
||||
#define HAVE_READLINE_HISTORY_H 1
|
||||
/* #undef HAVE_READLINE_HISTORY_H */
|
||||
|
||||
/* Use the scandir lib */
|
||||
/* #undef HAVE_SCANDIR */
|
||||
|
@ -52,7 +52,6 @@ RUN apt-get update \
|
||||
llvm-${LLVM_VERSION} \
|
||||
llvm-${LLVM_VERSION}-dev \
|
||||
libicu-dev \
|
||||
libreadline-dev \
|
||||
moreutils \
|
||||
ninja-build \
|
||||
pigz \
|
||||
|
@ -7,7 +7,6 @@ RUN apt-get update \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get -y install \
|
||||
tzdata \
|
||||
python3 \
|
||||
libreadline-dev \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
gdb \
|
||||
|
@ -21,7 +21,6 @@ RUN apt-get update \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
libreadline-dev \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
curl \
|
||||
@ -76,7 +75,7 @@ RUN python3 -m pip install \
|
||||
minio \
|
||||
protobuf \
|
||||
psycopg2-binary==2.8.6 \
|
||||
pymongo \
|
||||
pymongo==3.11.0 \
|
||||
pytest \
|
||||
pytest-timeout \
|
||||
pytest-xdist \
|
||||
|
@ -1,7 +1,7 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
mongo1:
|
||||
image: mongo:3.6
|
||||
image: mongo:5.0
|
||||
restart: always
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: root
|
||||
@ -9,3 +9,9 @@ services:
|
||||
ports:
|
||||
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT}
|
||||
command: --profile=2 --verbose
|
||||
|
||||
mongo2:
|
||||
image: mongo:5.0
|
||||
restart: always
|
||||
ports:
|
||||
- "27018:27017"
|
||||
|
@ -261,16 +261,24 @@ function run_tests
|
||||
# Use awk because bash doesn't support floating point arithmetic.
|
||||
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
|
||||
|
||||
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
||||
# The grep is to filter out set -x output and keep only time output.
|
||||
# The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
|
||||
{ \
|
||||
time "$script_dir/perf.py" --host localhost localhost --port $LEFT_SERVER_PORT $RIGHT_SERVER_PORT \
|
||||
--runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
|
||||
--profile-seconds "$profile_seconds" \
|
||||
-- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
|
||||
} 2>&1 >/dev/null | tee >(grep -v ^+ >> "wall-clock-times.tsv") \
|
||||
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
|
||||
(
|
||||
set +x
|
||||
argv=(
|
||||
--host localhost localhost
|
||||
--port "$LEFT_SERVER_PORT" "$RIGHT_SERVER_PORT"
|
||||
--runs "$CHPC_RUNS"
|
||||
--max-queries "$CHPC_MAX_QUERIES"
|
||||
--profile-seconds "$profile_seconds"
|
||||
|
||||
"$test"
|
||||
)
|
||||
TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
|
||||
# one more subshell to suppress trace output for "set +x"
|
||||
(
|
||||
time "$script_dir/perf.py" "${argv[@]}" > "$test_name-raw.tsv" 2> "$test_name-err.log"
|
||||
) 2>>wall-clock-times.tsv >/dev/null \
|
||||
|| echo "Test $test_name failed with error code $?" >> "$test_name-err.log"
|
||||
) 2>/dev/null
|
||||
|
||||
profile_seconds_left=$(awk -F' ' \
|
||||
'BEGIN { s = '$profile_seconds_left'; } /^profile-total/ { s -= $2 } END { print s }' \
|
||||
@ -278,8 +286,6 @@ function run_tests
|
||||
current_test=$((current_test + 1))
|
||||
done
|
||||
|
||||
unset TIMEFORMAT
|
||||
|
||||
wait
|
||||
}
|
||||
|
||||
|
@ -354,11 +354,9 @@ for query_index in queries_to_run:
|
||||
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
|
||||
|
||||
if elapsed > args.max_query_seconds:
|
||||
# Stop processing pathologically slow queries, to avoid timing out
|
||||
# the entire test task. This shouldn't really happen, so we don't
|
||||
# need much handling for this case and can just exit.
|
||||
# Do not stop processing pathologically slow queries,
|
||||
# since this may hide errors in other queries.
|
||||
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
|
||||
exit(2)
|
||||
|
||||
# Be careful with the counter, after this line it's the next iteration
|
||||
# already.
|
||||
|
@ -123,7 +123,12 @@ function run_tests()
|
||||
export -f run_tests
|
||||
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
||||
|
||||
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
echo "Files in current directory"
|
||||
ls -la ./
|
||||
echo "Files in root directory"
|
||||
ls -la /
|
||||
|
||||
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
|
||||
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
|
||||
|
||||
|
@ -115,7 +115,12 @@ export -f run_tests
|
||||
|
||||
timeout "$MAX_RUN_TIME" bash -c run_tests ||:
|
||||
|
||||
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
echo "Files in current directory"
|
||||
ls -la ./
|
||||
echo "Files in root directory"
|
||||
ls -la /
|
||||
|
||||
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
||||
|
||||
clickhouse-client -q "system flush logs" ||:
|
||||
|
||||
|
@ -21,7 +21,6 @@ RUN apt-get update \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
libreadline-dev \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
curl \
|
||||
|
@ -8,24 +8,43 @@ toc_title: Distributed
|
||||
Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers.
|
||||
Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any.
|
||||
|
||||
The Distributed engine accepts parameters:
|
||||
## Creating a Table {#distributed-creating-a-table}
|
||||
|
||||
- the cluster name in the server’s config file
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
(
|
||||
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
|
||||
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
|
||||
...
|
||||
) ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]])
|
||||
[SETTINGS name=value, ...]
|
||||
```
|
||||
|
||||
- the name of a remote database
|
||||
### From a Table {#distributed-from-a-table}
|
||||
When the `Distributed` table is pointing to a table on the current server, you can adopt that table's schema:
|
||||
|
||||
- the name of a remote table
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2 ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]]) [SETTINGS name=value, ...]
|
||||
```
|
||||
|
||||
- (optionally) sharding key
|
||||
**Distributed Parameters**
|
||||
|
||||
- (optionally) policy name, it will be used to store temporary files for async send
|
||||
- `cluster` - the cluster name in the server’s config file
|
||||
|
||||
See also:
|
||||
- `database` - the name of a remote database
|
||||
|
||||
- `table` - the name of a remote table
|
||||
|
||||
- `sharding_key` - (optional) sharding key
|
||||
|
||||
- `policy_name` - (optional) policy name; it will be used to store temporary files for asynchronous sends
|
||||
|
||||
See also:
|
||||
|
||||
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) setting
|
||||
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) for the examples
|
||||
|
||||
Also, it accepts the following settings:
|
||||
**Distributed Settings**
|
||||
|
||||
- `fsync_after_insert` - do the `fsync` for the file data after an asynchronous insert to Distributed. Guarantees that the OS has flushed the whole of the inserted data to a file **on the initiator node** disk.
|
||||
|
||||
@ -59,24 +78,25 @@ Also, it accepts the following settings:
|
||||
- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting
|
||||
- `bytes_to_throw_insert` is handled before `bytes_to_delay_insert`, so you should not set it to a value less than `bytes_to_delay_insert`
|
||||
|
||||
Example:
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
Distributed(logs, default, hits[, sharding_key[, policy_name]])
|
||||
CREATE TABLE hits_all AS hits
|
||||
ENGINE = Distributed(logs, default, hits[, sharding_key[, policy_name]])
|
||||
SETTINGS
|
||||
fsync_after_insert=0,
|
||||
fsync_directories=0;
|
||||
```
|
||||
|
||||
Data will be read from all servers in the `logs` cluster, from the default.hits table located on every server in the cluster.
|
||||
Data will be read from all servers in the `logs` cluster, from the `default.hits` table located on every server in the cluster.
|
||||
Data is not only read but is partially processed on the remote servers (to the extent that this is possible).
|
||||
For example, for a query with GROUP BY, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
|
||||
For example, for a query with `GROUP BY`, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated.
|
||||
|
||||
Instead of the database name, you can use a constant expression that returns a string. For example: currentDatabase().
|
||||
Instead of the database name, you can use a constant expression that returns a string. For example: `currentDatabase()`.
|
||||
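For illustration, a minimal sketch that reuses the `logs` cluster and `hits` table from the example above (the table name `hits_all_current` is hypothetical):

``` sql
CREATE TABLE hits_all_current AS hits
ENGINE = Distributed(logs, currentDatabase(), hits);
```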
|
||||
logs – The cluster name in the server’s config file.
|
||||
## Clusters {#distributed-clusters}
|
||||
|
||||
Clusters are set like this:
|
||||
Clusters are configured in the [server configuration file](../../../operations/configuration-files.md):
|
||||
|
||||
``` xml
|
||||
<remote_servers>
|
||||
@ -132,12 +152,13 @@ Replicas are duplicating servers (in order to read all the data, you can access
|
||||
Cluster names must not contain dots.
|
||||
|
||||
The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `compression` are specified for each server:
|
||||
|
||||
- `host` – The address of the remote server. You can use either the domain or the IPv4 or IPv6 address. If you specify the domain, the server makes a DNS request when it starts, and the result is stored as long as the server is running. If the DNS request fails, the server does not start. If you change the DNS record, restart the server.
|
||||
- `port` – The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Do not confuse it with http_port.
|
||||
- `user` – Name of the user for connecting to a remote server. Default value: default. This user must have access to connect to the specified server. Access is configured in the users.xml file. For more information, see the section [Access rights](../../../operations/access-rights.md).
|
||||
- `port` – The TCP port for messenger activity (`tcp_port` in the config, usually set to 9000). Not to be confused with `http_port`.
|
||||
- `user` – Name of the user for connecting to a remote server. Default value is the `default` user. This user must have access to connect to the specified server. Access is configured in the `users.xml` file. For more information, see the section [Access rights](../../../operations/access-rights.md).
|
||||
- `password` – The password for connecting to a remote server (not masked). Default value: empty string.
|
||||
- `secure` - Use ssl for connection, usually you also should define `port` = 9440. Server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and have correct certificates.
|
||||
- `compression` - Use data compression. Default value: true.
|
||||
- `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and be configured with correct certificates.
|
||||
- `compression` - Use data compression. Default value: `true`.
|
||||
|
||||
When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting.
|
||||
If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times.
|
||||
@ -149,40 +170,42 @@ You can specify as many clusters as you wish in the configuration.
|
||||
|
||||
To view your clusters, use the `system.clusters` table.
|
||||
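For example, a quick way to inspect the configured clusters (a sketch; the exact set of columns in `system.clusters` varies slightly between versions):

``` sql
SELECT cluster, shard_num, shard_weight, replica_num, host_name, port, is_local
FROM system.clusters;
```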
|
||||
The Distributed engine allows working with a cluster like a local server. However, the cluster is inextensible: you must write its configuration in the server config file (even better, for all the cluster’s servers).
|
||||
The `Distributed` engine allows working with a cluster like a local server. However, the cluster's configuration cannot be specified dynamically; it has to be configured in the server config file. Usually, all servers in a cluster will have the same cluster config (though this is not required). Clusters from the config file are updated on the fly, without restarting the server.
|
||||
|
||||
The Distributed engine requires writing clusters to the config file. Clusters from the config file are updated on the fly, without restarting the server. If you need to send a query to an unknown set of shards and replicas each time, you do not need to create a Distributed table – use the `remote` table function instead. See the section [Table functions](../../../sql-reference/table-functions/index.md).
|
||||
If you need to send a query to an unknown set of shards and replicas each time, you do not need to create a `Distributed` table – use the `remote` table function instead. See the section [Table functions](../../../sql-reference/table-functions/index.md).
|
||||
|
||||
## Writing data {#distributed-writing-data}
|
||||
|
||||
There are two methods for writing data to a cluster:
|
||||
|
||||
First, you can define which servers to write which data to and perform the write directly on each shard. In other words, perform INSERT in the tables that the distributed table “looks at”. This is the most flexible solution as you can use any sharding scheme, which could be non-trivial due to the requirements of the subject area. This is also the most optimal solution since data can be written to different shards completely independently.
|
||||
First, you can define which servers to write which data to and perform the write directly on each shard. In other words, perform direct `INSERT` statements on the remote tables in the cluster that the `Distributed` table is pointing to. This is the most flexible solution as you can use any sharding scheme, even one that is non-trivial due to the requirements of the subject area. This is also the optimal solution since data can be written to different shards completely independently.
|
||||
|
||||
Second, you can perform INSERT in a Distributed table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a Distributed table, it must have a sharding key set (the last parameter). In addition, if there is only one shard, the write operation works without specifying the sharding key, since it does not mean anything in this case.
|
||||
Second, you can perform `INSERT` statements on a `Distributed` table. In this case, the table will distribute the inserted data across the servers itself. In order to write to a `Distributed` table, it must have the `sharding_key` parameter configured (except if there is only one shard).
|
||||
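A minimal sketch of both approaches, reusing the `hits_all` Distributed table and the local `default.hits` table from the example above (the column list is illustrative):

``` sql
-- First approach: connect to a specific shard and insert into its local table,
-- choosing the target server according to your own sharding scheme.
INSERT INTO default.hits (CounterID, EventDate, UserID) VALUES (34, today(), 42);

-- Second approach: insert through the Distributed table and let it route rows
-- to shards using the sharding key.
INSERT INTO hits_all (CounterID, EventDate, UserID) VALUES (34, today(), 42);
```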
|
||||
Each shard can have a weight defined in the config file. By default, the weight is equal to one. Data is distributed across shards in the amount proportional to the shard weight. For example, if there are two shards and the first has a weight of 9 while the second has a weight of 10, the first will be sent 9 / 19 parts of the rows, and the second will be sent 10 / 19.
|
||||
Each shard can have a `<weight>` defined in the config file. By default, the weight is `1`. Data is distributed across shards in an amount proportional to the shard weight. All shard weights are summed up, then each shard's weight is divided by the total to determine each shard's proportion. For example, if there are two shards and the first has a weight of 1 while the second has a weight of 2, the first will be sent one third (1 / 3) of the inserted rows and the second will be sent two thirds (2 / 3).
|
||||
|
||||
Each shard can have the `internal_replication` parameter defined in the config file.
|
||||
Each shard can have the `internal_replication` parameter defined in the config file. If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this if the tables underlying the `Distributed` table are replicated tables (e.g. any of the `Replicated*MergeTree` table engines). One of the table replicas will receive the write and it will be replicated to the other replicas automatically.
|
||||
|
||||
If this parameter is set to `true`, the write operation selects the first healthy replica and writes data to it. Use this alternative if the Distributed table “looks at” replicated tables. In other words, if the table where data will be written is going to replicate them itself.
|
||||
|
||||
If it is set to `false` (the default), data is written to all replicas. In essence, this means that the Distributed table replicates data itself. This is worse than using replicated tables, because the consistency of replicas is not checked, and over time they will contain slightly different data.
|
||||
If `internal_replication` is set to `false` (the default), data is written to all replicas. In this case, the `Distributed` table replicates data itself. This is worse than using replicated tables because the consistency of replicas is not checked and, over time, they will contain slightly different data.
|
||||
|
||||
To select the shard that a row of data is sent to, the sharding expression is evaluated, and the remainder of dividing it by the total weight of the shards is taken. The row is sent to the shard that corresponds to the half-interval of remainders from `prev_weights` to `prev_weights + weight`, where `prev_weights` is the total weight of the shards with smaller numbers, and `weight` is the weight of this shard. For example, if there are two shards, and the first has a weight of 9 while the second has a weight of 10, the row will be sent to the first shard for remainders in the range \[0, 9), and to the second for remainders in the range \[9, 19).
|
||||
|
||||
The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running IN and JOIN by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function: intHash64(UserID).
|
||||
The sharding expression can be any expression from constants and table columns that returns an integer. For example, you can use the expression `rand()` for random distribution of data, or `UserID` for distribution by the remainder from dividing the user’s ID (then the data of a single user will reside on a single shard, which simplifies running `IN` and `JOIN` by users). If one of the columns is not distributed evenly enough, you can wrap it in a hash function e.g. `intHash64(UserID)`.
|
||||
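A sketch of passing such an expression as the sharding key (the table name `hits_all_by_user` is hypothetical; the cluster, database, and table reuse the example above):

``` sql
CREATE TABLE hits_all_by_user AS hits
ENGINE = Distributed(logs, default, hits, intHash64(UserID));
```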
|
||||
A simple remainder from the division is a limited solution for sharding and isn’t always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area, rather than using entries in Distributed tables.
|
||||
|
||||
SELECT queries are sent to all the shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
|
||||
A simple remainder from the division is a limited solution for sharding and isn’t always appropriate. It works for medium and large volumes of data (dozens of servers), but not for very large volumes of data (hundreds of servers or more). In the latter case, use the sharding scheme required by the subject area rather than using entries in `Distributed` tables.
|
||||
|
||||
You should be concerned about the sharding scheme in the following cases:
|
||||
|
||||
- Queries are used that require joining data (IN or JOIN) by a specific key. If data is sharded by this key, you can use local IN or JOIN instead of GLOBAL IN or GLOBAL JOIN, which is much more efficient.
|
||||
- A large number of servers is used (hundreds or more) with a large number of small queries (queries of individual clients - websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we’ve done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. Distributed tables are created for each layer, and a single shared distributed table is created for global queries.
|
||||
- Queries are used that require joining data (`IN` or `JOIN`) by a specific key. If data is sharded by this key, you can use local `IN` or `JOIN` instead of `GLOBAL IN` or `GLOBAL JOIN`, which is much more efficient (see the sketch after this list).
|
||||
- A large number of servers is used (hundreds or more) with a large number of small queries, for example, queries for data of individual clients (e.g. websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, as we’ve done in Yandex.Metrica, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. `Distributed` tables are created for each layer, and a single shared distributed table is created for global queries.
|
||||
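A sketch of the first case, again reusing the `hits_all` / `hits` tables from the example above (the filter column is illustrative):

``` sql
-- Data is sharded by UserID: the subquery can reference the local table
-- and run independently on every shard.
SELECT count()
FROM hits_all
WHERE UserID IN (SELECT UserID FROM hits WHERE CounterID = 34);

-- Data is not sharded by UserID: collect the subquery result once on the
-- initiator and send it to every shard with GLOBAL IN.
SELECT count()
FROM hits_all
WHERE UserID GLOBAL IN (SELECT UserID FROM hits_all WHERE CounterID = 34);
```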
|
||||
Data is written asynchronously. When data is inserted into the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The periodicity for sending data is managed by the [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance by better utilizing local server and network resources. You can check whether data was sent successfully by looking at the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`. The number of threads performing background tasks can be set by the [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting.
|
||||
|
||||
If the server ceased to exist or had a rough restart (for example, after a device failure) after an INSERT to a Distributed table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the `broken` subdirectory and no longer used.
|
||||
If the server ceased to exist or had a rough restart (for example, due to a hardware failure) after an `INSERT` to a `Distributed` table, the inserted data might be lost. If a damaged data part is detected in the table directory, it is transferred to the `broken` subdirectory and no longer used.
|
||||
|
||||
## Reading data {#distributed-reading-data}
|
||||
|
||||
When querying a `Distributed` table, `SELECT` queries are sent to all shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
|
||||
|
||||
When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
|
||||
|
||||
|
99
docs/en/interfaces/grpc.md
Normal file
99
docs/en/interfaces/grpc.md
Normal file
@ -0,0 +1,99 @@
|
||||
---
|
||||
toc_priority: 19
|
||||
toc_title: gRPC Interface
|
||||
---
|
||||
|
||||
# gRPC Interface {#grpc-interface}
|
||||
|
||||
## Introduction {#grpc-interface-introduction}
|
||||
|
||||
ClickHouse supports the [gRPC](https://grpc.io/) interface. It is an open source remote procedure call system that uses HTTP/2 and [Protocol Buffers](https://en.wikipedia.org/wiki/Protocol_Buffers). The implementation of gRPC in ClickHouse supports:
|
||||
|
||||
- SSL;
|
||||
- authentication;
|
||||
- sessions;
|
||||
- compression;
|
||||
- parallel queries through the same channel;
|
||||
- cancellation of queries;
|
||||
- getting progress and logs;
|
||||
- external tables.
|
||||
|
||||
The specification of the interface is described in [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||
|
||||
## gRPC Configuration {#grpc-interface-configuration}
|
||||
|
||||
To use the gRPC interface, set `grpc_port` in the main [server configuration](../operations/configuration-files.md). Other configuration options are shown in the following example:
|
||||
|
||||
```xml
|
||||
<grpc_port>9100</grpc_port>
|
||||
<grpc>
|
||||
<enable_ssl>false</enable_ssl>
|
||||
|
||||
<!-- The following two files are used only if SSL is enabled -->
|
||||
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
|
||||
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
|
||||
|
||||
<!-- Whether server requests client for a certificate -->
|
||||
<ssl_require_client_auth>false</ssl_require_client_auth>
|
||||
|
||||
<!-- The following file is used only if ssl_require_client_auth=true -->
|
||||
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
|
||||
|
||||
<!-- Default compression algorithm (applied if client doesn't specify another algorithm, see result_compression in QueryInfo).
|
||||
Supported algorithms: none, deflate, gzip, stream_gzip -->
|
||||
<compression>deflate</compression>
|
||||
|
||||
<!-- Default compression level (applied if client doesn't specify another level, see result_compression in QueryInfo).
|
||||
Supported levels: none, low, medium, high -->
|
||||
<compression_level>medium</compression_level>
|
||||
|
||||
<!-- Send/receive message size limits in bytes. -1 means unlimited -->
|
||||
<max_send_message_size>-1</max_send_message_size>
|
||||
<max_receive_message_size>-1</max_receive_message_size>
|
||||
|
||||
<!-- Enable if you want to get detailed logs -->
|
||||
<verbose_logs>false</verbose_logs>
|
||||
</grpc>
|
||||
```
|
||||
|
||||
## Built-in Client {#grpc-client}
|
||||
|
||||
You can write a client in any of the programming languages supported by gRPC using the provided [specification](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||
Or you can use a built-in Python client. It is placed in [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) in the repository. The built-in client requires [grpcio and grpcio-tools](https://grpc.io/docs/languages/python/quickstart) Python modules.
|
||||
|
||||
The client supports the following arguments:
|
||||
|
||||
- `--help` – Shows a help message and exits.
|
||||
- `--host HOST, -h HOST` – A server name. Default value: `localhost`. You can also use an IPv4 or IPv6 address.
|
||||
- `--port PORT` – A port to connect to. This port should be enabled in the ClickHouse server configuration (see `grpc_port`). Default value: `9100`.
|
||||
- `--user USER_NAME, -u USER_NAME` – A user name. Default value: `default`.
|
||||
- `--password PASSWORD` – A password. Default value: empty string.
|
||||
- `--query QUERY, -q QUERY` – A query to process when using non-interactive mode.
|
||||
- `--database DATABASE, -d DATABASE` – A default database. If not specified, the current database set in the server settings is used (`default` by default).
|
||||
- `--format OUTPUT_FORMAT, -f OUTPUT_FORMAT` – A result output [format](formats.md). Default value for interactive mode: `PrettyCompact`.
|
||||
- `--debug` – Enables showing debug information.
|
||||
|
||||
To run the client in interactive mode, call it without the `--query` argument.
|
||||
|
||||
In batch mode, query data can be passed via `stdin`.
|
||||
|
||||
**Client Usage Example**
|
||||
|
||||
In the following example, a table is created and loaded with data from a CSV file. Then the contents of the table are queried.
|
||||
|
||||
``` bash
|
||||
./clickhouse-grpc-client.py -q "CREATE TABLE grpc_example_table (id UInt32, text String) ENGINE = MergeTree() ORDER BY id;"
|
||||
echo "0,Input data for" > a.txt ; echo "1,gRPC protocol example" >> a.txt
|
||||
cat a.txt | ./clickhouse-grpc-client.py -q "INSERT INTO grpc_example_table FORMAT CSV"
|
||||
|
||||
./clickhouse-grpc-client.py --format PrettyCompact -q "SELECT * FROM grpc_example_table;"
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─id─┬─text──────────────────┐
|
||||
│ 0 │ Input data for │
|
||||
│ 1 │ gRPC protocol example │
|
||||
└────┴───────────────────────┘
|
||||
```
|
@ -6,10 +6,11 @@ toc_title: Introduction
|
||||
|
||||
# Interfaces {#interfaces}
|
||||
|
||||
ClickHouse provides two network interfaces (both can be optionally wrapped in TLS for additional security):
|
||||
ClickHouse provides three network interfaces (they can be optionally wrapped in TLS for additional security):
|
||||
|
||||
- [HTTP](http.md), which is documented and easy to use directly.
|
||||
- [Native TCP](../interfaces/tcp.md), which has less overhead.
|
||||
- [gRPC](grpc.md).
|
||||
|
||||
In most cases it is recommended to use an appropriate tool or library instead of interacting with these directly. The following are officially supported by Yandex:
|
||||
|
||||
@ -24,4 +25,3 @@ There are also a wide range of third-party libraries for working with ClickHouse
|
||||
- [Integrations](../interfaces/third-party/integrations.md)
|
||||
- [Visual interfaces](../interfaces/third-party/gui.md)
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/interfaces/) <!--hide-->
|
||||
|
@ -505,7 +505,7 @@ Keys:
|
||||
- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
|
||||
- `log` – The log file. Contains all the entries according to `level`.
|
||||
- `errorlog` – Error log file.
|
||||
- `size` – Size of the file. Applies to `log`and`errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
|
||||
- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
|
||||
- `count` – The number of archived log files that ClickHouse stores.
|
||||
|
||||
**Example**
|
||||
@ -750,9 +750,13 @@ The value 0 means that you can delete all tables without any restrictions.
|
||||
|
||||
## max_thread_pool_size {#max-thread-pool-size}
|
||||
|
||||
The maximum number of threads in the Global Thread pool.
|
||||
ClickHouse uses threads from the Global Thread pool to process queries. If there is no idle thread to process a query, then a new thread is created in the pool. `max_thread_pool_size` limits the maximum number of threads in the pool.
|
||||
|
||||
Default value: 10000.
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
|
||||
Default value: `10000`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -762,9 +766,13 @@ Default value: 10000.
|
||||
|
||||
## max_thread_pool_free_size {#max-thread-pool-free-size}
|
||||
|
||||
The number of threads that are always held in the Global Thread pool.
|
||||
If the number of **idle** threads in the Global Thread pool is greater than `max_thread_pool_free_size`, then ClickHouse releases resources occupied by some threads and the pool size is decreased. Threads can be created again if necessary.
|
||||
|
||||
Default value: 1000.
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
|
||||
Default value: `1000`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -774,9 +782,13 @@ Default value: 1000.
|
||||
|
||||
## thread_pool_queue_size {#thread-pool-queue-size}
|
||||
|
||||
The limit to the number of jobs that can be scheduled on the Global Thread pool. Increasing queue size leads to larger memory usage. It is recommended to keep this value equal to the `max_thread_pool_size`.
|
||||
The maximum number of jobs that can be scheduled on the Global Thread pool. Increasing queue size leads to larger memory usage. It is recommended to keep this value equal to [max_thread_pool_size](#max-thread-pool-size).
|
||||
|
||||
Default value: 10000.
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
|
||||
Default value: `10000`.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1443,7 +1455,7 @@ You can also define sections `memory` — means storing information only in memo
|
||||
|
||||
To add an LDAP server as a remote user directory for users that are not defined locally, define a single `ldap` section with the following parameters:
|
||||
- `server` — one of LDAP server names defined in `ldap_servers` config section. This parameter is mandatory and cannot be empty.
|
||||
- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, user will not be able to perform any actions after authentication. If any of the listed roles is not defined locally at the time of authentication, the authenthication attept will fail as if the provided password was incorrect.
|
||||
- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, the user will not be able to perform any actions after authentication. If any of the listed roles is not defined locally at the time of authentication, the authentication attempt will fail as if the provided password was incorrect.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1507,3 +1519,4 @@ Possible values:
|
||||
- Positive integer.
|
||||
|
||||
Default value: `10000`.
|
||||
|
||||
|
@ -34,7 +34,7 @@ Use `perf top` to watch the time spent in the kernel for memory management.
|
||||
Permanent huge pages also do not need to be allocated.
|
||||
|
||||
!!! warning "Attention"
|
||||
If your system has less than 16 GB of RAM you may experience various memory exceptions because default settings does not match this amount of RAM. Recommended amount of RAM is 32 GB or more. You can use ClickHouse in system with small amount of RAM, even with 2 GB of RAM, but it requires an additional tuning and able to process small ingestion rate.
|
||||
If your system has less than 16 GB of RAM, you may experience various memory exceptions because default settings do not match this amount of memory. The recommended amount of RAM is 32 GB or more. You can use ClickHouse in a system with a small amount of RAM, even with 2 GB of RAM, but it requires additional tuning and can ingest at a low rate.
|
||||
|
||||
## Storage Subsystem {#storage-subsystem}
|
||||
|
||||
|
@ -1,13 +1,13 @@
|
||||
---
|
||||
toc_priority: 68
|
||||
toc_title: Window View
|
||||
toc_title: Time Window
|
||||
---
|
||||
|
||||
# Window View Functions {#window-view-functions}
|
||||
# Time Window Functions {#time-window-functions}
|
||||
|
||||
Window view functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with WindowView are listed below:
|
||||
Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with window views are listed below:
|
||||
|
||||
## tumble {#window-view-functions-tumble}
|
||||
## tumble {#time-window-functions-tumble}
|
||||
|
||||
A tumbling time window assigns records to non-overlapping, continuous windows with a fixed duration (`interval`).
|
||||
|
||||
@ -42,7 +42,7 @@ Result:
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## hop {#window-view-functions-hop}
|
||||
## hop {#time-window-functions-hop}
|
||||
|
||||
A hopping time window has a fixed duration (`window_interval`) and hops by a specified hop interval (`hop_interval`). If the `hop_interval` is smaller than the `window_interval`, hopping windows are overlapping. Thus, records can be assigned to multiple windows.
|
||||
|
||||
@ -79,7 +79,7 @@ Result:
|
||||
└───────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## tumbleStart {#window-view-functions-tumblestart}
|
||||
## tumbleStart {#time-window-functions-tumblestart}
|
||||
|
||||
Returns the inclusive lower bound of the corresponding tumbling window.
|
||||
|
||||
@ -87,7 +87,7 @@ Returns the inclusive lower bound of the corresponding tumbling window.
|
||||
tumbleStart(time_attr, interval [, timezone]);
|
||||
```
|
||||
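For illustration, a minimal call (a sketch; it follows the interval syntax of the `tumble` example above, and the result depends on the current time):

``` sql
SELECT tumbleStart(now(), toIntervalDay('1'));
```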
|
||||
## tumbleEnd {#window-view-functions-tumbleend}
|
||||
## tumbleEnd {#time-window-functions-tumbleend}
|
||||
|
||||
Returns the exclusive upper bound of the corresponding tumbling window.
|
||||
|
||||
@ -95,7 +95,7 @@ Returns the exclusive upper bound of the corresponding tumbling window.
|
||||
tumbleEnd(time_attr, interval [, timezone]);
|
||||
```
|
||||
|
||||
## hopStart {#window-view-functions-hopstart}
|
||||
## hopStart {#time-window-functions-hopstart}
|
||||
|
||||
Returns the inclusive lower bound of the corresponding hopping window.
|
||||
|
||||
@ -103,7 +103,7 @@ Returns the inclusive lower bound of the corresponding hopping window.
|
||||
hopStart(time_attr, hop_interval, window_interval [, timezone]);
|
||||
```
|
||||
|
||||
## hopEnd {#window-view-functions-hopend}
|
||||
## hopEnd {#time-window-functions-hopend}
|
||||
|
||||
Returns the exclusive upper bound of the corresponding hopping window.
|
||||
|
@ -251,22 +251,22 @@ Most common uses of live view tables include:
|
||||
Enable usage of window views and `WATCH` query using [allow_experimental_window_view](../../../operations/settings/settings.md#allow-experimental-window-view) setting. Input the command `set allow_experimental_window_view = 1`.
|
||||
|
||||
``` sql
|
||||
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [ENGINE = engine] [WATERMARK = strategy] [ALLOWED_LATENESS = interval_function] AS SELECT ... GROUP BY window_view_function
|
||||
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [ENGINE = engine] [WATERMARK = strategy] [ALLOWED_LATENESS = interval_function] AS SELECT ... GROUP BY time_window_function
|
||||
```
|
||||
|
||||
A window view can aggregate data by time window and output the results when the window is ready to fire. It stores the partial aggregation results in an inner (or specified) table to reduce latency and can push the processing result to a specified table or push notifications using the `WATCH` query.
|
||||
|
||||
Creating a window view is similar to creating a `MATERIALIZED VIEW`. A window view needs an inner storage engine to store intermediate data. The inner storage uses `AggregatingMergeTree` as the default engine.
|
||||
|
||||
### Window View Functions {#window-view-windowviewfunctions}
|
||||
### Time Window Functions {#window-view-timewindowfunctions}
|
||||
|
||||
[Window view functions](../../functions/window-view-functions.md) are used to get the lower and upper window bound of records. The window view needs to be used with a window view function.
|
||||
[Time window functions](../../functions/time-window-functions.md) are used to get the lower and upper window bound of records. The window view needs to be used with a time window function.
|
||||
|
||||
### Time Attributes {#window-view-timeattributes}
|
||||
|
||||
Window view supports **processing time** and **event time** processing.
|
||||
|
||||
**Processing time** allows window view to produce results based on the local machine's time and is used by default. It is the most straightforward notion of time but does not provide determinism. The processing time attribute can be defined by setting the `time_attr` of the window view function to a table column or using the function `now()`. The following query creates a window view with processing time.
|
||||
**Processing time** allows window view to produce results based on the local machine's time and is used by default. It is the most straightforward notion of time but does not provide determinism. The processing time attribute can be defined by setting the `time_attr` of the time window function to a table column or using the function `now()`. The following query creates a window view with processing time.
|
||||
|
||||
``` sql
|
||||
CREATE WINDOW VIEW wv AS SELECT count(number), tumbleStart(w_id) as w_start from date GROUP BY tumble(now(), INTERVAL '5' SECOND) as w_id
|
||||
|
99
docs/ru/interfaces/grpc.md
Normal file
99
docs/ru/interfaces/grpc.md
Normal file
@ -0,0 +1,99 @@
|
||||
---
|
||||
toc_priority: 18
|
||||
toc_title: gRPC интерфейс
|
||||
---
|
||||
|
||||
# Интерфейс gRPC {#grpc-interface}
|
||||
|
||||
## Введение {#grpc-interface-introduction}
|
||||
|
||||
ClickHouse поддерживает интерфейс [gRPC](https://grpc.io/). Это система удаленного вызова процедур с открытым исходным кодом, которая использует HTTP/2 и [Protocol Buffers](https://ru.wikipedia.org/wiki/Protocol_Buffers). В реализации gRPC в ClickHouse поддерживаются:
|
||||
|
||||
- SSL;
|
||||
- аутентификация;
|
||||
- сессии;
|
||||
- сжатие;
|
||||
- параллельные запросы, выполняемые через один канал;
|
||||
- отмена запросов;
|
||||
- получение прогресса операций и логов;
|
||||
- внешние таблицы.
|
||||
|
||||
Спецификация интерфейса содержится в [clickhouse_grpc.proto](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||
|
||||
## Конфигурация gRPC {#grpc-interface-configuration}
|
||||
|
||||
Чтобы сделать доступным интерфейс gRPC, нужно задать порт с помощью настройки `grpc_port` в [конфигурации сервера](../operations/configuration-files.md). Другие настройки приведены в примере:
|
||||
|
||||
```xml
|
||||
<grpc_port>9100</grpc_port>
|
||||
<grpc>
|
||||
<enable_ssl>false</enable_ssl>
|
||||
|
||||
<!-- Пути к файлам сертификатов и ключей. Используются при включенном SSL -->
|
||||
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
|
||||
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
|
||||
|
||||
<!-- Запрашивает ли сервер сертификат клиента -->
|
||||
<ssl_require_client_auth>false</ssl_require_client_auth>
|
||||
|
||||
<!-- Используется, если необходимо запрашивать сертификат -->
|
||||
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
|
||||
|
||||
<!-- Алгоритм сжатия по умолчанию (применяется, если клиент не указывает алгоритм, см. result_compression в QueryInfo).
|
||||
Поддерживаются алгоритмы: none, deflate, gzip, stream_gzip -->
|
||||
<compression>deflate</compression>
|
||||
|
||||
<!-- Уровень сжатия по умолчанию (применяется, если клиент не указывает уровень сжатия, см. result_compression в QueryInfo).
|
||||
Поддерживаемые уровни: none, low, medium, high -->
|
||||
<compression_level>medium</compression_level>
|
||||
|
||||
<!-- Ограничение в байтах на размер отправляемых и принимаемых сообщений. -1 означает отсутствие ограничения -->
|
||||
<max_send_message_size>-1</max_send_message_size>
|
||||
<max_receive_message_size>-1</max_receive_message_size>
|
||||
|
||||
<!-- Выводить ли детализированные логи -->
|
||||
<verbose_logs>false</verbose_logs>
|
||||
</grpc>
|
||||
```
|
||||
|
||||
## Встроенный клиент {#grpc-client}
|
||||
|
||||
Можно написать клиент на любом языке программирования, который поддерживается gRPC, с использованием [спецификации](https://github.com/ClickHouse/ClickHouse/blob/master/src/Server/grpc_protos/clickhouse_grpc.proto).
|
||||
Также можно воспользоваться встроенным Python клиентом. Он расположен в [utils/grpc-client/clickhouse-grpc-client.py](https://github.com/ClickHouse/ClickHouse/blob/master/utils/grpc-client/clickhouse-grpc-client.py) в репозитории. Для работы встроенного клиента требуются Python модули [grpcio и grpcio-tools](https://grpc.io/docs/languages/python/quickstart).
|
||||
|
||||
Клиент поддерживает аргументы:
|
||||
|
||||
- `--help` – вывести справку и завершить работу.
|
||||
- `--host HOST, -h HOST` – имя сервера. Значение по умолчанию: `localhost`. Можно задать адрес IPv4 или IPv6.
|
||||
- `--port PORT` – номер порта. Этот порт должен быть задан в конфигурации сервера ClickHouse настройкой `grpc_port`. Значение по умолчанию: `9100`.
|
||||
- `--user USER_NAME, -u USER_NAME` – имя пользователя. Значение по умолчанию: `default`.
|
||||
- `--password PASSWORD` – пароль. Значение по умолчанию: пустая строка.
|
||||
- `--query QUERY, -q QUERY` – запрос, который выполнится, когда используется неинтерактивный режим работы.
|
||||
- `--database DATABASE, -d DATABASE` – база данных по умолчанию. Если не указана, то будет использована база данных, заданная в настройках сервера (по умолчанию `default`).
|
||||
- `--format OUTPUT_FORMAT, -f OUTPUT_FORMAT` – [формат](formats.md) вывода результата. Значение по умолчанию для интерактивного режима: `PrettyCompact`.
|
||||
- `--debug` – вывод отладочной информации.
|
||||
|
||||
Чтобы запустить клиент в интерактивном режиме, не указывайте аргумент `--query`.
|
||||
|
||||
В неинтерактивном режиме данные запроса можно передать через `stdin`.
|
||||
|
||||
**Пример использования клиента**
|
||||
|
||||
В примере создается таблица, и в нее загружаются данные из CSV файла. Затем выводится содержимое таблицы.
|
||||
|
||||
``` bash
|
||||
./clickhouse-grpc-client.py -q "CREATE TABLE grpc_example_table (id UInt32, text String) ENGINE = MergeTree() ORDER BY id;"
|
||||
echo "0,Input data for" > a.txt ; echo "1,gRPC protocol example" >> a.txt
|
||||
cat a.txt | ./clickhouse-grpc-client.py -q "INSERT INTO grpc_example_table FORMAT CSV"
|
||||
|
||||
./clickhouse-grpc-client.py --format PrettyCompact -q "SELECT * FROM grpc_example_table;"
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─id─┬─text──────────────────┐
|
||||
│ 0 │ Input data for │
|
||||
│ 1 │ gRPC protocol example │
|
||||
└────┴───────────────────────┘
|
||||
```
|
@ -6,12 +6,13 @@ toc_title: "Введение"
|
||||
|
||||
# Интерфейсы {#interfaces}
|
||||
|
||||
ClickHouse предоставляет два сетевых интерфейса (оба могут быть дополнительно обернуты в TLS для дополнительной безопасности):
|
||||
ClickHouse предоставляет три сетевых интерфейса (они могут быть обернуты в TLS для дополнительной безопасности):
|
||||
|
||||
- [HTTP](http.md), который задокументирован и прост для использования напрямую;
|
||||
- [Native TCP](tcp.md), который имеет меньше накладных расходов.
|
||||
- [Native TCP](tcp.md), который имеет меньше накладных расходов;
|
||||
- [gRPC](grpc.md).
|
||||
|
||||
В большинстве случаев рекомендуется использовать подходящий инструмент или библиотеку, а не напрямую взаимодействовать с ClickHouse по сути. Официально поддерживаемые Яндексом:
|
||||
В большинстве случаев рекомендуется использовать подходящий инструмент или библиотеку, а не напрямую взаимодействовать с ClickHouse. Официально поддерживаемые Яндексом:
|
||||
|
||||
- [Консольный клиент](cli.md);
|
||||
- [JDBC-драйвер](jdbc.md);
|
||||
|
@ -52,7 +52,7 @@ ClickHouse перезагружает встроенные словари с з
|
||||
ClickHouse проверяет условия для `min_part_size` и `min_part_size_ratio` и выполнит те блоки `case`, для которых условия совпали.
|
||||
|
||||
- Если кусок данных совпадает с условиями, ClickHouse использует указанные метод сжатия.
|
||||
- Если кусок данных совпадает с несколькими блоками `case`, ClickHouse использует перый совпавший блок условий.
|
||||
- Если кусок данных совпадает с несколькими блоками `case`, ClickHouse использует первый совпавший блок условий.
|
||||
|
||||
Если ни один `<case>` не подходит, то ClickHouse применит алгоритм сжатия `lz4`.
|
||||
|
||||
@ -554,13 +554,13 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
|
||||
Ключи:
|
||||
|
||||
- `enabled` – Булевый флаг чтобы включить функциональность, по умолчанию `false`. Установите `true` чтобы разрешить отправку отчетов о сбоях.
|
||||
- `endpoint` – Вы можете переопределить URL на который будут отсылаться отчеты об ошибках и использовать собственную инсталяцию Sentry. Используйте URL синтаксис [Sentry DSN](https://docs.sentry.io/error-reporting/quickstart/?platform=native#configure-the-sdk).
|
||||
- `endpoint` – Вы можете переопределить URL на который будут отсылаться отчеты об ошибках и использовать собственную инсталляцию Sentry. Используйте URL синтаксис [Sentry DSN](https://docs.sentry.io/error-reporting/quickstart/?platform=native#configure-the-sdk).
|
||||
- `anonymize` - Запретить отсылку имени хоста сервера в отчете о сбое.
|
||||
- `http_proxy` - Настройка HTTP proxy для отсылки отчетов о сбоях.
|
||||
- `debug` - Настроить клиентскую библиотеку Sentry в debug режим.
|
||||
- `tmp_path` - Путь в файловой системе для временного хранения состояния отчетов о сбоях перед отправкой на сервер Sentry.
|
||||
|
||||
**Рекомендованые настройки**
|
||||
**Рекомендованные настройки**
|
||||
|
||||
``` xml
|
||||
<send_crash_reports>
|
||||
@ -751,9 +751,13 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
|
||||
|
||||
## max_thread_pool_size {#max-thread-pool-size}
|
||||
|
||||
Максимальное количество потоков в глобальном пуле потоков.
|
||||
ClickHouse использует потоки из глобального пула потоков для обработки запросов. Если в пуле нет свободных потоков, то в нем создается еще один. Параметр `max_thread_pool_size` ограничивает максимальное количество потоков в пуле.
|
||||
|
||||
Значение по умолчанию: 10000.
|
||||
Возможные значения:
|
||||
|
||||
- Положительное целое число.
|
||||
|
||||
Значение по умолчанию: `10000`.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -761,6 +765,38 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
|
||||
<max_thread_pool_size>12000</max_thread_pool_size>
|
||||
```
|
||||
|
||||
## max_thread_pool_free_size {#max-thread-pool-free-size}
|
||||
|
||||
Если в глобальном пуле потоков количество **свободных** потоков больше, чем задано параметром `max_thread_pool_free_size`, то ClickHouse освобождает ресурсы, занятые некоторыми потоками. В таком случае размер пула уменьшается. При необходимости потоки будут созданы заново.
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- Положительное целое число.
|
||||
|
||||
Значение по умолчанию: `1000`.
|
||||
|
||||
**Пример**
|
||||
|
||||
``` xml
|
||||
<max_thread_pool_free_size>1200</max_thread_pool_free_size>
|
||||
```
|
||||
|
||||
## thread_pool_queue_size {#thread-pool-queue-size}
|
||||
|
||||
Максимальное количество задач, которые запланированы для выполнения в глобальном пуле потоков. При увеличении этого параметра возрастает использование памяти. Рекомендуется, чтобы значение этого параметра совпадало со значением параметра [max_thread_pool_size](#max-thread-pool-size).
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- Положительное целое число.
|
||||
|
||||
Значение по умолчанию: `10000`.
|
||||
|
||||
**Пример**
|
||||
|
||||
``` xml
|
||||
<thread_pool_queue_size>12000</thread_pool_queue_size>
|
||||
```
|
||||
|
||||
## merge_tree {#server_configuration_parameters-merge_tree}
|
||||
|
||||
Тонкая настройка таблиц семейства [MergeTree](../../operations/server-configuration-parameters/settings.md).
|
||||
@ -1011,7 +1047,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
|
||||
|
||||
Если таблица не существует, то ClickHouse создаст её. Если структура журнала запросов изменилась при обновлении сервера ClickHouse, то таблица со старой структурой переименовывается, а новая таблица создается автоматически.
|
||||
|
||||
**Example**
|
||||
**Пример**
|
||||
|
||||
``` xml
|
||||
<query_views_log>
|
||||
@ -1075,9 +1111,8 @@ Parameters:

## query_masking_rules {#query-masking-rules}

Правила основанные на регурялных выражениях, которые будут применены для всех запросов а также для всех сообщений перед сохранением их в лог на сервере,
`system.query_log`, `system.text_log`, `system.processes` таблицы, а также в логах отсылаемых клиенту. Это позволяет предотвратить
утечку конфиденциальных данных из SQL запросов (такие как имена, электронные письма, личные идентификаторы или номера кредитных карт) в логи.
Правила, основанные на регулярных выражениях, которые будут применены для всех запросов, а также для всех сообщений перед сохранением их в лог на сервере,
`system.query_log`, `system.text_log`, `system.processes` таблицы, а также в логах, отсылаемых клиенту. Это позволяет предотвратить утечку конфиденциальных данных из SQL запросов (такие как имена, электронные письма, личные идентификаторы или номера кредитных карт) в логи.

**Пример**

@ -1096,7 +1131,7 @@ Parameters:
- `regexp` - совместимое с RE2 регулярное выражение (обязательное)
- `replace` - строка замены для конфиденциальных данных (опционально, по умолчанию - шесть звездочек)

Правила маскировки применяются ко всему запросу (для предотвращения утечки конфиденциальных данных из неправильно оформленных / не интерпритируемых запросов).
Правила маскировки применяются ко всему запросу (для предотвращения утечки конфиденциальных данных из неправильно оформленных / не интерпретируемых запросов).

`system.events` таблица содержит счетчик `QueryMaskingRulesMatch` который считает общее кол-во совпадений правил маскировки.
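A hedged sketch of the rule format described above; the `name` element and the concrete pattern are illustrative assumptions, not part of this diff:

``` xml
<query_masking_rules>
    <rule>
        <name>hide SSN</name>
        <regexp>\b\d{3}-\d{2}-\d{4}\b</regexp>
        <replace>***</replace>
    </rule>
</query_masking_rules>
```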
@ -1418,7 +1453,7 @@ ClickHouse использует ZooKeeper для хранения метадан

Также вы можете добавить секции `memory` — означает хранение информации только в памяти, без записи на диск, и `ldap` — означает хранения информации на [LDAP-сервере](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).

Чтобы добавить LDAP-сервер в качестве удаленного каталога пользователей, которые не определены локально, определите один раздел `ldap` со следующими параметрами:
- `server` — имя одного из LDAP-серверов, определенных в секции `ldap_servers` конфигурациионного файла. Этот параметр явялется необязательным и может быть пустым.
- `server` — имя одного из LDAP-серверов, определенных в секции `ldap_servers` конфигурационного файла. Этот параметр является необязательным и может быть пустым.
- `roles` — раздел со списком локально определенных ролей, которые будут назначены каждому пользователю, полученному с LDAP-сервера. Если роли не заданы, пользователь не сможет выполнять никаких действий после аутентификации. Если какая-либо из перечисленных ролей не определена локально во время проверки подлинности, попытка проверки подлинности завершится неудачей, как если бы предоставленный пароль был неверным.

**Пример**
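The example itself is not included in this diff. A hedged sketch of an `ldap` remote user directory that uses only the `server` and `roles` parameters described above; the server and role names are placeholders:

``` xml
<user_directories>
    <ldap>
        <!-- Name of one of the servers defined in the `ldap_servers` section. -->
        <server>my_ldap_server</server>
        <!-- Locally defined roles assigned to every user fetched from the LDAP server. -->
        <roles>
            <my_local_role1 />
            <my_local_role2 />
        </roles>
    </ldap>
</user_directories>
```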
@ -1,13 +1,13 @@
|
||||
---
|
||||
toc_priority: 68
|
||||
toc_title: Window View
|
||||
toc_title: 时间窗口
|
||||
---
|
||||
|
||||
# Window View 函数 {#window-view-han-shu}
|
||||
# 时间窗口函数 {#time-window-han-shu}
|
||||
|
||||
Window view函数用于获取窗口的起始(包含边界)和结束时间(不包含边界)。系统支持的window view函数如下:
|
||||
时间窗口函数用于获取窗口的起始(包含边界)和结束时间(不包含边界)。系统支持的时间窗口函数如下:
|
||||
|
||||
## tumble {#window-view-functions-tumble}
|
||||
## tumble {#time-window-functions-tumble}
|
||||
|
||||
tumble窗口是连续的、不重叠的固定大小(`interval`)时间窗口。
|
||||
|
||||
@ -42,7 +42,7 @@ SELECT tumble(now(), toIntervalDay('1'))
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## hop {#window-view-functions-hop}
|
||||
## hop {#time-window-functions-hop}
|
||||
|
||||
hop窗口是一个固定大小(`window_interval`)的时间窗口,并按照一个固定的滑动间隔(`hop_interval`)滑动。当滑动间隔小于窗口大小时,滑动窗口间存在重叠,此时一个数据可能存在于多个窗口。
|
||||
|
||||
@ -79,7 +79,7 @@ SELECT hop(now(), INTERVAL '1' SECOND, INTERVAL '2' SECOND)
|
||||
└───────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## tumbleStart {#window-view-functions-tumblestart}
|
||||
## tumbleStart {#time-window-functions-tumblestart}
|
||||
|
||||
返回tumble窗口的开始时间(包含边界)。
|
||||
|
||||
@ -87,7 +87,7 @@ SELECT hop(now(), INTERVAL '1' SECOND, INTERVAL '2' SECOND)
|
||||
tumbleStart(time_attr, interval [, timezone]);
|
||||
```
|
||||
|
||||
## tumbleEnd {#window-view-functions-tumbleend}
|
||||
## tumbleEnd {#time-window-functions-tumbleend}
|
||||
|
||||
返回tumble窗口的结束时间(不包含边界)。
|
||||
|
||||
@ -95,7 +95,7 @@ tumbleStart(time_attr, interval [, timezone]);
|
||||
tumbleEnd(time_attr, interval [, timezone]);
|
||||
```
|
||||
|
||||
## hopStart {#window-view-functions-hopstart}
|
||||
## hopStart {#time-window-functions-hopstart}
|
||||
|
||||
返回hop窗口的开始时间(包含边界)。
|
||||
|
||||
@ -103,7 +103,7 @@ tumbleEnd(time_attr, interval [, timezone]);
|
||||
hopStart(time_attr, hop_interval, window_interval [, timezone]);
|
||||
```
|
||||
|
||||
## hopEnd {#window-view-functions-hopend}
|
||||
## hopEnd {#time-window-functions-hopend}
|
||||
|
||||
返回hop窗口的结束时间(不包含边界)。
|
||||
|
@ -250,28 +250,28 @@ Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table defa
`set allow_experimental_window_view = 1`。

``` sql
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [ENGINE = engine] [WATERMARK = strategy] [ALLOWED_LATENESS = interval_function] AS SELECT ... GROUP BY window_view_function
CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [ENGINE = engine] [WATERMARK = strategy] [ALLOWED_LATENESS = interval_function] AS SELECT ... GROUP BY time_window_function
```

Window view可以通过时间窗口聚合数据,并在满足窗口触发条件时自动触发对应窗口计算。其通过将计算状态保存降低处理延迟,支持将处理结果输出至目标表或通过`WATCH`语句输出至终端。

创建window view的方式和创建物化视图类似。Window view使用默认为`AggregatingMergeTree`的内部存储引擎存储计算中间状态。

### Window View 函数 {#window-view-han-shu}
### 时间窗口函数 {#window-view-shi-jian-chuang-kou-han-shu}

[Window view函数](../../functions/window-view-functions.md)用于获取窗口的起始和结束时间。Window view需要和window view函数配合使用。
[时间窗口函数](../../functions/time-window-functions.md)用于获取窗口的起始和结束时间。Window view需要和时间窗口函数配合使用。

### 时间属性 {#window-view-shi-jian-shu-xing}

Window view 支持**处理时间**和**事件时间**两种时间类型。

**处理时间**为默认时间类型,该模式下window view使用本地机器时间计算窗口数据。“处理时间”时间类型计算简单,但具有不确定性。该模式下时间可以为window view函数的第一个参数`time_attr`,或通过函数`now()`使用当前机器时间。下面的例子展示了使用“处理时间”创建的window view的例子。
**处理时间**为默认时间类型,该模式下window view使用本地机器时间计算窗口数据。“处理时间”时间类型计算简单,但具有不确定性。该模式下时间可以为时间窗口函数的第一个参数`time_attr`,或通过函数`now()`使用当前机器时间。下面的例子展示了使用“处理时间”创建window view的例子。

``` sql
CREATE WINDOW VIEW wv AS SELECT count(number), tumbleStart(w_id) as w_start from date GROUP BY tumble(now(), INTERVAL '5' SECOND) as w_id
```

**事件时间** 是事件真实发生的时间,该时间往往在事件发生时便嵌入数据记录。事件时间处理提供较高的确定性,可以处理乱序数据以及迟到数据。Window view 通过水位线(`WATERMARK`)启用事件时间处理。
**事件时间** 是事件真实发生的时间,该时间往往在事件发生时便嵌入数据记录。事件时间处理提供较高的确定性,可以处理乱序数据以及迟到数据。Window view通过水位线(`WATERMARK`)启用事件时间处理。
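A hedged example of a window view that uses event time with a watermark; the target table, source table and column names are hypothetical, and `ASCENDING` is assumed to be one of the watermark strategies the text goes on to list (the list itself is outside this diff's context):

``` sql
CREATE WINDOW VIEW wv TO dst WATERMARK=ASCENDING AS
SELECT count(number), tumbleStart(w_id) AS w_start
FROM events GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS w_id
```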

Window view提供如下三种水位线策略:
@ -20,9 +20,7 @@
|
||||
#include <base/argsToConfig.h>
|
||||
#include <base/find_symbols.h>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config_version.h>
|
||||
#endif
|
||||
#include <Common/config_version.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <Common/TerminalSize.h>
|
||||
|
@ -22,10 +22,8 @@
|
||||
#include <pwd.h>
|
||||
#include <Coordination/FourLetterCommand.h>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include "config_core.h"
|
||||
# include "Common/config_version.h"
|
||||
#endif
|
||||
#include "config_core.h"
|
||||
#include "Common/config_version.h"
|
||||
|
||||
#if USE_SSL
|
||||
# include <Poco/Net/Context.h>
|
||||
|
@ -388,12 +388,6 @@ void LocalServer::setupUsers()
|
||||
}
|
||||
|
||||
|
||||
String LocalServer::getQueryTextPrefix()
|
||||
{
|
||||
return getInitialCreateTableQuery();
|
||||
}
|
||||
|
||||
|
||||
void LocalServer::connect()
|
||||
{
|
||||
connection_parameters = ConnectionParameters(config());
|
||||
@ -463,6 +457,10 @@ try
|
||||
}
|
||||
#endif
|
||||
|
||||
String initial_query = getInitialCreateTableQuery();
|
||||
if (!initial_query.empty())
|
||||
processQueryText(initial_query);
|
||||
|
||||
if (is_interactive && !delayed_interactive)
|
||||
{
|
||||
runInteractive();
|
||||
|
@ -37,7 +37,6 @@ protected:
|
||||
void processError(const String & query) const override;
|
||||
String getName() const override { return "local"; }
|
||||
|
||||
String getQueryTextPrefix() override;
|
||||
void printHelpMessage(const OptionsDescription & options_description) override;
|
||||
|
||||
void addOptions(OptionsDescription & options_description) override;
|
||||
|
@ -13,9 +13,7 @@
|
||||
#include <tuple>
|
||||
#include <utility> /// pair
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include "config_tools.h"
|
||||
#endif
|
||||
#include "config_tools.h"
|
||||
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <Common/getHashOfLoadedBinary.h>
|
||||
|
@ -82,10 +82,8 @@
|
||||
#include <Compression/CompressionCodecEncrypted.h>
|
||||
#include <filesystem>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include "config_core.h"
|
||||
# include "Common/config_version.h"
|
||||
#endif
|
||||
#include "config_core.h"
|
||||
#include "Common/config_version.h"
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
# include <sys/mman.h>
|
||||
@ -96,7 +94,7 @@
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
# if USE_INTERNAL_SSL_LIBRARY && !defined(ARCADIA_BUILD)
|
||||
# if USE_INTERNAL_SSL_LIBRARY
|
||||
# include <Compression/CompressionCodecEncrypted.h>
|
||||
# endif
|
||||
# include <Poco/Net/Context.h>
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <Access/QuotaUsage.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
#include <base/chrono_io.h>
|
||||
#include <base/range.h>
|
||||
#include <boost/smart_ptr/make_shared.hpp>
|
||||
@ -15,6 +16,7 @@ namespace ErrorCodes
|
||||
extern const int QUOTA_EXPIRED;
|
||||
}
|
||||
|
||||
|
||||
struct EnabledQuota::Impl
|
||||
{
|
||||
[[noreturn]] static void throwQuotaExceed(
|
||||
@ -35,54 +37,6 @@ struct EnabledQuota::Impl
|
||||
}
|
||||
|
||||
|
||||
/// Returns the end of the current interval. If the passed `current_time` is greater than that end,
|
||||
/// the function automatically recalculates the interval's end by adding the interval's duration
|
||||
/// one or more times until the interval's end is greater than `current_time`.
|
||||
/// If that recalculation occurs the function also resets amounts of resources used and sets the variable
|
||||
/// `counters_were_reset`.
|
||||
static std::chrono::system_clock::time_point getEndOfInterval(
|
||||
const Interval & interval, std::chrono::system_clock::time_point current_time, bool & counters_were_reset)
|
||||
{
|
||||
auto & end_of_interval = interval.end_of_interval;
|
||||
auto end_loaded = end_of_interval.load();
|
||||
auto end = std::chrono::system_clock::time_point{end_loaded};
|
||||
if (current_time < end)
|
||||
{
|
||||
counters_were_reset = false;
|
||||
return end;
|
||||
}
|
||||
|
||||
bool need_reset_counters = false;
|
||||
|
||||
do
|
||||
{
|
||||
/// Calculate the end of the next interval:
|
||||
/// | X |
|
||||
/// end current_time next_end = end + duration * n
|
||||
/// where n is an integer number, n >= 1.
|
||||
const auto duration = interval.duration;
|
||||
UInt64 n = static_cast<UInt64>((current_time - end + duration) / duration);
|
||||
end = end + duration * n;
|
||||
if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch()))
|
||||
{
|
||||
/// We reset counters only if the interval's end has been calculated before.
|
||||
/// If it hasn't we just calculate the interval's end for the first time and don't reset counters yet.
|
||||
need_reset_counters = (end_loaded.count() != 0);
|
||||
break;
|
||||
}
|
||||
end = std::chrono::system_clock::time_point{end_loaded};
|
||||
}
|
||||
while (current_time >= end);
|
||||
|
||||
if (need_reset_counters)
|
||||
{
|
||||
boost::range::fill(interval.used, 0);
|
||||
counters_were_reset = true;
|
||||
}
|
||||
return end;
|
||||
}
|
||||
|
||||
|
||||
static void used(
|
||||
const String & user_name,
|
||||
const Intervals & intervals,
|
||||
@ -91,24 +45,22 @@ struct EnabledQuota::Impl
|
||||
std::chrono::system_clock::time_point current_time,
|
||||
bool check_exceeded)
|
||||
{
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
for (const auto & interval : intervals.intervals)
|
||||
{
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
QuotaValue used = (interval.used[quota_type_i] += value);
|
||||
QuotaValue max = interval.max[quota_type_i];
|
||||
if (!max)
|
||||
continue;
|
||||
|
||||
if (used > max)
|
||||
{
|
||||
bool counters_were_reset = false;
|
||||
auto end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
|
||||
auto end_of_interval = interval.getEndOfInterval(current_time, counters_were_reset);
|
||||
if (counters_were_reset)
|
||||
{
|
||||
used = (interval.used[quota_type_i] += value);
|
||||
if ((used > max) && check_exceeded)
|
||||
throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
|
||||
}
|
||||
else if (check_exceeded)
|
||||
|
||||
if (check_exceeded && (used > max))
|
||||
throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
|
||||
}
|
||||
}
|
||||
@ -127,10 +79,11 @@ struct EnabledQuota::Impl
|
||||
QuotaValue max = interval.max[quota_type_i];
|
||||
if (!max)
|
||||
continue;
|
||||
|
||||
if (used > max)
|
||||
{
|
||||
bool counters_were_reset = false;
|
||||
std::chrono::system_clock::time_point end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
|
||||
auto end_of_interval = interval.getEndOfInterval(current_time, counters_were_reset);
|
||||
if (!counters_were_reset)
|
||||
throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
|
||||
}
|
||||
@ -145,17 +98,32 @@ struct EnabledQuota::Impl
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
checkExceeded(user_name, intervals, quota_type, current_time);
|
||||
}
|
||||
|
||||
static std::chrono::system_clock::duration randomDuration(std::chrono::seconds max)
|
||||
{
|
||||
auto count = std::chrono::duration_cast<std::chrono::system_clock::duration>(max).count();
|
||||
std::uniform_int_distribution<Int64> distribution{0, count - 1};
|
||||
return std::chrono::system_clock::duration(distribution(thread_local_rng));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
EnabledQuota::Interval::Interval()
|
||||
EnabledQuota::Interval::Interval(std::chrono::seconds duration_, bool randomize_interval_, std::chrono::system_clock::time_point current_time_)
|
||||
: duration(duration_) , randomize_interval(randomize_interval_)
|
||||
{
|
||||
std::chrono::system_clock::time_point initial_end{};
|
||||
if (randomize_interval_)
|
||||
initial_end += Impl::randomDuration(duration_);
|
||||
end_of_interval = initial_end.time_since_epoch();
|
||||
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
used[quota_type_i].store(0);
|
||||
max[quota_type_i] = 0;
|
||||
}
|
||||
|
||||
getEndOfInterval(current_time_); /// Force updating the end of the interval for the first time.
|
||||
}
|
||||
|
||||
|
||||
@ -177,6 +145,55 @@ EnabledQuota::Interval & EnabledQuota::Interval::operator =(const Interval & src
|
||||
}
|
||||
|
||||
|
||||
/// Returns the end of the current interval. If the passed `current_time` is greater than that end,
|
||||
/// the function automatically recalculates the interval's end by adding the interval's duration
|
||||
/// one or more times until the interval's end is greater than `current_time`.
|
||||
/// If that recalculation occurs the function also resets amounts of resources used and sets the variable
|
||||
/// `counters_were_reset`.
|
||||
std::chrono::system_clock::time_point EnabledQuota::Interval::getEndOfInterval(std::chrono::system_clock::time_point current_time) const
|
||||
{
|
||||
bool counters_were_reset;
|
||||
return getEndOfInterval(current_time, counters_were_reset);
|
||||
}
|
||||
|
||||
std::chrono::system_clock::time_point EnabledQuota::Interval::getEndOfInterval(std::chrono::system_clock::time_point current_time, bool & counters_were_reset) const
|
||||
{
|
||||
auto end_loaded = end_of_interval.load();
|
||||
auto end = std::chrono::system_clock::time_point{end_loaded};
|
||||
if (current_time < end)
|
||||
{
|
||||
counters_were_reset = false;
|
||||
return end;
|
||||
}
|
||||
|
||||
bool need_reset_counters = false;
|
||||
|
||||
do
|
||||
{
|
||||
/// Calculate the end of the next interval:
|
||||
/// | X |
|
||||
/// end current_time next_end = end + duration * n
|
||||
/// where n is an integer number, n >= 1.
|
||||
UInt64 n = static_cast<UInt64>((current_time - end + duration) / duration);
|
||||
end = end + duration * n;
|
||||
if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch()))
|
||||
{
|
||||
need_reset_counters = true;
|
||||
break;
|
||||
}
|
||||
end = std::chrono::system_clock::time_point{end_loaded};
|
||||
}
|
||||
while (current_time >= end);
|
||||
|
||||
if (need_reset_counters)
|
||||
{
|
||||
boost::range::fill(used, 0);
|
||||
counters_were_reset = true;
|
||||
}
|
||||
return end;
|
||||
}
|
||||
|
||||
|
||||
std::optional<QuotaUsage> EnabledQuota::Intervals::getUsage(std::chrono::system_clock::time_point current_time) const
|
||||
{
|
||||
if (!quota_id)
|
||||
@ -192,8 +209,7 @@ std::optional<QuotaUsage> EnabledQuota::Intervals::getUsage(std::chrono::system_
|
||||
auto & out = usage.intervals.back();
|
||||
out.duration = in.duration;
|
||||
out.randomize_interval = in.randomize_interval;
|
||||
bool counters_were_reset = false;
|
||||
out.end_of_interval = Impl::getEndOfInterval(in, current_time, counters_were_reset);
|
||||
out.end_of_interval = in.getEndOfInterval(current_time);
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
|
@ -73,9 +73,13 @@ private:
|
||||
bool randomize_interval = false;
|
||||
mutable std::atomic<std::chrono::system_clock::duration> end_of_interval;
|
||||
|
||||
Interval();
|
||||
Interval(std::chrono::seconds duration_, bool randomize_interval_, std::chrono::system_clock::time_point current_time_);
|
||||
|
||||
Interval(const Interval & src) { *this = src; }
|
||||
Interval & operator =(const Interval & src);
|
||||
|
||||
std::chrono::system_clock::time_point getEndOfInterval(std::chrono::system_clock::time_point current_time) const;
|
||||
std::chrono::system_clock::time_point getEndOfInterval(std::chrono::system_clock::time_point current_time, bool & counters_were_reset) const;
|
||||
};
|
||||
|
||||
struct Intervals
|
||||
|
@ -4,7 +4,6 @@
|
||||
#include <Access/QuotaUsage.h>
|
||||
#include <Access/AccessControl.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
#include <base/range.h>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <boost/range/algorithm/copy.hpp>
|
||||
@ -22,17 +21,6 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
|
||||
namespace
|
||||
{
|
||||
std::chrono::system_clock::duration randomDuration(std::chrono::seconds max)
|
||||
{
|
||||
auto count = std::chrono::duration_cast<std::chrono::system_clock::duration>(max).count();
|
||||
std::uniform_int_distribution<Int64> distribution{0, count - 1};
|
||||
return std::chrono::system_clock::duration(distribution(thread_local_rng));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void QuotaCache::QuotaInfo::setQuota(const QuotaPtr & quota_, const UUID & quota_id_)
|
||||
{
|
||||
quota = quota_;
|
||||
@ -94,18 +82,21 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::getOrBui
|
||||
auto it = key_to_intervals.find(key);
|
||||
if (it != key_to_intervals.end())
|
||||
return it->second;
|
||||
return rebuildIntervals(key);
|
||||
return rebuildIntervals(key, std::chrono::system_clock::now());
|
||||
}
|
||||
|
||||
|
||||
void QuotaCache::QuotaInfo::rebuildAllIntervals()
|
||||
{
|
||||
if (key_to_intervals.empty())
|
||||
return;
|
||||
auto current_time = std::chrono::system_clock::now();
|
||||
for (const String & key : key_to_intervals | boost::adaptors::map_keys)
|
||||
rebuildIntervals(key);
|
||||
rebuildIntervals(key, current_time);
|
||||
}
|
||||
|
||||
|
||||
boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildIntervals(const String & key)
|
||||
boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildIntervals(const String & key, std::chrono::system_clock::time_point current_time)
|
||||
{
|
||||
auto new_intervals = boost::make_shared<Intervals>();
|
||||
new_intervals->quota_name = quota->getName();
|
||||
@ -115,14 +106,8 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildI
|
||||
intervals.reserve(quota->all_limits.size());
|
||||
for (const auto & limits : quota->all_limits)
|
||||
{
|
||||
intervals.emplace_back();
|
||||
intervals.emplace_back(limits.duration, limits.randomize_interval, current_time);
|
||||
auto & interval = intervals.back();
|
||||
interval.duration = limits.duration;
|
||||
std::chrono::system_clock::time_point end_of_interval{};
|
||||
interval.randomize_interval = limits.randomize_interval;
|
||||
if (limits.randomize_interval)
|
||||
end_of_interval += randomDuration(limits.duration);
|
||||
interval.end_of_interval = end_of_interval.time_since_epoch();
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
|
@ -43,7 +43,7 @@ private:
|
||||
|
||||
String calculateKey(const EnabledQuota & enabled_quota) const;
|
||||
boost::shared_ptr<const Intervals> getOrBuildIntervals(const String & key);
|
||||
boost::shared_ptr<const Intervals> rebuildIntervals(const String & key);
|
||||
boost::shared_ptr<const Intervals> rebuildIntervals(const String & key, std::chrono::system_clock::time_point current_time);
|
||||
void rebuildAllIntervals();
|
||||
|
||||
QuotaPtr quota;
|
||||
|
@ -218,9 +218,9 @@ public:
|
||||
using ColVecType = ColumnVectorOrDecimal<T>;
|
||||
|
||||
|
||||
void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const final
|
||||
{
|
||||
this->data(place).numerator += static_cast<const ColVecType &>(*columns[0]).getData()[row_num];
|
||||
increment(place, static_cast<const ColVecType &>(*columns[0]).getData()[row_num]);
|
||||
++this->data(place).denominator;
|
||||
}
|
||||
|
||||
@ -240,7 +240,7 @@ public:
|
||||
sum_data.addMany(column.getData().data(), batch_size);
|
||||
this->data(place).denominator += batch_size;
|
||||
}
|
||||
this->data(place).numerator += sum_data.sum;
|
||||
increment(place, sum_data.sum);
|
||||
}
|
||||
|
||||
void addBatchSinglePlaceNotNull(
|
||||
@ -270,7 +270,7 @@ public:
|
||||
sum_data.addManyNotNull(column.getData().data(), null_map, batch_size);
|
||||
this->data(place).denominator += batch_size - countBytesInFilter(null_map, batch_size);
|
||||
}
|
||||
this->data(place).numerator += sum_data.sum;
|
||||
increment(place, sum_data.sum);
|
||||
}
|
||||
|
||||
String getName() const override { return "avg"; }
|
||||
@ -298,5 +298,10 @@ public:
|
||||
|
||||
#endif
|
||||
|
||||
private:
|
||||
void NO_SANITIZE_UNDEFINED increment(AggregateDataPtr __restrict place, Numerator inc) const
|
||||
{
|
||||
this->data(place).numerator += inc;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -1414,9 +1414,6 @@ void ClientBase::runInteractive()
|
||||
highlight_callback = highlight;
|
||||
|
||||
ReplxxLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters, highlight_callback);
|
||||
|
||||
#elif defined(USE_READLINE) && USE_READLINE
|
||||
ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters);
|
||||
#else
|
||||
LineReader lr(history_file, config().has("multiline"), query_extenders, query_delimiters);
|
||||
#endif
|
||||
@ -1494,17 +1491,14 @@ void ClientBase::runNonInteractive()
|
||||
{
|
||||
auto process_multi_query_from_file = [&](const String & file)
|
||||
{
|
||||
auto text = getQueryTextPrefix();
|
||||
String queries_from_file;
|
||||
|
||||
ReadBufferFromFile in(file);
|
||||
readStringUntilEOF(queries_from_file, in);
|
||||
|
||||
text += queries_from_file;
|
||||
return executeMultiQuery(text);
|
||||
return executeMultiQuery(queries_from_file);
|
||||
};
|
||||
|
||||
/// Read all queries into `text`.
|
||||
for (const auto & queries_file : queries_files)
|
||||
{
|
||||
for (const auto & interleave_file : interleave_queries_files)
|
||||
@ -1519,9 +1513,6 @@ void ClientBase::runNonInteractive()
|
||||
}
|
||||
|
||||
String text;
|
||||
if (is_multiquery)
|
||||
text = getQueryTextPrefix();
|
||||
|
||||
if (config().has("query"))
|
||||
{
|
||||
text += config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
|
@ -78,9 +78,6 @@ protected:
|
||||
String & query_to_execute, ASTPtr & parsed_query, const String & all_queries_text,
|
||||
std::optional<Exception> & current_exception);
|
||||
|
||||
/// For non-interactive multi-query mode get queries text prefix.
|
||||
virtual String getQueryTextPrefix() { return ""; }
|
||||
|
||||
static void clearTerminal();
|
||||
void showClientVersion();
|
||||
|
||||
@ -100,9 +97,10 @@ protected:
|
||||
const std::vector<Arguments> & external_tables_arguments) = 0;
|
||||
virtual void processConfig() = 0;
|
||||
|
||||
private:
|
||||
protected:
|
||||
bool processQueryText(const String & text);
|
||||
|
||||
private:
|
||||
void receiveResult(ASTPtr parsed_query);
|
||||
bool receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled);
|
||||
void receiveLogs(ASTPtr parsed_query);
|
||||
|
@ -601,6 +601,7 @@
|
||||
M(631, UNKNOWN_FILE_SIZE) \
|
||||
M(632, UNEXPECTED_DATA_AFTER_PARSED_VALUE) \
|
||||
M(633, QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW) \
|
||||
M(634, MONGODB_ERROR) \
|
||||
\
|
||||
M(999, KEEPER_EXCEPTION) \
|
||||
M(1000, POCO_EXCEPTION) \
|
||||
|
@ -1,6 +1,5 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <Common/config.h>
|
||||
#include "config_core.h"
|
||||
|
||||
#if USE_NURAFT
|
||||
@ -15,7 +14,6 @@
|
||||
#include <Coordination/WriteBufferFromNuraftBuffer.h>
|
||||
#include <Coordination/ReadBufferFromNuraftBuffer.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperCommon.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperIO.h>
|
||||
#include <Common/Exception.h>
|
||||
|
@ -45,6 +45,15 @@ std::pair<std::string, std::string> splitName(const std::string & name)
|
||||
return {name.substr(0, idx), name.substr(idx + 1)};
|
||||
}
|
||||
|
||||
std::pair<std::string_view, std::string_view> splitName(const std::string_view & name)
|
||||
{
|
||||
auto idx = name.find_first_of('.');
|
||||
if (idx == std::string::npos || idx == 0 || idx + 1 == name.size())
|
||||
return {name, {}};
|
||||
|
||||
return {name.substr(0, idx), name.substr(idx + 1)};
|
||||
}
|
||||
|
||||
|
||||
std::string extractTableName(const std::string & nested_name)
|
||||
{
|
||||
@ -211,6 +220,7 @@ void validateArraySizes(const Block & block)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
std::unordered_set<String> getAllTableNames(const Block & block)
|
||||
{
|
||||
std::unordered_set<String> nested_table_names;
|
||||
|
@ -12,6 +12,7 @@ namespace Nested
|
||||
std::string concatenateName(const std::string & nested_table_name, const std::string & nested_field_name);
|
||||
|
||||
std::pair<std::string, std::string> splitName(const std::string & name);
|
||||
std::pair<std::string_view, std::string_view> splitName(const std::string_view & name);
|
||||
|
||||
/// Returns the prefix of the name to the first '.'. Or the name is unchanged if there is no dot.
|
||||
std::string extractTableName(const std::string & nested_name);
|
||||
|
@ -43,6 +43,7 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c
|
||||
, db_uuid(uuid)
|
||||
{
|
||||
assert(db_uuid != UUIDHelpers::Nil);
|
||||
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
|
||||
fs::create_directories(path_to_table_symlinks);
|
||||
tryCreateMetadataSymlink();
|
||||
}
|
||||
|
@ -1,9 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
#include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/AsynchronousReader.h>
|
||||
#include <utility>
|
||||
|
@ -1,9 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
#include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#include <Disks/IDiskRemote.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadSettings.h>
|
||||
|
@ -71,6 +71,7 @@ ProtobufSchemas::~ProtobufSchemas() = default;
|
||||
|
||||
const google::protobuf::Descriptor * ProtobufSchemas::getMessageTypeForFormatSchema(const FormatSchemaInfo & info)
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
auto it = importers.find(info.schemaDirectory());
|
||||
if (it == importers.end())
|
||||
it = importers.emplace(info.schemaDirectory(), std::make_unique<ImporterWithSourceTree>(info.schemaDirectory())).first;
|
||||
|
@ -4,6 +4,7 @@
|
||||
#if USE_PROTOBUF
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <unordered_map>
|
||||
#include <base/types.h>
|
||||
#include <boost/noncopyable.hpp>
|
||||
@ -39,6 +40,7 @@ public:
|
||||
private:
|
||||
class ImporterWithSourceTree;
|
||||
std::unordered_map<String, std::unique_ptr<ImporterWithSourceTree>> importers;
|
||||
std::mutex mutex;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -28,6 +28,7 @@
|
||||
# include <DataTypes/Serializations/SerializationFixedString.h>
|
||||
# include <Formats/ProtobufReader.h>
|
||||
# include <Formats/ProtobufWriter.h>
|
||||
# include <Formats/RowInputMissingColumnsFiller.h>
|
||||
# include <IO/Operators.h>
|
||||
# include <IO/ReadBufferFromString.h>
|
||||
# include <IO/ReadHelpers.h>
|
||||
@ -2147,9 +2148,11 @@ namespace
|
||||
std::vector<FieldDesc> && field_descs_,
|
||||
const FieldDescriptor * parent_field_descriptor_,
|
||||
bool with_length_delimiter_,
|
||||
std::unique_ptr<RowInputMissingColumnsFiller> missing_columns_filler_,
|
||||
const ProtobufReaderOrWriter & reader_or_writer_)
|
||||
: parent_field_descriptor(parent_field_descriptor_)
|
||||
, with_length_delimiter(with_length_delimiter_)
|
||||
, missing_columns_filler(std::move(missing_columns_filler_))
|
||||
, should_skip_if_empty(parent_field_descriptor ? shouldSkipZeroOrEmpty(*parent_field_descriptor) : false)
|
||||
, reader(reader_or_writer_.reader)
|
||||
, writer(reader_or_writer_.writer)
|
||||
@ -2170,8 +2173,6 @@ namespace
|
||||
if (!num_columns_)
|
||||
wrongNumberOfColumns(num_columns_, ">0");
|
||||
|
||||
columns.assign(columns_, columns_ + num_columns_);
|
||||
|
||||
std::vector<ColumnPtr> field_columns;
|
||||
for (const FieldInfo & info : field_infos)
|
||||
{
|
||||
@ -2188,13 +2189,17 @@ namespace
|
||||
|
||||
if (reader)
|
||||
{
|
||||
missing_column_indices.resize(num_columns_);
|
||||
for (size_t column_index : collections::range(num_columns_))
|
||||
missing_column_indices[column_index] = column_index;
|
||||
for (const auto & field_info : field_infos)
|
||||
for (size_t column_index : field_info.column_indices)
|
||||
missing_column_indices[column_index] = static_cast<size_t>(-1);
|
||||
boost::range::remove_erase(missing_column_indices, static_cast<size_t>(-1));
|
||||
mutable_columns.resize(num_columns_);
|
||||
for (size_t i : collections::range(num_columns_))
|
||||
mutable_columns[i] = columns_[i]->assumeMutable();
|
||||
|
||||
std::vector<UInt8> column_is_missing;
|
||||
column_is_missing.resize(num_columns_, true);
|
||||
for (const FieldInfo & info : field_infos)
|
||||
for (size_t i : info.column_indices)
|
||||
column_is_missing[i] = false;
|
||||
|
||||
has_missing_columns = (std::find(column_is_missing.begin(), column_is_missing.end(), true) != column_is_missing.end());
|
||||
}
|
||||
}
|
||||
|
||||
@ -2243,7 +2248,7 @@ namespace
|
||||
{
|
||||
last_field_index = 0;
|
||||
last_field_tag = field_infos[0].field_tag;
|
||||
size_t old_size = columns.empty() ? 0 : columns[0]->size();
|
||||
size_t old_size = mutable_columns.empty() ? 0 : mutable_columns[0]->size();
|
||||
|
||||
try
|
||||
{
|
||||
@ -2268,10 +2273,10 @@ namespace
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
for (auto & column : columns)
|
||||
for (auto & column : mutable_columns)
|
||||
{
|
||||
if (column->size() > old_size)
|
||||
column->assumeMutableRef().popBack(column->size() - old_size);
|
||||
column->popBack(column->size() - old_size);
|
||||
}
|
||||
throw;
|
||||
}
|
||||
@ -2342,13 +2347,8 @@ namespace
|
||||
|
||||
void addDefaultsToMissingColumns(size_t row_num)
|
||||
{
|
||||
for (size_t column_index : missing_column_indices)
|
||||
{
|
||||
auto & column = columns[column_index];
|
||||
size_t old_size = column->size();
|
||||
if (row_num >= old_size)
|
||||
column->assumeMutableRef().insertDefault();
|
||||
}
|
||||
if (has_missing_columns)
|
||||
missing_columns_filler->addDefaults(mutable_columns, row_num);
|
||||
}
|
||||
|
||||
struct FieldInfo
|
||||
@ -2374,13 +2374,14 @@ namespace
|
||||
|
||||
const FieldDescriptor * const parent_field_descriptor;
|
||||
const bool with_length_delimiter;
|
||||
const std::unique_ptr<RowInputMissingColumnsFiller> missing_columns_filler;
|
||||
const bool should_skip_if_empty;
|
||||
ProtobufReader * const reader;
|
||||
ProtobufWriter * const writer;
|
||||
std::vector<FieldInfo> field_infos;
|
||||
std::unordered_map<int, size_t> field_index_by_field_tag;
|
||||
Columns columns;
|
||||
std::vector<size_t> missing_column_indices;
|
||||
MutableColumns mutable_columns;
|
||||
bool has_missing_columns = false;
|
||||
int last_field_tag = 0;
|
||||
size_t last_field_index = static_cast<size_t>(-1);
|
||||
};
|
||||
@ -2626,7 +2627,8 @@ namespace
|
||||
with_length_delimiter,
|
||||
/* parent_field_descriptor = */ nullptr,
|
||||
used_column_indices,
|
||||
/* columns_are_reordered_outside = */ false);
|
||||
/* columns_are_reordered_outside = */ false,
|
||||
/* check_nested_while_filling_missing_columns = */ true);
|
||||
|
||||
if (!message_serializer)
|
||||
{
|
||||
@ -2813,7 +2815,8 @@ namespace
|
||||
bool with_length_delimiter,
|
||||
const FieldDescriptor * parent_field_descriptor,
|
||||
std::vector<size_t> & used_column_indices,
|
||||
bool columns_are_reordered_outside)
|
||||
bool columns_are_reordered_outside,
|
||||
bool check_nested_while_filling_missing_columns)
|
||||
{
|
||||
std::vector<std::string_view> column_names_sv;
|
||||
column_names_sv.reserve(num_columns);
|
||||
@ -2828,7 +2831,8 @@ namespace
|
||||
with_length_delimiter,
|
||||
parent_field_descriptor,
|
||||
used_column_indices,
|
||||
columns_are_reordered_outside);
|
||||
columns_are_reordered_outside,
|
||||
check_nested_while_filling_missing_columns);
|
||||
}
|
||||
|
||||
std::unique_ptr<ProtobufSerializerMessage> buildMessageSerializerImpl(
|
||||
@ -2839,7 +2843,8 @@ namespace
|
||||
bool with_length_delimiter,
|
||||
const FieldDescriptor * parent_field_descriptor,
|
||||
std::vector<size_t> & used_column_indices,
|
||||
bool columns_are_reordered_outside)
|
||||
bool columns_are_reordered_outside,
|
||||
bool check_nested_while_filling_missing_columns)
|
||||
{
|
||||
std::vector<ProtobufSerializerMessage::FieldDesc> field_descs;
|
||||
boost::container::flat_map<const FieldDescriptor *, std::string_view> field_descriptors_in_use;
|
||||
@ -2962,7 +2967,8 @@ namespace
|
||||
/* with_length_delimiter = */ false,
|
||||
field_descriptor,
|
||||
used_column_indices_in_nested,
|
||||
/* columns_are_reordered_outside = */ true);
|
||||
/* columns_are_reordered_outside = */ true,
|
||||
/* check_nested_while_filling_missing_columns = */ false);
|
||||
|
||||
/// `columns_are_reordered_outside` is true because column indices are
|
||||
/// going to be transformed and then written to the outer message,
|
||||
@ -3001,7 +3007,8 @@ namespace
|
||||
/* with_length_delimiter = */ false,
|
||||
field_descriptor,
|
||||
used_column_indices_in_nested,
|
||||
/* columns_are_reordered_outside = */ true);
|
||||
/* columns_are_reordered_outside = */ true,
|
||||
/* check_nested_while_filling_missing_columns = */ false);
|
||||
|
||||
/// `columns_are_reordered_outside` is true because column indices are
|
||||
/// going to be transformed and then written to the outer message,
|
||||
@ -3040,8 +3047,18 @@ namespace
|
||||
if (field_descs.empty())
|
||||
return nullptr;
|
||||
|
||||
std::unique_ptr<RowInputMissingColumnsFiller> missing_columns_filler;
|
||||
if (reader_or_writer.reader)
|
||||
{
|
||||
if (check_nested_while_filling_missing_columns)
|
||||
missing_columns_filler = std::make_unique<RowInputMissingColumnsFiller>(num_columns, column_names, data_types);
|
||||
else
|
||||
missing_columns_filler = std::make_unique<RowInputMissingColumnsFiller>();
|
||||
}
|
||||
|
||||
return std::make_unique<ProtobufSerializerMessage>(
|
||||
std::move(field_descs), parent_field_descriptor, with_length_delimiter, reader_or_writer);
|
||||
std::move(field_descs), parent_field_descriptor, with_length_delimiter,
|
||||
std::move(missing_columns_filler), reader_or_writer);
|
||||
}
|
||||
|
||||
/// Builds a serializer for one-to-one match:
|
||||
@ -3147,7 +3164,8 @@ namespace
|
||||
/* with_length_delimiter = */ false,
|
||||
&field_descriptor,
|
||||
used_column_indices,
|
||||
/* columns_are_reordered_outside = */ false);
|
||||
/* columns_are_reordered_outside = */ false,
|
||||
/* check_nested_while_filling_missing_columns = */ false);
|
||||
|
||||
if (!message_serializer)
|
||||
{
|
||||
|
140
src/Formats/RowInputMissingColumnsFiller.cpp
Normal file
@ -0,0 +1,140 @@
|
||||
#include <Formats/RowInputMissingColumnsFiller.h>
|
||||
#include <Columns/ColumnArray.h>
|
||||
#include <DataTypes/NestedUtils.h>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
}
|
||||
|
||||
|
||||
RowInputMissingColumnsFiller::RowInputMissingColumnsFiller() = default;
|
||||
|
||||
RowInputMissingColumnsFiller::RowInputMissingColumnsFiller(const NamesAndTypesList & names_and_types)
|
||||
{
|
||||
std::unordered_map<std::string_view, std::vector<size_t>> nested_groups; /// Nested prefix -> column indices.
|
||||
size_t i = 0;
|
||||
for (auto it = names_and_types.begin(); it != names_and_types.end(); ++it, ++i)
|
||||
{
|
||||
const auto & name_and_type = *it;
|
||||
if (isArray(name_and_type.type))
|
||||
{
|
||||
auto split = Nested::splitName(name_and_type.name);
|
||||
if (!split.second.empty()) /// Is it really a column of Nested data structure?
|
||||
nested_groups[split.first].push_back(i);
|
||||
}
|
||||
}
|
||||
setNestedGroups(std::move(nested_groups), names_and_types.size());
|
||||
}
|
||||
|
||||
RowInputMissingColumnsFiller::RowInputMissingColumnsFiller(const Names & names, const DataTypes & types)
|
||||
{
|
||||
std::unordered_map<std::string_view, std::vector<size_t>> nested_groups; /// Nested prefix -> column indices.
|
||||
for (size_t i = 0; i != names.size(); ++i)
|
||||
{
|
||||
if (isArray(types[i]))
|
||||
{
|
||||
auto split = Nested::splitName(names[i]);
|
||||
if (!split.second.empty()) /// Is it really a column of Nested data structure?
|
||||
nested_groups[split.first].push_back(i);
|
||||
}
|
||||
}
|
||||
setNestedGroups(std::move(nested_groups), names.size());
|
||||
}
|
||||
|
||||
RowInputMissingColumnsFiller::RowInputMissingColumnsFiller(size_t count, const std::string_view * names, const DataTypePtr * types)
|
||||
{
|
||||
std::unordered_map<std::string_view, std::vector<size_t>> nested_groups; /// Nested prefix -> column indices.
|
||||
for (size_t i = 0; i != count; ++i)
|
||||
{
|
||||
if (isArray(types[i]))
|
||||
{
|
||||
auto split = Nested::splitName(names[i]);
|
||||
if (!split.second.empty()) /// Is it really a column of Nested data structure?
|
||||
nested_groups[split.first].push_back(i);
|
||||
}
|
||||
}
|
||||
setNestedGroups(std::move(nested_groups), count);
|
||||
}
|
||||
|
||||
void RowInputMissingColumnsFiller::setNestedGroups(std::unordered_map<std::string_view, std::vector<size_t>> && nested_groups, size_t num_columns)
|
||||
{
|
||||
if (!nested_groups.empty())
|
||||
{
|
||||
column_infos.resize(num_columns);
|
||||
for (auto & nested_group : nested_groups | boost::adaptors::map_values)
|
||||
{
|
||||
if (nested_group.size() <= 1)
|
||||
continue;
|
||||
auto nested_group_shared = std::make_shared<std::vector<size_t>>(std::move(nested_group));
|
||||
for (size_t i : *nested_group_shared)
|
||||
column_infos[i].nested_group = nested_group_shared;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void RowInputMissingColumnsFiller::addDefaults(MutableColumns & columns, size_t row_num) const
|
||||
{
|
||||
for (size_t i = 0; i != columns.size(); ++i)
|
||||
{
|
||||
auto & column = *columns[i];
|
||||
size_t column_size = column.size();
|
||||
if (row_num < column_size)
|
||||
continue; /// The column already has an element in this position, skipping.
|
||||
|
||||
if (row_num > column_size)
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Wrong row_number {}, expected either {} or {}", row_num, column_size - 1, column_size);
|
||||
|
||||
if ((i >= column_infos.size()) || !column_infos[i].nested_group)
|
||||
{
|
||||
column.insertDefault();
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto & nested_group = *column_infos[i].nested_group;
|
||||
size_t size_of_array = 0;
|
||||
for (size_t j : nested_group)
|
||||
{
|
||||
const auto & column_j = columns[j];
|
||||
size_t column_size_j = column_j->size();
|
||||
if (row_num < column_size_j)
|
||||
{
|
||||
const auto * column_array = typeid_cast<const ColumnArray *>(column_j.get());
|
||||
if (!column_array)
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column with Array type is not represented by ColumnArray column: {}", column_j->dumpStructure());
|
||||
const auto & offsets = column_array->getOffsets();
|
||||
size_of_array = offsets[row_num] - offsets[row_num - 1];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t j : nested_group)
|
||||
{
|
||||
auto & column_j = columns[j];
|
||||
size_t column_size_j = column_j->size();
|
||||
if (row_num >= column_size_j)
|
||||
{
|
||||
if (row_num > column_size_j)
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Wrong row_number {}, expected either {} or {}", row_num, column_size_j - 1, column_size_j);
|
||||
|
||||
auto * column_array = typeid_cast<ColumnArray *>(column_j.get());
|
||||
if (!column_array)
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column with Array type is not represented by ColumnArray column: {}", column_j->dumpStructure());
|
||||
|
||||
auto & data = column_array->getData();
|
||||
auto & offsets = column_array->getOffsets();
|
||||
for (size_t k = 0; k != size_of_array; ++k)
|
||||
data.insertDefault();
|
||||
offsets.push_back(data.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
40
src/Formats/RowInputMissingColumnsFiller.h
Normal file
@ -0,0 +1,40 @@
#pragma once

#include <Core/NamesAndTypes.h>


namespace DB
{

/// Adds default values to columns if they don't have a specified row yet.
/// This class can be useful for implementing IRowInputFormat.
/// For missing columns of nested structure, it creates not columns of empty arrays,
/// but columns of arrays of correct lengths.
class RowInputMissingColumnsFiller
{
public:
    /// Makes a column filler which checks nested structures while adding default values to columns.
    RowInputMissingColumnsFiller(const NamesAndTypesList & names_and_types);
    RowInputMissingColumnsFiller(const Names & names, const DataTypes & types);
    RowInputMissingColumnsFiller(size_t count, const std::string_view * names, const DataTypePtr * types);

    /// Default constructor makes a column filler which doesn't check nested structures while
    /// adding default values to columns.
    RowInputMissingColumnsFiller();

    /// Adds default values to some columns.
    /// For each column the function checks the number of rows and if it's less than (row_num + 1)
    /// the function will add a default value to this column.
    void addDefaults(MutableColumns & columns, size_t row_num) const;

private:
    void setNestedGroups(std::unordered_map<std::string_view, std::vector<size_t>> && nested_groups, size_t num_columns);

    struct ColumnInfo
    {
        std::shared_ptr<std::vector<size_t>> nested_group;
    };
    std::vector<ColumnInfo> column_infos;
};

}
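A minimal usage sketch based only on the interface above; the function and variable names are hypothetical and this snippet is not part of the commit:

``` cpp
// Sketch: fill row `row_num` of `columns` with defaults for every column the
// input format did not provide, keeping Nested arrays at consistent lengths.
#include <Formats/RowInputMissingColumnsFiller.h>

void fillMissingForRow(const DB::NamesAndTypesList & header,
                       DB::MutableColumns & columns, size_t row_num)
{
    // Built from the expected header so that columns belonging to the same
    // Nested structure are grouped together.
    DB::RowInputMissingColumnsFiller filler(header);

    // Any column whose size is still `row_num` gets a default value appended.
    filler.addDefaults(columns, row_num);
}
```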
|
@ -9,7 +9,7 @@
|
||||
#include <Functions/extractTimeZoneFromFunctionArguments.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/FunctionsWindow.h>
|
||||
#include <Functions/FunctionsTimeWindow.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -114,7 +114,7 @@ namespace
|
||||
}
|
||||
|
||||
template <>
|
||||
struct WindowImpl<TUMBLE>
|
||||
struct TimeWindowImpl<TUMBLE>
|
||||
{
|
||||
static constexpr auto name = "tumble";
|
||||
|
||||
@ -211,7 +211,7 @@ struct WindowImpl<TUMBLE>
|
||||
};
|
||||
|
||||
template <>
|
||||
struct WindowImpl<TUMBLE_START>
|
||||
struct TimeWindowImpl<TUMBLE_START>
|
||||
{
|
||||
static constexpr auto name = "tumbleStart";
|
||||
|
||||
@ -231,7 +231,7 @@ struct WindowImpl<TUMBLE_START>
|
||||
}
|
||||
else
|
||||
{
|
||||
return std::static_pointer_cast<const DataTypeTuple>(WindowImpl<TUMBLE>::getReturnType(arguments, function_name))
|
||||
return std::static_pointer_cast<const DataTypeTuple>(TimeWindowImpl<TUMBLE>::getReturnType(arguments, function_name))
|
||||
->getElement(0);
|
||||
}
|
||||
}
|
||||
@ -249,19 +249,19 @@ struct WindowImpl<TUMBLE_START>
|
||||
result_column = time_column.column;
|
||||
}
|
||||
else
|
||||
result_column = WindowImpl<TUMBLE>::dispatchForColumns(arguments, function_name);
|
||||
result_column = TimeWindowImpl<TUMBLE>::dispatchForColumns(arguments, function_name);
|
||||
return executeWindowBound(result_column, 0, function_name);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct WindowImpl<TUMBLE_END>
|
||||
struct TimeWindowImpl<TUMBLE_END>
|
||||
{
|
||||
static constexpr auto name = "tumbleEnd";
|
||||
|
||||
[[maybe_unused]] static DataTypePtr getReturnType(const ColumnsWithTypeAndName & arguments, const String & function_name)
|
||||
{
|
||||
return WindowImpl<TUMBLE_START>::getReturnType(arguments, function_name);
|
||||
return TimeWindowImpl<TUMBLE_START>::getReturnType(arguments, function_name);
|
||||
}
|
||||
|
||||
[[maybe_unused]] static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String& function_name)
|
||||
@ -277,13 +277,13 @@ struct WindowImpl<TUMBLE_END>
|
||||
result_column = time_column.column;
|
||||
}
|
||||
else
|
||||
result_column = WindowImpl<TUMBLE>::dispatchForColumns(arguments, function_name);
|
||||
result_column = TimeWindowImpl<TUMBLE>::dispatchForColumns(arguments, function_name);
|
||||
return executeWindowBound(result_column, 1, function_name);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct WindowImpl<HOP>
|
||||
struct TimeWindowImpl<HOP>
|
||||
{
|
||||
static constexpr auto name = "hop";
|
||||
|
||||
@ -415,7 +415,7 @@ struct WindowImpl<HOP>
|
||||
};
|
||||
|
||||
template <>
|
||||
struct WindowImpl<WINDOW_ID>
|
||||
struct TimeWindowImpl<WINDOW_ID>
|
||||
{
|
||||
static constexpr auto name = "windowID";
|
||||
|
||||
@ -547,7 +547,7 @@ struct WindowImpl<WINDOW_ID>
|
||||
[[maybe_unused]] static ColumnPtr
|
||||
dispatchForTumbleColumns(const ColumnsWithTypeAndName & arguments, const String & function_name)
|
||||
{
|
||||
ColumnPtr column = WindowImpl<TUMBLE>::dispatchForColumns(arguments, function_name);
|
||||
ColumnPtr column = TimeWindowImpl<TUMBLE>::dispatchForColumns(arguments, function_name);
|
||||
return executeWindowBound(column, 1, function_name);
|
||||
}
|
||||
|
||||
@ -567,7 +567,7 @@ struct WindowImpl<WINDOW_ID>
|
||||
};
|
||||
|
||||
template <>
|
||||
struct WindowImpl<HOP_START>
|
||||
struct TimeWindowImpl<HOP_START>
|
||||
{
|
||||
static constexpr auto name = "hopStart";
|
||||
|
||||
@ -587,7 +587,7 @@ struct WindowImpl<HOP_START>
|
||||
}
|
||||
else
|
||||
{
|
||||
return std::static_pointer_cast<const DataTypeTuple>(WindowImpl<HOP>::getReturnType(arguments, function_name))->getElement(0);
|
||||
return std::static_pointer_cast<const DataTypeTuple>(TimeWindowImpl<HOP>::getReturnType(arguments, function_name))->getElement(0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -604,19 +604,19 @@ struct WindowImpl<HOP_START>
|
||||
result_column = time_column.column;
|
||||
}
|
||||
else
|
||||
result_column = WindowImpl<HOP>::dispatchForColumns(arguments, function_name);
|
||||
result_column = TimeWindowImpl<HOP>::dispatchForColumns(arguments, function_name);
|
||||
return executeWindowBound(result_column, 0, function_name);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct WindowImpl<HOP_END>
|
||||
struct TimeWindowImpl<HOP_END>
|
||||
{
|
||||
static constexpr auto name = "hopEnd";
|
||||
|
||||
[[maybe_unused]] static DataTypePtr getReturnType(const ColumnsWithTypeAndName & arguments, const String & function_name)
|
||||
{
|
||||
return WindowImpl<HOP_START>::getReturnType(arguments, function_name);
|
||||
return TimeWindowImpl<HOP_START>::getReturnType(arguments, function_name);
|
||||
}
|
||||
|
||||
[[maybe_unused]] static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name)
|
||||
@ -632,25 +632,25 @@ struct WindowImpl<HOP_END>
|
||||
result_column = time_column.column;
|
||||
}
|
||||
else
|
||||
result_column = WindowImpl<HOP>::dispatchForColumns(arguments, function_name);
|
||||
result_column = TimeWindowImpl<HOP>::dispatchForColumns(arguments, function_name);
|
||||
|
||||
return executeWindowBound(result_column, 1, function_name);
|
||||
}
|
||||
};
|
||||
|
||||
template <WindowFunctionName type>
|
||||
DataTypePtr FunctionWindow<type>::getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const
|
||||
template <TimeWindowFunctionName type>
|
||||
DataTypePtr FunctionTimeWindow<type>::getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const
|
||||
{
|
||||
return WindowImpl<type>::getReturnType(arguments, name);
|
||||
return TimeWindowImpl<type>::getReturnType(arguments, name);
|
||||
}
|
||||
|
||||
template <WindowFunctionName type>
|
||||
ColumnPtr FunctionWindow<type>::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const
|
||||
template <TimeWindowFunctionName type>
|
||||
ColumnPtr FunctionTimeWindow<type>::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const
|
||||
{
|
||||
return WindowImpl<type>::dispatchForColumns(arguments, name);
|
||||
return TimeWindowImpl<type>::dispatchForColumns(arguments, name);
|
||||
}
|
||||
|
||||
void registerFunctionsWindow(FunctionFactory& factory)
|
||||
void registerFunctionsTimeWindow(FunctionFactory& factory)
|
||||
{
|
||||
factory.registerFunction<FunctionTumble>();
|
||||
factory.registerFunction<FunctionHop>();
|
@ -7,7 +7,7 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/** Window functions:
|
||||
/** Time window functions:
|
||||
*
|
||||
* tumble(time_attr, interval [, timezone])
|
||||
*
|
||||
@ -30,7 +30,7 @@ namespace DB
|
||||
* hopEnd(time_attr, hop_interval, window_interval [, timezone])
|
||||
*
|
||||
*/
|
||||
enum WindowFunctionName
|
||||
enum TimeWindowFunctionName
|
||||
{
|
||||
TUMBLE,
|
||||
TUMBLE_START,
|
||||
@ -117,8 +117,8 @@ struct ToStartOfTransform;
|
||||
ADD_TIME(Second, 1)
|
||||
#undef ADD_TIME
|
||||
|
||||
template <WindowFunctionName type>
|
||||
struct WindowImpl
|
||||
template <TimeWindowFunctionName type>
|
||||
struct TimeWindowImpl
|
||||
{
|
||||
static constexpr auto name = "UNKNOWN";
|
||||
|
||||
@ -127,12 +127,12 @@ struct WindowImpl
|
||||
static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name);
|
||||
};
|
||||
|
||||
template <WindowFunctionName type>
|
||||
class FunctionWindow : public IFunction
|
||||
template <TimeWindowFunctionName type>
|
||||
class FunctionTimeWindow : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = WindowImpl<type>::name;
|
||||
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionWindow>(); }
|
||||
static constexpr auto name = TimeWindowImpl<type>::name;
|
||||
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionTimeWindow>(); }
|
||||
String getName() const override { return name; }
|
||||
bool isVariadic() const override { return true; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
@ -145,11 +145,11 @@ public:
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const override;
|
||||
};
|
||||
|
||||
using FunctionTumble = FunctionWindow<TUMBLE>;
|
||||
using FunctionTumbleStart = FunctionWindow<TUMBLE_START>;
|
||||
using FunctionTumbleEnd = FunctionWindow<TUMBLE_END>;
|
||||
using FunctionHop = FunctionWindow<HOP>;
|
||||
using FunctionWindowId = FunctionWindow<WINDOW_ID>;
|
||||
using FunctionHopStart = FunctionWindow<HOP_START>;
|
||||
using FunctionHopEnd = FunctionWindow<HOP_END>;
|
||||
using FunctionTumble = FunctionTimeWindow<TUMBLE>;
|
||||
using FunctionTumbleStart = FunctionTimeWindow<TUMBLE_START>;
|
||||
using FunctionTumbleEnd = FunctionTimeWindow<TUMBLE_END>;
|
||||
using FunctionHop = FunctionTimeWindow<HOP>;
|
||||
using FunctionWindowId = FunctionTimeWindow<WINDOW_ID>;
|
||||
using FunctionHopStart = FunctionTimeWindow<HOP_START>;
|
||||
using FunctionHopEnd = FunctionTimeWindow<HOP_END>;
|
||||
}
|
@ -102,7 +102,7 @@ public:
|
||||
{
|
||||
if (!length_column || length_column->onlyNull())
|
||||
{
|
||||
return array_column;
|
||||
return arguments[0].column;
|
||||
}
|
||||
else if (isColumnConst(*length_column))
|
||||
sink = GatherUtils::sliceFromLeftConstantOffsetBounded(*source, 0, length_column->getInt(0));
|
||||
|
@ -54,7 +54,7 @@ void registerFunctionValidateNestedArraySizes(FunctionFactory & factory);
|
||||
void registerFunctionsSnowflake(FunctionFactory & factory);
|
||||
void registerFunctionTid(FunctionFactory & factory);
|
||||
void registerFunctionLogTrace(FunctionFactory & factory);
|
||||
void registerFunctionsWindow(FunctionFactory &);
|
||||
void registerFunctionsTimeWindow(FunctionFactory &);
|
||||
|
||||
#if USE_SSL
|
||||
void registerFunctionEncrypt(FunctionFactory & factory);
|
||||
@ -115,7 +115,7 @@ void registerFunctions()
|
||||
registerFunctionsStringHash(factory);
|
||||
registerFunctionValidateNestedArraySizes(factory);
|
||||
registerFunctionsSnowflake(factory);
|
||||
registerFunctionsWindow(factory);
|
||||
registerFunctionsTimeWindow(factory);
|
||||
|
||||
#if USE_SSL
|
||||
registerFunctionEncrypt(factory);
|
||||
|
@ -24,6 +24,7 @@

#include <base/unaligned.h>

namespace DB
{
namespace ErrorCodes
@ -174,16 +175,14 @@ public:
        const auto & offsets_from = col_from->getOffsets();
        size_t size = offsets_from.size();
        auto & vec_res = col_res->getData();
        vec_res.resize(size);
        vec_res.resize_fill(size);

        size_t offset = 0;
        for (size_t i = 0; i < size; ++i)
        {
            ToFieldType value{};
            memcpy(&value,
            memcpy(&vec_res[i],
                &data_from[offset],
                std::min(static_cast<UInt64>(sizeof(ToFieldType)), offsets_from[i] - offset - 1));
            vec_res[i] = value;
            offset = offsets_from[i];
        }

@ -201,15 +200,18 @@ public:
        size_t step = col_from_fixed->getN();
        size_t size = data_from.size() / step;
        auto & vec_res = col_res->getData();
        vec_res.resize(size);

        size_t offset = 0;
        size_t copy_size = std::min(step, sizeof(ToFieldType));

        if (sizeof(ToFieldType) <= step)
            vec_res.resize(size);
        else
            vec_res.resize_fill(size);

        for (size_t i = 0; i < size; ++i)
        {
            ToFieldType value{};
            memcpy(&value, &data_from[offset], copy_size);
            vec_res[i] = value;
            memcpy(&vec_res[i], &data_from[offset], copy_size);
            offset += step;
        }
@ -288,7 +290,7 @@ private:
        {
            StringRef data = src.getDataAt(i);

            std::memcpy(&data_to[offset], data.data, std::min(n, data.size));
            memcpy(&data_to[offset], data.data, std::min(n, data.size));
            offset += n;
        }
    }
@ -347,10 +349,13 @@ private:
        using To = typename ToContainer::value_type;

        size_t size = from.size();
        to.resize_fill(size);

        static constexpr size_t copy_size = std::min(sizeof(From), sizeof(To));

        if (sizeof(To) <= sizeof(From))
            to.resize(size);
        else
            to.resize_fill(size);

        for (size_t i = 0; i < size; ++i)
            memcpy(static_cast<void*>(&to[i]), static_cast<const void*>(&from[i]), copy_size);
    }
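The reinterpret hunks above all hinge on one detail: the result is now filled by copying directly into the destination element with memcpy, and when the destination type is wider than the bytes being copied the untouched high bytes must already be zero, which is why the result vector switches to resize_fill() in that case. A minimal standalone sketch of that idea (plain C++, not ClickHouse code; the variable names and the little-endian remark are illustrative only):

// Standalone illustration: reinterpreting a short byte sequence as a wider
// integer only works if the bytes that memcpy does not touch are already zero,
// which is what a zero-filling resize of the destination guarantees.
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<uint8_t> bytes = {0x01, 0x02};     // source shorter than the target type
    std::vector<uint64_t> result(1, 0);                   // zero-filled destination, like resize_fill()

    const size_t copy_size = std::min(bytes.size(), sizeof(uint64_t));
    std::memcpy(result.data(), bytes.data(), copy_size);  // only the low bytes are written

    std::cout << result[0] << '\n';                       // prints 513 (0x0201) on a little-endian machine
    return 0;
}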
@ -157,7 +157,12 @@ void ArithmeticOperationsInAgrFuncMatcher::visit(const ASTFunction & func, ASTPt
void ArithmeticOperationsInAgrFuncMatcher::visit(ASTPtr & ast, Data & data)
{
    if (const auto * function_node = ast->as<ASTFunction>())
    {
        if (function_node->is_window_function)
            return;

        visit(*function_node, ast, data);
    }
}

bool ArithmeticOperationsInAgrFuncMatcher::needChildVisit(const ASTPtr & node, const ASTPtr &)
@ -1189,7 +1189,7 @@ void DDLWorker::runMainThread()
        }
        catch (...)
        {
            tryLogCurrentException(log, "Unexpected error, will try to restart main thread:");
            tryLogCurrentException(log, "Unexpected error, will try to restart main thread");
            reset_state();
            sleepForSeconds(5);
        }
@ -380,13 +380,6 @@ BlockIO InterpreterInsertQuery::execute()

    BlockIO res;

    res.pipeline.addStorageHolder(table);
    if (const auto * mv = dynamic_cast<const StorageMaterializedView *>(table.get()))
    {
        if (auto inner_table = mv->tryGetTargetTable())
            res.pipeline.addStorageHolder(inner_table);
    }

    /// What type of query: INSERT or INSERT SELECT or INSERT WATCH?
    if (is_distributed_insert_select)
    {
@ -445,6 +438,13 @@ BlockIO InterpreterInsertQuery::execute()
        }
    }

    res.pipeline.addStorageHolder(table);
    if (const auto * mv = dynamic_cast<const StorageMaterializedView *>(table.get()))
    {
        if (auto inner_table = mv->tryGetTargetTable())
            res.pipeline.addStorageHolder(inner_table);
    }

    return res;
}
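The InterpreterInsertQuery hunk above only moves the addStorageHolder() calls from the top of execute() to just before the return; what matters for correctness is that the returned BlockIO ends up co-owning the storage, not where in the function that ownership is registered. A rough standalone sketch of that shared-ownership idea (illustrative names, not the ClickHouse API):

// Illustrative sketch: a shared_ptr "holder" stored inside the pipeline keeps
// the storage alive for as long as the pipeline exists, even after the caller
// drops its own reference.
#include <iostream>
#include <memory>
#include <vector>

struct Storage { ~Storage() { std::cout << "storage destroyed\n"; } };

struct Pipeline
{
    std::vector<std::shared_ptr<Storage>> holders;
    void addStorageHolder(std::shared_ptr<Storage> s) { holders.push_back(std::move(s)); }
};

int main()
{
    auto table = std::make_shared<Storage>();
    Pipeline pipeline;
    pipeline.addStorageHolder(table);   // pipeline now co-owns the table
    table.reset();                      // caller drops its reference...
    std::cout << "pipeline still alive\n";
    return 0;                           // ...the table lives until the pipeline is destroyed
}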
@ -51,7 +51,7 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v
            }
        }
    }
    else if (function->name == "toUInt8" || function->name == "toInt8")
    else if (function->name == "toUInt8" || function->name == "toInt8" || function->name == "identity")
    {
        if (const auto * expr_list = function->arguments->as<ASTExpressionList>())
        {

@ -63,7 +63,12 @@ bool extractIdentifiers(const ASTFunction & func, std::unordered_set<ASTPtr *> &
void RewriteAnyFunctionMatcher::visit(ASTPtr & ast, Data & data)
{
    if (auto * func = ast->as<ASTFunction>())
    {
        if (func->is_window_function)
            return;

        visit(*func, ast, data);
    }
}

void RewriteAnyFunctionMatcher::visit(const ASTFunction & func, ASTPtr & ast, Data & data)

@ -10,7 +10,12 @@ namespace DB
void RewriteSumIfFunctionMatcher::visit(ASTPtr & ast, Data & data)
{
    if (auto * func = ast->as<ASTFunction>())
    {
        if (func->is_window_function)
            return;

        visit(*func, ast, data);
    }
}

void RewriteSumIfFunctionMatcher::visit(const ASTFunction & func, ASTPtr & ast, Data &)
@ -225,7 +225,13 @@ void removeColumnNullability(ColumnWithTypeAndName & column)

    if (column.column && column.column->isNullable())
    {
        column.column = column.column->convertToFullColumnIfConst();
        const auto * nullable_col = checkAndGetColumn<ColumnNullable>(*column.column);
        if (!nullable_col)
        {
            throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' is expected to be nullable", column.dumpStructure());
        }

        MutableColumnPtr mutable_column = nullable_col->getNestedColumn().cloneEmpty();
        insertFromNullableOrDefault(mutable_column, nullable_col);
        column.column = std::move(mutable_column);

@ -36,6 +36,7 @@ namespace ErrorCodes
    extern const int MONGODB_CANNOT_AUTHENTICATE;
    extern const int NOT_FOUND_COLUMN_IN_BLOCK;
    extern const int UNKNOWN_TYPE;
    extern const int MONGODB_ERROR;
}

@ -327,6 +328,14 @@ Chunk MongoDBSource::generate()

        for (auto & document : response.documents())
        {
            if (document->exists("ok") && document->exists("$err")
                && document->exists("code") && document->getInteger("ok") == 0)
            {
                auto code = document->getInteger("code");
                const Poco::MongoDB::Element::Ptr value = document->get("$err");
                auto message = static_cast<const Poco::MongoDB::ConcreteElement<String> &>(*value).value();
                throw Exception(ErrorCodes::MONGODB_ERROR, "Got error from MongoDB: {}, code: {}", message, code);
            }
            ++num_rows;

            for (const auto idx : collections::range(0, size))
@ -4,10 +4,12 @@
#include <Common/Arena.h>
#include <Common/FieldVisitorConvertToNumber.h>
#include <Common/FieldVisitorsAccurateComparison.h>
#include <Columns/ColumnLowCardinality.h>
#include <base/arithmeticOverflow.h>
#include <Columns/ColumnConst.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/getLeastSupertype.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/convertFieldToType.h>

@ -1005,6 +1007,12 @@ static void assertSameColumns(const Columns & left_all,
        assert(left_column);
        assert(right_column);

        if (const auto * left_lc = typeid_cast<const ColumnLowCardinality *>(left_column))
            left_column = left_lc->getDictionary().getNestedColumn().get();

        if (const auto * right_lc = typeid_cast<const ColumnLowCardinality *>(right_column))
            right_column = right_lc->getDictionary().getNestedColumn().get();

        assert(typeid(*left_column).hash_code()
            == typeid(*right_column).hash_code());

@ -1056,10 +1064,13 @@ void WindowTransform::appendChunk(Chunk & chunk)
    // Another problem with Const columns is that the aggregate functions
    // can't work with them, so we have to materialize them like the
    // Aggregator does.
    // Likewise, aggregate functions can't work with LowCardinality,
    // so we have to materialize them too.
    // Just materialize everything.
    auto columns = chunk.detachColumns();
    block.original_input_columns = columns;
    for (auto & column : columns)
        column = std::move(column)->convertToFullColumnIfConst();
        column = recursiveRemoveLowCardinality(std::move(column)->convertToFullColumnIfConst());
    block.input_columns = std::move(columns);

    // Initialize output columns.
@ -1302,7 +1313,7 @@ IProcessor::Status WindowTransform::prepare()
        // Output the ready block.
        const auto i = next_output_block_number - first_block_number;
        auto & block = blocks[i];
        auto columns = block.input_columns;
        auto columns = block.original_input_columns;
        for (auto & res : block.output_columns)
        {
            columns.push_back(ColumnPtr(std::move(res)));

@ -39,6 +39,7 @@ struct WindowFunctionWorkspace

struct WindowTransformBlock
{
    Columns original_input_columns;
    Columns input_columns;
    MutableColumns output_columns;
@ -1,15 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/config.h>
|
||||
#include <Core/PostgreSQLProtocol.h>
|
||||
#include <Poco/Net/TCPServerConnection.h>
|
||||
#include <base/logger_useful.h>
|
||||
#include "IServer.h"
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
|
||||
#if USE_SSL
|
||||
# include <Poco/Net/SecureStreamSocket.h>
|
||||
#endif
|
||||
|
@ -5,10 +5,7 @@
|
||||
#include <memory>
|
||||
#include <Server/IServer.h>
|
||||
#include <Core/PostgreSQLProtocol.h>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
#include <Common/config.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -86,7 +86,7 @@ std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const
|
||||
configuration.username = config.getString(collection_prefix + ".user", "");
|
||||
configuration.password = config.getString(collection_prefix + ".password", "");
|
||||
configuration.database = config.getString(collection_prefix + ".database", "");
|
||||
configuration.table = config.getString(collection_prefix + ".table", "");
|
||||
configuration.table = config.getString(collection_prefix + ".table", config.getString(collection_prefix + ".collection", ""));
|
||||
configuration.schema = config.getString(collection_prefix + ".schema", "");
|
||||
configuration.addresses_expr = config.getString(collection_prefix + ".addresses_expr", "");
|
||||
|
||||
|
@ -40,7 +40,6 @@ struct StorageMySQLConfiguration : ExternalDataSourceConfiguration
|
||||
|
||||
struct StorageMongoDBConfiguration : ExternalDataSourceConfiguration
|
||||
{
|
||||
String collection;
|
||||
String options;
|
||||
};
|
||||
|
||||
|
@ -1350,9 +1350,7 @@ void IMergeTreeDataPart::renameToDetached(const String & prefix) const
|
||||
void IMergeTreeDataPart::makeCloneInDetached(const String & prefix, const StorageMetadataPtr & /*metadata_snapshot*/) const
|
||||
{
|
||||
String destination_path = fs::path(storage.relative_data_path) / getRelativePathForDetachedPart(prefix);
|
||||
|
||||
/// Backup is not recursive (max_level is 0), so do not copy inner directories
|
||||
localBackup(volume->getDisk(), getFullRelativePath(), destination_path, 0);
|
||||
localBackup(volume->getDisk(), getFullRelativePath(), destination_path);
|
||||
volume->getDisk()->removeFileIfExists(fs::path(destination_path) / DELETE_ON_DESTROY_MARKER_FILE_NAME);
|
||||
}
|
||||
|
||||
|
@ -501,7 +501,14 @@ bool MergeTask::VerticalMergeStage::finalizeVerticalMergeForAllColumns() const
|
||||
bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() const
|
||||
{
|
||||
for (const auto & part : global_ctx->future_part->parts)
|
||||
global_ctx->new_data_part->minmax_idx->merge(*part->minmax_idx);
|
||||
{
|
||||
/// Skip empty parts,
|
||||
/// (that can be created in StorageReplicatedMergeTree::createEmptyPartInsteadOfLost())
|
||||
/// since they can incorrectly set min,
|
||||
/// that will be changed after one more merge/OPTIMIZE.
|
||||
if (!part->isEmpty())
|
||||
global_ctx->new_data_part->minmax_idx->merge(*part->minmax_idx);
|
||||
}
|
||||
|
||||
/// Print overall profiling info. NOTE: it may duplicates previous messages
|
||||
{
|
||||
|
@ -20,7 +20,6 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer(
|
||||
ChannelPtr consumer_channel_,
|
||||
RabbitMQHandler & event_handler_,
|
||||
std::vector<String> & queues_,
|
||||
size_t channel_id_base_,
|
||||
@ -30,7 +29,6 @@ ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer(
|
||||
uint32_t queue_size_,
|
||||
const std::atomic<bool> & stopped_)
|
||||
: ReadBuffer(nullptr, 0)
|
||||
, consumer_channel(std::move(consumer_channel_))
|
||||
, event_handler(event_handler_)
|
||||
, queues(queues_)
|
||||
, channel_base(channel_base_)
|
||||
@ -129,9 +127,6 @@ void ReadBufferFromRabbitMQConsumer::setupChannel()
|
||||
if (!consumer_channel)
|
||||
return;
|
||||
|
||||
/// We mark initialized only once.
|
||||
initialized = true;
|
||||
|
||||
wait_subscription.store(true);
|
||||
|
||||
consumer_channel->onReady([&]()
|
||||
|
@ -20,7 +20,6 @@ class ReadBufferFromRabbitMQConsumer : public ReadBuffer
|
||||
|
||||
public:
|
||||
ReadBufferFromRabbitMQConsumer(
|
||||
ChannelPtr consumer_channel_,
|
||||
RabbitMQHandler & event_handler_,
|
||||
std::vector<String> & queues_,
|
||||
size_t channel_id_base_,
|
||||
@ -37,7 +36,7 @@ public:
|
||||
UInt64 delivery_tag;
|
||||
String channel_id;
|
||||
|
||||
AckTracker() : delivery_tag(0), channel_id("") {}
|
||||
AckTracker() = default;
|
||||
AckTracker(UInt64 tag, String id) : delivery_tag(tag), channel_id(id) {}
|
||||
};
|
||||
|
||||
@ -75,12 +74,6 @@ public:
|
||||
auto getMessageID() const { return current.message_id; }
|
||||
auto getTimestamp() const { return current.timestamp; }
|
||||
|
||||
void initialize()
|
||||
{
|
||||
if (!initialized)
|
||||
setupChannel();
|
||||
}
|
||||
|
||||
private:
|
||||
bool nextImpl() override;
|
||||
|
||||
@ -105,9 +98,6 @@ private:
|
||||
|
||||
AckTracker last_inserted_record_info;
|
||||
UInt64 prev_tag = 0, channel_id_counter = 0;
|
||||
|
||||
/// Has initial setup after constructor been made?
|
||||
bool initialized = false;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -577,7 +577,7 @@ bool StorageRabbitMQ::updateChannel(ChannelPtr & channel)
|
||||
try
|
||||
{
|
||||
channel = connection->createChannel();
|
||||
return channel->usable();
|
||||
return true;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -587,6 +587,21 @@ bool StorageRabbitMQ::updateChannel(ChannelPtr & channel)
|
||||
}
|
||||
|
||||
|
||||
void StorageRabbitMQ::prepareChannelForBuffer(ConsumerBufferPtr buffer)
|
||||
{
|
||||
if (!buffer)
|
||||
return;
|
||||
|
||||
if (buffer->queuesCount() != queues.size())
|
||||
buffer->updateQueues(queues);
|
||||
|
||||
buffer->updateAckTracker();
|
||||
|
||||
if (updateChannel(buffer->getChannel()))
|
||||
buffer->setupChannel();
|
||||
}
|
||||
|
||||
|
||||
void StorageRabbitMQ::unbindExchange()
|
||||
{
|
||||
/* This is needed because with RabbitMQ (without special adjustments) can't, for example, properly make mv if there was insert query
|
||||
@ -715,9 +730,9 @@ void StorageRabbitMQ::startup()
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log);
|
||||
if (!is_attach)
|
||||
throw;
|
||||
tryLogCurrentException(log);
|
||||
}
|
||||
}
|
||||
else
|
||||
@ -731,15 +746,14 @@ void StorageRabbitMQ::startup()
|
||||
try
|
||||
{
|
||||
auto buffer = createReadBuffer();
|
||||
if (rabbit_is_ready)
|
||||
buffer->initialize();
|
||||
pushReadBuffer(std::move(buffer));
|
||||
++num_created_consumers;
|
||||
}
|
||||
catch (const AMQP::Exception & e)
|
||||
catch (...)
|
||||
{
|
||||
LOG_ERROR(log, "Got AMQ exception {}", e.what());
|
||||
throw;
|
||||
if (!is_attach)
|
||||
throw;
|
||||
tryLogCurrentException(log);
|
||||
}
|
||||
}
|
||||
|
||||
@ -871,9 +885,8 @@ ConsumerBufferPtr StorageRabbitMQ::popReadBuffer(std::chrono::milliseconds timeo
|
||||
|
||||
ConsumerBufferPtr StorageRabbitMQ::createReadBuffer()
|
||||
{
|
||||
ChannelPtr consumer_channel = connection->createChannel();
|
||||
return std::make_shared<ReadBufferFromRabbitMQConsumer>(
|
||||
std::move(consumer_channel), connection->getHandler(), queues, ++consumer_id,
|
||||
connection->getHandler(), queues, ++consumer_id,
|
||||
unique_strbase, log, row_delimiter, queue_size, shutdown_called);
|
||||
}
|
||||
|
||||
@ -921,7 +934,7 @@ void StorageRabbitMQ::initializeBuffers()
|
||||
if (!initialized)
|
||||
{
|
||||
for (const auto & buffer : buffers)
|
||||
buffer->initialize();
|
||||
prepareChannelForBuffer(buffer);
|
||||
initialized = true;
|
||||
}
|
||||
}
|
||||
@ -1086,19 +1099,7 @@ bool StorageRabbitMQ::streamToViews()
|
||||
if (source->needChannelUpdate())
|
||||
{
|
||||
auto buffer = source->getBuffer();
|
||||
if (buffer)
|
||||
{
|
||||
if (buffer->queuesCount() != queues.size())
|
||||
buffer->updateQueues(queues);
|
||||
|
||||
buffer->updateAckTracker();
|
||||
|
||||
if (updateChannel(buffer->getChannel()))
|
||||
{
|
||||
LOG_TRACE(log, "Connection is active, but channel update is needed");
|
||||
buffer->setupChannel();
|
||||
}
|
||||
}
|
||||
prepareChannelForBuffer(buffer);
|
||||
}
|
||||
|
||||
/* false is returned by the sendAck function in only two cases:
|
||||
|
@ -66,6 +66,7 @@ public:
|
||||
|
||||
bool updateChannel(ChannelPtr & channel);
|
||||
void updateQueues(std::vector<String> & queues_) { queues_ = queues; }
|
||||
void prepareChannelForBuffer(ConsumerBufferPtr buffer);
|
||||
|
||||
void incrementReader();
|
||||
void decrementReader();
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#include <Parsers/ASTSelectWithUnionQuery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <Interpreters/getTableExpressions.h>
|
||||
#include <Interpreters/AddDefaultDatabaseVisitor.h>
|
||||
#include <Interpreters/Context.h>
|
||||
@ -12,7 +13,6 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
SelectQueryDescription::SelectQueryDescription(const SelectQueryDescription & other)
|
||||
@ -60,9 +60,9 @@ StorageID extractDependentTableFromSelectQuery(ASTSelectQuery & query, ContextPt
|
||||
{
|
||||
auto * ast_select = subquery->as<ASTSelectWithUnionQuery>();
|
||||
if (!ast_select)
|
||||
throw Exception("Logical error while creating StorageMaterializedView. "
|
||||
"Could not retrieve table name from select query.",
|
||||
DB::ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW,
|
||||
"StorageMaterializedView cannot be created from table functions ({})",
|
||||
serializeAST(*subquery));
|
||||
if (ast_select->list_of_selects->children.size() != 1)
|
||||
throw Exception("UNION is not supported for MATERIALIZED VIEW",
|
||||
ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW);
|
||||
|
@ -67,9 +67,12 @@ void StorageMongoDB::connectIfNotConnected()
|
||||
if (!authenticated)
|
||||
{
|
||||
# if POCO_VERSION >= 0x01070800
|
||||
Poco::MongoDB::Database poco_db(database_name);
|
||||
if (!poco_db.authenticate(*connection, username, password, Poco::MongoDB::Database::AUTH_SCRAM_SHA1))
|
||||
throw Exception("Cannot authenticate in MongoDB, incorrect user or password", ErrorCodes::MONGODB_CANNOT_AUTHENTICATE);
|
||||
if (!username.empty() && !password.empty())
|
||||
{
|
||||
Poco::MongoDB::Database poco_db(database_name);
|
||||
if (!poco_db.authenticate(*connection, username, password, Poco::MongoDB::Database::AUTH_SCRAM_SHA1))
|
||||
throw Exception("Cannot authenticate in MongoDB, incorrect user or password", ErrorCodes::MONGODB_CANNOT_AUTHENTICATE);
|
||||
}
|
||||
# else
|
||||
authenticate(*connection, database_name, username, password);
|
||||
# endif
|
||||
@ -112,9 +115,7 @@ StorageMongoDBConfiguration StorageMongoDB::getConfiguration(ASTs engine_args, C
|
||||
|
||||
for (const auto & [arg_name, arg_value] : storage_specific_args)
|
||||
{
|
||||
if (arg_name == "collection")
|
||||
configuration.collection = arg_value->as<ASTLiteral>()->value.safeGet<String>();
|
||||
else if (arg_name == "options")
|
||||
if (arg_name == "options")
|
||||
configuration.options = arg_value->as<ASTLiteral>()->value.safeGet<String>();
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
@ -139,7 +140,7 @@ StorageMongoDBConfiguration StorageMongoDB::getConfiguration(ASTs engine_args, C
|
||||
configuration.host = parsed_host_port.first;
|
||||
configuration.port = parsed_host_port.second;
|
||||
configuration.database = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();
|
||||
configuration.collection = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
|
||||
configuration.table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
|
||||
configuration.username = engine_args[3]->as<ASTLiteral &>().value.safeGet<String>();
|
||||
configuration.password = engine_args[4]->as<ASTLiteral &>().value.safeGet<String>();
|
||||
|
||||
@ -163,7 +164,7 @@ void registerStorageMongoDB(StorageFactory & factory)
|
||||
configuration.host,
|
||||
configuration.port,
|
||||
configuration.database,
|
||||
configuration.collection,
|
||||
configuration.table,
|
||||
configuration.username,
|
||||
configuration.password,
|
||||
configuration.options,
|
||||
|
@ -4,7 +4,7 @@
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/FunctionsWindow.h>
|
||||
#include <Functions/FunctionsTimeWindow.h>
|
||||
#include <Interpreters/AddDefaultDatabaseVisitor.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/InDepthNodeVisitor.h>
|
||||
@ -93,7 +93,7 @@ namespace
|
||||
temp_node->setAlias("");
|
||||
if (startsWith(t->arguments->children[0]->getColumnName(), "toDateTime"))
|
||||
throw Exception(
|
||||
"The first argument of window function should not be a constant value.",
|
||||
"The first argument of time window function should not be a constant value.",
|
||||
ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
|
||||
if (!data.window_function)
|
||||
{
|
||||
@ -108,7 +108,7 @@ namespace
|
||||
else
|
||||
{
|
||||
if (serializeAST(*temp_node) != data.serialized_window_function)
|
||||
throw Exception("WINDOW VIEW only support ONE WINDOW FUNCTION", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
|
||||
throw Exception("WINDOW VIEW only support ONE TIME WINDOW FUNCTION", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
|
||||
t->name = "windowID";
|
||||
}
|
||||
}
|
||||
@ -116,7 +116,7 @@ namespace
|
||||
}
|
||||
};
|
||||
|
||||
/// Replace windowID node name with either tumble or hop.
|
||||
/// Replace windowID node name with either tumble or hop
|
||||
struct ReplaceWindowIdMatcher
|
||||
{
|
||||
public:
|
||||
@ -1042,14 +1042,14 @@ ASTPtr StorageWindowView::innerQueryParser(const ASTSelectQuery & query)
|
||||
|
||||
if (!query_info_data.is_tumble && !query_info_data.is_hop)
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY,
|
||||
"WINDOW FUNCTION is not specified for {}", getName());
|
||||
"TIME WINDOW FUNCTION is not specified for {}", getName());
|
||||
|
||||
window_id_name = query_info_data.window_id_name;
|
||||
window_id_alias = query_info_data.window_id_alias;
|
||||
timestamp_column_name = query_info_data.timestamp_column_name;
|
||||
is_tumble = query_info_data.is_tumble;
|
||||
|
||||
// Parse window function
|
||||
// Parse time window function
|
||||
ASTFunction & window_function = typeid_cast<ASTFunction &>(*query_info_data.window_function);
|
||||
const auto & arguments = window_function.arguments->children;
|
||||
extractWindowArgument(
|
||||
|
@ -39,17 +39,23 @@ def process_results(result_folder):
|
||||
test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))]
|
||||
additional_files = [os.path.join(result_folder, f) for f in test_files]
|
||||
|
||||
status = []
|
||||
status_path = os.path.join(result_folder, "check_status.tsv")
|
||||
logging.info("Found test_results.tsv")
|
||||
status = list(csv.reader(open(status_path, 'r'), delimiter='\t'))
|
||||
if os.path.exists(status_path):
|
||||
logging.info("Found test_results.tsv")
|
||||
with open(status_path, 'r', encoding='utf-8') as status_file:
|
||||
status = list(csv.reader(status_file, delimiter='\t'))
|
||||
if len(status) != 1 or len(status[0]) != 2:
|
||||
logging.info("Files in result folder %s", os.listdir(result_folder))
|
||||
return "error", "Invalid check_status.tsv", test_results, additional_files
|
||||
state, description = status[0][0], status[0][1]
|
||||
|
||||
results_path = os.path.join(result_folder, "test_results.tsv")
|
||||
test_results = list(csv.reader(open(results_path, 'r'), delimiter='\t'))
|
||||
if os.path.exists(results_path):
|
||||
with open(results_path, 'r', encoding='utf-8') as results_file:
|
||||
test_results = list(csv.reader(results_file, delimiter='\t'))
|
||||
if len(test_results) == 0:
|
||||
raise Exception("Empty results")
|
||||
return "error", "Empty test_results.tsv", test_results, additional_files
|
||||
|
||||
return state, description, test_results, additional_files
|
||||
|
||||
|
@ -93,20 +93,30 @@ def process_results(result_folder, server_log_path):
|
||||
server_log_files = [f for f in os.listdir(server_log_path) if os.path.isfile(os.path.join(server_log_path, f))]
|
||||
additional_files = additional_files + [os.path.join(server_log_path, f) for f in server_log_files]
|
||||
|
||||
status = []
|
||||
status_path = os.path.join(result_folder, "check_status.tsv")
|
||||
logging.info("Found test_results.tsv")
|
||||
with open(status_path, 'r', encoding='utf-8') as status_file:
|
||||
status = list(csv.reader(status_file, delimiter='\t'))
|
||||
if os.path.exists(status_path):
|
||||
logging.info("Found test_results.tsv")
|
||||
with open(status_path, 'r', encoding='utf-8') as status_file:
|
||||
status = list(csv.reader(status_file, delimiter='\t'))
|
||||
|
||||
if len(status) != 1 or len(status[0]) != 2:
|
||||
logging.info("Files in result folder %s", os.listdir(result_folder))
|
||||
return "error", "Invalid check_status.tsv", test_results, additional_files
|
||||
state, description = status[0][0], status[0][1]
|
||||
|
||||
results_path = os.path.join(result_folder, "test_results.tsv")
|
||||
|
||||
if os.path.exists(results_path):
|
||||
logging.info("Found test_results.tsv")
|
||||
else:
|
||||
logging.info("Files in result folder %s", os.listdir(result_folder))
|
||||
return "error", "Not found test_results.tsv", test_results, additional_files
|
||||
|
||||
with open(results_path, 'r', encoding='utf-8') as results_file:
|
||||
test_results = list(csv.reader(results_file, delimiter='\t'))
|
||||
if len(test_results) == 0:
|
||||
raise Exception("Empty results")
|
||||
return "error", "Empty test_results.tsv", test_results, additional_files
|
||||
|
||||
return state, description, test_results, additional_files
|
||||
|
||||
|
@ -76,23 +76,24 @@ def process_results(result_folder):
|
||||
test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))]
|
||||
additional_files = [os.path.join(result_folder, f) for f in test_files]
|
||||
|
||||
status = []
|
||||
status_path = os.path.join(result_folder, "check_status.tsv")
|
||||
if os.path.exists(status_path):
|
||||
logging.info("Found test_results.tsv")
|
||||
with open(status_path, 'r', encoding='utf-8') as status_file:
|
||||
status = list(csv.reader(status_file, delimiter='\t'))
|
||||
else:
|
||||
status = []
|
||||
|
||||
if len(status) != 1 or len(status[0]) != 2:
|
||||
logging.info("Files in result folder %s", os.listdir(result_folder))
|
||||
return "error", "Invalid check_status.tsv", test_results, additional_files
|
||||
state, description = status[0][0], status[0][1]
|
||||
|
||||
results_path = os.path.join(result_folder, "test_results.tsv")
|
||||
with open(results_path, 'r', encoding='utf-8') as results_file:
|
||||
test_results = list(csv.reader(results_file, delimiter='\t'))
|
||||
if os.path.exists(results_path):
|
||||
with open(results_path, 'r', encoding='utf-8') as results_file:
|
||||
test_results = list(csv.reader(results_file, delimiter='\t'))
|
||||
if len(test_results) == 0:
|
||||
raise Exception("Empty results")
|
||||
return "error", "Empty test_results.tsv", test_results, additional_files
|
||||
|
||||
return state, description, test_results, additional_files
|
||||
|
||||
|
@ -28,10 +28,14 @@ def process_result(result_folder):
|
||||
test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))]
|
||||
additional_files = [os.path.join(result_folder, f) for f in test_files]
|
||||
|
||||
status = []
|
||||
status_path = os.path.join(result_folder, "check_status.tsv")
|
||||
logging.info("Found test_results.tsv")
|
||||
status = list(csv.reader(open(status_path, 'r'), delimiter='\t'))
|
||||
if os.path.exists(status_path):
|
||||
logging.info("Found test_results.tsv")
|
||||
with open(status_path, 'r', encoding='utf-8') as status_file:
|
||||
status = list(csv.reader(status_file, delimiter='\t'))
|
||||
if len(status) != 1 or len(status[0]) != 2:
|
||||
logging.info("Files in result folder %s", os.listdir(result_folder))
|
||||
return "error", "Invalid check_status.tsv", test_results, additional_files
|
||||
state, description = status[0][0], status[0][1]
|
||||
|
||||
|
@ -23,9 +23,10 @@ SUSPICIOUS_PATTERNS = [
|
||||
]
|
||||
|
||||
MAX_RETRY = 5
|
||||
MAX_WORKFLOW_RERUN = 5
|
||||
|
||||
WorkflowDescription = namedtuple('WorkflowDescription',
|
||||
['name', 'action', 'run_id', 'event', 'workflow_id', 'conclusion', 'status',
|
||||
['name', 'action', 'run_id', 'event', 'workflow_id', 'conclusion', 'status', 'api_url',
|
||||
'fork_owner_login', 'fork_branch', 'rerun_url', 'jobs_url', 'attempt', 'url'])
|
||||
|
||||
TRUSTED_WORKFLOW_IDS = {
|
||||
@ -192,6 +193,7 @@ def get_workflow_description_from_event(event):
|
||||
jobs_url = event['workflow_run']['jobs_url']
|
||||
rerun_url = event['workflow_run']['rerun_url']
|
||||
url = event['workflow_run']['html_url']
|
||||
api_url = event['workflow_run']['url']
|
||||
return WorkflowDescription(
|
||||
name=name,
|
||||
action=action,
|
||||
@ -205,7 +207,8 @@ def get_workflow_description_from_event(event):
|
||||
status=status,
|
||||
jobs_url=jobs_url,
|
||||
rerun_url=rerun_url,
|
||||
url=url
|
||||
url=url,
|
||||
api_url=api_url
|
||||
)
|
||||
|
||||
def get_pr_author_and_orgs(pull_request):
|
||||
@ -273,15 +276,29 @@ def get_token_from_aws():
|
||||
installation_id = get_installation_id(encoded_jwt)
|
||||
return get_access_token(encoded_jwt, installation_id)
|
||||
|
||||
def get_workflow_jobs(workflow_description):
|
||||
jobs_url = workflow_description.api_url + f"/attempts/{workflow_description.attempt}/jobs"
|
||||
jobs = []
|
||||
i = 1
|
||||
while True:
|
||||
got_jobs = _exec_get_with_retry(jobs_url + f"?page={i}")
|
||||
if len(got_jobs['jobs']) == 0:
|
||||
break
|
||||
|
||||
jobs += got_jobs['jobs']
|
||||
i += 1
|
||||
|
||||
return jobs
|
||||
|
||||
def check_need_to_rerun(workflow_description):
|
||||
if workflow_description.attempt >= 2:
|
||||
if workflow_description.attempt >= MAX_WORKFLOW_RERUN:
|
||||
print("Not going to rerun workflow because it's already tried more than two times")
|
||||
return False
|
||||
print("Going to check jobs")
|
||||
|
||||
jobs = _exec_get_with_retry(workflow_description.jobs_url + "?per_page=100")
|
||||
print("Got jobs", len(jobs['jobs']))
|
||||
for job in jobs['jobs']:
|
||||
jobs = get_workflow_jobs(workflow_description)
|
||||
print("Got jobs", len(jobs))
|
||||
for job in jobs:
|
||||
if job['conclusion'] not in ('success', 'skipped'):
|
||||
print("Job", job['name'], "failed, checking steps")
|
||||
for step in job['steps']:
|
||||
|
@ -573,7 +573,7 @@ class TestCase:
|
||||
database = args.testcase_database
|
||||
|
||||
# This is for .sh tests
|
||||
os.environ["CLICKHOUSE_LOG_COMMENT"] = self.case_file
|
||||
os.environ["CLICKHOUSE_LOG_COMMENT"] = args.testcase_basename
|
||||
|
||||
params = {
|
||||
'client': client + ' --database=' + database,
|
||||
|
@ -725,6 +725,8 @@ class ClickHouseCluster:
|
||||
env_variables['MONGO_HOST'] = self.mongo_host
|
||||
env_variables['MONGO_EXTERNAL_PORT'] = str(self.mongo_port)
|
||||
env_variables['MONGO_INTERNAL_PORT'] = "27017"
|
||||
env_variables['MONGO_EXTERNAL_PORT_2'] = "27018"
|
||||
env_variables['MONGO_INTERNAL_PORT_2'] = "27017"
|
||||
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_mongo.yml')])
|
||||
self.base_mongo_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name,
|
||||
'--file', p.join(docker_compose_yml_dir, 'docker_compose_mongo.yml')]
|
||||
@ -2107,7 +2109,7 @@ class ClickHouseInstance:
|
||||
except Exception as e:
|
||||
logging.warning(f"Current start attempt failed. Will kill {pid} just in case.")
|
||||
self.exec_in_container(["bash", "-c", f"kill -9 {pid}"], user='root', nothrow=True)
|
||||
time.sleep(time_to_sleep)
|
||||
time.sleep(time_to_sleep)
|
||||
|
||||
raise Exception("Cannot start ClickHouse, see additional info in logs")
|
||||
|
||||
|
@ -985,18 +985,29 @@ def test_abrupt_server_restart_while_heavy_replication(started_cluster):
|
||||
cursor.execute('drop table if exists postgresql_replica_{};'.format(i))
|
||||
|
||||
|
||||
def test_quoting(started_cluster):
|
||||
table_name = 'user'
|
||||
conn = get_postgres_conn(ip=started_cluster.postgres_ip,
|
||||
port=started_cluster.postgres_port,
|
||||
database=True)
|
||||
def test_quoting_1(started_cluster):
|
||||
conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True)
|
||||
cursor = conn.cursor()
|
||||
table_name = 'user'
|
||||
create_postgres_table(cursor, table_name);
|
||||
instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(50)".format(table_name))
|
||||
instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(50)")
|
||||
create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port)
|
||||
check_tables_are_synchronized(table_name);
|
||||
drop_postgres_table(cursor, table_name)
|
||||
drop_materialized_db()
|
||||
drop_postgres_table(cursor, table_name)
|
||||
|
||||
|
||||
def test_quoting_2(started_cluster):
|
||||
conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True)
|
||||
cursor = conn.cursor()
|
||||
table_name = 'user'
|
||||
create_postgres_table(cursor, table_name);
|
||||
instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(50)")
|
||||
create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port,
|
||||
settings=[f"materialized_postgresql_tables_list = '{table_name}'"])
|
||||
check_tables_are_synchronized(table_name);
|
||||
drop_materialized_db()
|
||||
drop_postgres_table(cursor, table_name)
|
||||
|
||||
|
||||
def test_user_managed_slots(started_cluster):
|
||||
|
@ -0,0 +1,19 @@
|
||||
syntax = "proto3";
|
||||
option optimize_for = SPEED;
|
||||
message Message {
|
||||
uint32 tnow = 1;
|
||||
string server = 2;
|
||||
string clien = 3;
|
||||
uint32 sPort = 4;
|
||||
uint32 cPort = 5;
|
||||
repeated dd r = 6;
|
||||
string method = 7;
|
||||
}
|
||||
|
||||
message dd {
|
||||
string name = 1;
|
||||
uint32 class = 2;
|
||||
uint32 type = 3;
|
||||
uint64 ttl = 4;
|
||||
bytes data = 5;
|
||||
}
|
@ -0,0 +1,180 @@
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: clickhouse_path/format_schemas/message_with_repeated.proto
|
||||
|
||||
import sys
|
||||
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor.FileDescriptor(
|
||||
name='clickhouse_path/format_schemas/message_with_repeated.proto',
|
||||
package='',
|
||||
syntax='proto3',
|
||||
serialized_options=_b('H\001'),
|
||||
serialized_pb=_b('\n:clickhouse_path/format_schemas/message_with_repeated.proto\"t\n\x07Message\x12\x0c\n\x04tnow\x18\x01 \x01(\r\x12\x0e\n\x06server\x18\x02 \x01(\t\x12\r\n\x05\x63lien\x18\x03 \x01(\t\x12\r\n\x05sPort\x18\x04 \x01(\r\x12\r\n\x05\x63Port\x18\x05 \x01(\r\x12\x0e\n\x01r\x18\x06 \x03(\x0b\x32\x03.dd\x12\x0e\n\x06method\x18\x07 \x01(\t\"J\n\x02\x64\x64\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x63lass\x18\x02 \x01(\r\x12\x0c\n\x04type\x18\x03 \x01(\r\x12\x0b\n\x03ttl\x18\x04 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x42\x02H\x01\x62\x06proto3')
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
||||
_MESSAGE = _descriptor.Descriptor(
|
||||
name='Message',
|
||||
full_name='Message',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tnow', full_name='Message.tnow', index=0,
|
||||
number=1, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='server', full_name='Message.server', index=1,
|
||||
number=2, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='clien', full_name='Message.clien', index=2,
|
||||
number=3, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='sPort', full_name='Message.sPort', index=3,
|
||||
number=4, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='cPort', full_name='Message.cPort', index=4,
|
||||
number=5, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='r', full_name='Message.r', index=5,
|
||||
number=6, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='method', full_name='Message.method', index=6,
|
||||
number=7, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=62,
|
||||
serialized_end=178,
|
||||
)
|
||||
|
||||
|
||||
_DD = _descriptor.Descriptor(
|
||||
name='dd',
|
||||
full_name='dd',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='name', full_name='dd.name', index=0,
|
||||
number=1, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='class', full_name='dd.class', index=1,
|
||||
number=2, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='type', full_name='dd.type', index=2,
|
||||
number=3, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='ttl', full_name='dd.ttl', index=3,
|
||||
number=4, type=4, cpp_type=4, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='data', full_name='dd.data', index=4,
|
||||
number=5, type=12, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b(""),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=180,
|
||||
serialized_end=254,
|
||||
)
|
||||
|
||||
_MESSAGE.fields_by_name['r'].message_type = _DD
|
||||
DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE
|
||||
DESCRIPTOR.message_types_by_name['dd'] = _DD
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict(
|
||||
DESCRIPTOR = _MESSAGE,
|
||||
__module__ = 'clickhouse_path.format_schemas.message_with_repeated_pb2'
|
||||
# @@protoc_insertion_point(class_scope:Message)
|
||||
))
|
||||
_sym_db.RegisterMessage(Message)
|
||||
|
||||
dd = _reflection.GeneratedProtocolMessageType('dd', (_message.Message,), dict(
|
||||
DESCRIPTOR = _DD,
|
||||
__module__ = 'clickhouse_path.format_schemas.message_with_repeated_pb2'
|
||||
# @@protoc_insertion_point(class_scope:dd)
|
||||
))
|
||||
_sym_db.RegisterMessage(dd)
|
||||
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
# @@protoc_insertion_point(module_scope)
|
@ -36,6 +36,7 @@ from kafka.admin import NewTopic
|
||||
|
||||
from . import kafka_pb2
|
||||
from . import social_pb2
|
||||
from . import message_with_repeated_pb2
|
||||
|
||||
|
||||
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
|
||||
@ -3219,6 +3220,124 @@ def test_kafka_predefined_configuration(kafka_cluster):
|
||||
kafka_check_result(result, True)
|
||||
|
||||
|
||||
# https://github.com/ClickHouse/ClickHouse/issues/26643
|
||||
def test_issue26643(kafka_cluster):
|
||||
|
||||
# for backporting:
|
||||
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
|
||||
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
|
||||
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
|
||||
|
||||
topic_list = []
|
||||
topic_list.append(NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1))
|
||||
admin_client.create_topics(new_topics=topic_list, validate_only=False)
|
||||
|
||||
msg = message_with_repeated_pb2.Message(
|
||||
tnow=1629000000,
|
||||
server='server1',
|
||||
clien='host1',
|
||||
sPort=443,
|
||||
cPort=50000,
|
||||
r=[
|
||||
message_with_repeated_pb2.dd(name='1', type=444, ttl=123123, data=b'adsfasd'),
|
||||
message_with_repeated_pb2.dd(name='2')
|
||||
],
|
||||
method='GET'
|
||||
)
|
||||
|
||||
data = b''
|
||||
serialized_msg = msg.SerializeToString()
|
||||
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
|
||||
|
||||
msg = message_with_repeated_pb2.Message(
|
||||
tnow=1629000002
|
||||
)
|
||||
|
||||
serialized_msg = msg.SerializeToString()
|
||||
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
|
||||
|
||||
producer.send(topic="test_issue26643", value=data)
|
||||
|
||||
data = _VarintBytes(len(serialized_msg)) + serialized_msg
|
||||
producer.send(topic="test_issue26643", value=data)
|
||||
producer.flush()
|
||||
|
||||
instance.query('''
|
||||
CREATE TABLE IF NOT EXISTS test.test_queue
|
||||
(
|
||||
`tnow` UInt32,
|
||||
`server` String,
|
||||
`client` String,
|
||||
`sPort` UInt16,
|
||||
`cPort` UInt16,
|
||||
`r.name` Array(String),
|
||||
`r.class` Array(UInt16),
|
||||
`r.type` Array(UInt16),
|
||||
`r.ttl` Array(UInt32),
|
||||
`r.data` Array(String),
|
||||
`method` String
|
||||
)
|
||||
ENGINE = Kafka
|
||||
SETTINGS
|
||||
kafka_broker_list = 'kafka1:19092',
|
||||
kafka_topic_list = 'test_issue26643',
|
||||
kafka_group_name = 'test_issue26643_group',
|
||||
kafka_format = 'Protobuf',
|
||||
kafka_schema = 'message_with_repeated.proto:Message',
|
||||
kafka_num_consumers = 4,
|
||||
kafka_skip_broken_messages = 10000;
|
||||
|
||||
SET allow_suspicious_low_cardinality_types=1;
|
||||
|
||||
CREATE TABLE test.log
|
||||
(
|
||||
`tnow` DateTime CODEC(DoubleDelta, LZ4),
|
||||
`server` LowCardinality(String),
|
||||
`client` LowCardinality(String),
|
||||
`sPort` LowCardinality(UInt16),
|
||||
`cPort` UInt16 CODEC(T64, LZ4),
|
||||
`r.name` Array(String),
|
||||
`r.class` Array(LowCardinality(UInt16)),
|
||||
`r.type` Array(LowCardinality(UInt16)),
|
||||
`r.ttl` Array(LowCardinality(UInt32)),
|
||||
`r.data` Array(String),
|
||||
`method` LowCardinality(String)
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMMDD(tnow)
|
||||
ORDER BY (tnow, server)
|
||||
TTL toDate(tnow) + toIntervalMonth(1000)
|
||||
SETTINGS index_granularity = 16384, merge_with_ttl_timeout = 7200;
|
||||
|
||||
CREATE MATERIALIZED VIEW test.test_consumer TO test.log AS
|
||||
SELECT
|
||||
toDateTime(a.tnow) AS tnow,
|
||||
a.server AS server,
|
||||
a.client AS client,
|
||||
a.sPort AS sPort,
|
||||
a.cPort AS cPort,
|
||||
a.`r.name` AS `r.name`,
|
||||
a.`r.class` AS `r.class`,
|
||||
a.`r.type` AS `r.type`,
|
||||
a.`r.ttl` AS `r.ttl`,
|
||||
a.`r.data` AS `r.data`,
|
||||
a.method AS method
|
||||
FROM test.test_queue AS a;
|
||||
''')
|
||||
|
||||
instance.wait_for_log_line("Committed offset")
|
||||
result = instance.query('SELECT * FROM test.log')
|
||||
|
||||
expected = '''\
|
||||
2021-08-15 07:00:00 server1 443 50000 ['1','2'] [0,0] [444,0] [123123,0] ['adsfasd',''] GET
|
||||
2021-08-15 07:00:02 0 0 [] [] [] [] []
|
||||
2021-08-15 07:00:02 0 0 [] [] [] [] []
|
||||
'''
|
||||
assert TSV(result) == TSV(expected)
|
||||
|
||||
# kafka_cluster.open_bash_shell('instance')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
cluster.start()
|
||||
input("Cluster created, press any key to destroy...")
|
||||
|
@ -6,7 +6,7 @@
|
||||
<host>mongo1</host>
|
||||
<port>27017</port>
|
||||
<database>test</database>
|
||||
<table>simple_table</table>
|
||||
<collection>simple_table</collection>
|
||||
</mongo1>
|
||||
</named_collections>
|
||||
</clickhouse>
|
||||
|
@ -20,8 +20,12 @@ def started_cluster(request):
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def get_mongo_connection(started_cluster, secure=False):
|
||||
connection_str = 'mongodb://root:clickhouse@localhost:{}'.format(started_cluster.mongo_port)
|
||||
def get_mongo_connection(started_cluster, secure=False, with_credentials=True):
|
||||
connection_str = ''
|
||||
if with_credentials:
|
||||
connection_str = 'mongodb://root:clickhouse@localhost:{}'.format(started_cluster.mongo_port)
|
||||
else:
|
||||
connection_str = 'mongodb://localhost:27018'
|
||||
if secure:
|
||||
connection_str += '/?tls=true&tlsAllowInvalidCertificates=true'
|
||||
return pymongo.MongoClient(connection_str)
|
||||
@ -138,4 +142,20 @@ def test_predefined_connection_configuration(started_cluster):
|
||||
|
||||
node = started_cluster.instances['node']
|
||||
node.query("create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)")
|
||||
assert node.query("SELECT count() FROM simple_mongo_table") == '100\n'
|
||||
simple_mongo_table.drop()
|
||||
|
||||
@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster'])
|
||||
def test_no_credentials(started_cluster):
|
||||
mongo_connection = get_mongo_connection(started_cluster, with_credentials=False)
|
||||
db = mongo_connection['test']
|
||||
simple_mongo_table = db['simple_table']
|
||||
data = []
|
||||
for i in range(0, 100):
|
||||
data.append({'key': i, 'data': hex(i * i)})
|
||||
simple_mongo_table.insert_many(data)
|
||||
|
||||
node = started_cluster.instances['node']
|
||||
node.query("create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')")
|
||||
assert node.query("SELECT count() FROM simple_mongo_table_2") == '100\n'
|
||||
simple_mongo_table.drop()
|
||||
|
@ -191,7 +191,7 @@
|
||||
toInt256(number) as d,
|
||||
toString(number) as f,
|
||||
toFixedString(f, 20) as g
|
||||
FROM numbers_mt(200000000)
|
||||
FROM numbers_mt(100000000)
|
||||
SETTINGS max_threads = 8
|
||||
FORMAT Null
|
||||
</query>
|
||||
|
@ -35,6 +35,8 @@ slice
|
||||
[2,NULL,4,5]
|
||||
['b','c','d']
|
||||
['b',NULL,'d']
|
||||
[] 1
|
||||
[] 1
|
||||
push back
|
||||
\N
|
||||
[1,1]
|
||||
|
Some files were not shown because too many files have changed in this diff