Merge branch 'master' into improve_greatest_least_nullable_type

This commit is contained in:
kevinyhzou 2024-06-11 09:42:57 +08:00
commit 727b75780c
257 changed files with 6901 additions and 1475 deletions

View File

@@ -19,3 +19,7 @@ charset = utf-8
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
# Some SQL results have trailing whitespace which is removed by IDEs
[tests/queries/**.reference]
trim_trailing_whitespace = false

View File

@@ -48,19 +48,17 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] <!---ci_include_stateful--> Allow: Stateful tests
- [ ] <!---ci_include_integration--> Allow: Integration Tests
- [ ] <!---ci_include_performance--> Allow: Performance tests
- [ ] <!---ci_set_normal_builds--> Allow: Normal Builds
- [ ] <!---ci_set_special_builds--> Allow: Special Builds
- [ ] <!---ci_set_non_required--> Allow: All NOT Required Checks
- [ ] <!---batch_0_1--> Allow: batch 1, 2 for multi-batch jobs
- [ ] <!---batch_2_3--> Allow: batch 3, 4, 5, 6 for multi-batch jobs
---
- [ ] <!---ci_exclude_style--> Exclude: Style check
- [ ] <!---ci_exclude_fast--> Exclude: Fast test
- [ ] <!---ci_exclude_integration--> Exclude: Integration Tests
- [ ] <!---ci_exclude_stateless--> Exclude: Stateless tests
- [ ] <!---ci_exclude_stateful--> Exclude: Stateful tests
- [ ] <!---ci_exclude_performance--> Exclude: Performance tests
- [ ] <!---ci_exclude_asan--> Exclude: All with ASAN
- [ ] <!---ci_exclude_aarch64--> Exclude: All with Aarch64
- [ ] <!---ci_exclude_tsan|msan|ubsan|coverage--> Exclude: All with TSAN, MSAN, UBSAN, Coverage
- [ ] <!---ci_exclude_aarch64|release|debug--> Exclude: All with aarch64, release, debug
---
- [ ] <!---do_not_test--> Do not test
- [ ] <!---upload_all--> Upload binaries for special builds

contrib/cld2 vendored

@@ -1 +1 @@
Subproject commit bc6d493a2f64ed1fc1c4c4b4294a542a04e04217
Subproject commit 217ba8b8805b41557faadaa47bb6e99f2242eea3

contrib/orc vendored

@@ -1 +1 @@
Subproject commit e24f2c2a3ca0769c96704ab20ad6f512a83ea2ad
Subproject commit 947cebaf9432d708253ac08dc3012daa6b4ede6f

View File

@@ -30,6 +30,7 @@ RUN pip3 install \
mypy==1.8.0 \
pylint==3.1.0 \
python-magic==0.4.24 \
flake8==4.0.1 \
requests \
thefuzz \
types-requests \

View File

@@ -9,6 +9,8 @@ echo "Check style" | ts
./check-style -n |& tee /test_output/style_output.txt
echo "Check python formatting with black" | ts
./check-black -n |& tee /test_output/black_output.txt
echo "Check python with flake8" | ts
./check-flake8 |& tee /test_output/flake8_output.txt
echo "Check python type hinting with mypy" | ts echo "Check python type hinting with mypy" | ts
./check-mypy -n |& tee /test_output/mypy_output.txt ./check-mypy -n |& tee /test_output/mypy_output.txt
echo "Check typos" | ts echo "Check typos" | ts

View File

@@ -25,7 +25,8 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment
echo "Get previous release tag"
previous_release_tag=$(dpkg --info package_folder/clickhouse-client*.deb | grep "Version: " | awk '{print $2}' | cut -f1 -d'+' | get_previous_release_tag)
# shellcheck disable=SC2016
previous_release_tag=$(dpkg-deb --showformat='${Version}' --show package_folder/clickhouse-client*.deb | get_previous_release_tag)
echo $previous_release_tag
echo "Clone previous release repository"

View File

@@ -0,0 +1,101 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.4.2.141-stable (9e23d27bd11) FIXME as compared to v24.4.1.2088-stable (6d4b31322d1)
#### Improvement
* Backported in [#63467](https://github.com/ClickHouse/ClickHouse/issues/63467): Make rabbitmq nack broken messages. Closes [#45350](https://github.com/ClickHouse/ClickHouse/issues/45350). [#60312](https://github.com/ClickHouse/ClickHouse/pull/60312) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### Build/Testing/Packaging Improvement
* Backported in [#63612](https://github.com/ClickHouse/ClickHouse/issues/63612): The Dockerfile is reviewed by the docker official library in https://github.com/docker-library/official-images/pull/15846. [#63400](https://github.com/ClickHouse/ClickHouse/pull/63400) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#64279](https://github.com/ClickHouse/ClickHouse/issues/64279): Fix queries with FINAL giving wrong results when the table does not use adaptive granularity. [#62432](https://github.com/ClickHouse/ClickHouse/pull/62432) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#63295](https://github.com/ClickHouse/ClickHouse/issues/63295): Fix crash with untuple and unresolved lambda. [#63131](https://github.com/ClickHouse/ClickHouse/pull/63131) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#63978](https://github.com/ClickHouse/ClickHouse/issues/63978): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#63413](https://github.com/ClickHouse/ClickHouse/issues/63413): Fix a misbehavior when SQL security defaults don't load for old tables during server startup. [#63209](https://github.com/ClickHouse/ClickHouse/pull/63209) ([pufit](https://github.com/pufit)).
* Backported in [#63388](https://github.com/ClickHouse/ClickHouse/issues/63388): JOIN filter push down filled join fix. Closes [#63228](https://github.com/ClickHouse/ClickHouse/issues/63228). [#63234](https://github.com/ClickHouse/ClickHouse/pull/63234) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#63618](https://github.com/ClickHouse/ClickHouse/issues/63618): Fix bug which could potentially lead to rare LOGICAL_ERROR during SELECT query with message: `Unexpected return type from materialize. Expected type_XXX. Got type_YYY.` Introduced in [#59379](https://github.com/ClickHouse/ClickHouse/issues/59379). [#63353](https://github.com/ClickHouse/ClickHouse/pull/63353) ([alesapin](https://github.com/alesapin)).
* Backported in [#63451](https://github.com/ClickHouse/ClickHouse/issues/63451): Fix `X-ClickHouse-Timezone` header returning wrong timezone when using `session_timezone` as query level setting. [#63377](https://github.com/ClickHouse/ClickHouse/pull/63377) ([Andrey Zvonov](https://github.com/zvonand)).
* Backported in [#63605](https://github.com/ClickHouse/ClickHouse/issues/63605): Fix backup of projection part in case projection was removed from table metadata, but part still has projection. [#63426](https://github.com/ClickHouse/ClickHouse/pull/63426) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#63510](https://github.com/ClickHouse/ClickHouse/issues/63510): Fix 'Every derived table must have its own alias' error for MYSQL dictionary source, close [#63341](https://github.com/ClickHouse/ClickHouse/issues/63341). [#63481](https://github.com/ClickHouse/ClickHouse/pull/63481) ([vdimir](https://github.com/vdimir)).
* Backported in [#63592](https://github.com/ClickHouse/ClickHouse/issues/63592): Avoid segfault in `MergeTreePrefetchedReadPool` while fetching projection parts. [#63513](https://github.com/ClickHouse/ClickHouse/pull/63513) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#63750](https://github.com/ClickHouse/ClickHouse/issues/63750): Read only the necessary columns from VIEW (new analyzer). Closes [#62594](https://github.com/ClickHouse/ClickHouse/issues/62594). [#63688](https://github.com/ClickHouse/ClickHouse/pull/63688) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#63772](https://github.com/ClickHouse/ClickHouse/issues/63772): Fix [#63539](https://github.com/ClickHouse/ClickHouse/issues/63539). Forbid WINDOW redefinition in new analyzer. [#63694](https://github.com/ClickHouse/ClickHouse/pull/63694) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#63872](https://github.com/ClickHouse/ClickHouse/issues/63872): Fix `flatten_nested` being broken with replicated database. [#63695](https://github.com/ClickHouse/ClickHouse/pull/63695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#63854](https://github.com/ClickHouse/ClickHouse/issues/63854): Fix `Not found column` and `CAST AS Map from array requires nested tuple of 2 elements` exceptions for distributed queries which use `Map(Nothing, Nothing)` type. Fixes [#63637](https://github.com/ClickHouse/ClickHouse/issues/63637). [#63753](https://github.com/ClickHouse/ClickHouse/pull/63753) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#63847](https://github.com/ClickHouse/ClickHouse/issues/63847): Fix possible `ILLEGAL_COLUMN` error in `partial_merge` join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#63755](https://github.com/ClickHouse/ClickHouse/pull/63755) ([vdimir](https://github.com/vdimir)).
* Backported in [#63908](https://github.com/ClickHouse/ClickHouse/issues/63908): `query_plan_remove_redundant_distinct` can break queries with WINDOW FUNCTIONS (when `allow_experimental_analyzer` is on). Fixes [#62820](https://github.com/ClickHouse/ClickHouse/issues/62820). [#63776](https://github.com/ClickHouse/ClickHouse/pull/63776) ([Igor Nikonov](https://github.com/devcrafter)).
* Backported in [#63955](https://github.com/ClickHouse/ClickHouse/issues/63955): Fix possible crash with SYSTEM UNLOAD PRIMARY KEY. [#63778](https://github.com/ClickHouse/ClickHouse/pull/63778) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#63938](https://github.com/ClickHouse/ClickHouse/issues/63938): Allow JOIN filter push down to both streams if only single equivalent column is used in query. Closes [#63799](https://github.com/ClickHouse/ClickHouse/issues/63799). [#63819](https://github.com/ClickHouse/ClickHouse/pull/63819) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#63991](https://github.com/ClickHouse/ClickHouse/issues/63991): Fix incorrect select query result when parallel replicas were used to read from a Materialized View. [#63861](https://github.com/ClickHouse/ClickHouse/pull/63861) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#64033](https://github.com/ClickHouse/ClickHouse/issues/64033): Fix an error `Database name is empty` for remote queries with lambdas over the cluster with modified default database. Fixes [#63471](https://github.com/ClickHouse/ClickHouse/issues/63471). [#63864](https://github.com/ClickHouse/ClickHouse/pull/63864) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#64561](https://github.com/ClickHouse/ClickHouse/issues/64561): Fix SIGSEGV due to CPU/Real (`query_profiler_real_time_period_ns`/`query_profiler_cpu_time_period_ns`) profiler (has been an issue since 2022, that leads to periodic server crashes, especially if you were using distributed engine). [#63865](https://github.com/ClickHouse/ClickHouse/pull/63865) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#64011](https://github.com/ClickHouse/ClickHouse/issues/64011): Fix analyzer - IN function with arbitrarily deep sub-selects in materialized view to use insertion block. [#63930](https://github.com/ClickHouse/ClickHouse/pull/63930) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#64238](https://github.com/ClickHouse/ClickHouse/issues/64238): Fix resolve of unqualified COLUMNS matcher. Preserve the input columns order and forbid usage of unknown identifiers. [#63962](https://github.com/ClickHouse/ClickHouse/pull/63962) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#64103](https://github.com/ClickHouse/ClickHouse/issues/64103): Deserialize untrusted binary inputs in a safer way. [#64024](https://github.com/ClickHouse/ClickHouse/pull/64024) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#64170](https://github.com/ClickHouse/ClickHouse/issues/64170): Add missing settings to recoverLostReplica. [#64040](https://github.com/ClickHouse/ClickHouse/pull/64040) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#64322](https://github.com/ClickHouse/ClickHouse/issues/64322): This fix will use a proper redefined context with the correct definer for each individual view in the query pipeline. Closes [#63777](https://github.com/ClickHouse/ClickHouse/issues/63777). [#64079](https://github.com/ClickHouse/ClickHouse/pull/64079) ([pufit](https://github.com/pufit)).
* Backported in [#64382](https://github.com/ClickHouse/ClickHouse/issues/64382): Fix analyzer: "Not found column" error is fixed when using INTERPOLATE. [#64096](https://github.com/ClickHouse/ClickHouse/pull/64096) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#64568](https://github.com/ClickHouse/ClickHouse/issues/64568): Fix creating backups to S3 buckets with different credentials from the disk containing the file. [#64153](https://github.com/ClickHouse/ClickHouse/pull/64153) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#64272](https://github.com/ClickHouse/ClickHouse/issues/64272): Prevent LOGICAL_ERROR on CREATE TABLE as MaterializedView. [#64174](https://github.com/ClickHouse/ClickHouse/pull/64174) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#64330](https://github.com/ClickHouse/ClickHouse/issues/64330): The query cache now considers two identical queries against different databases as different. The previous behavior could be used to bypass missing privileges to read from a table. [#64199](https://github.com/ClickHouse/ClickHouse/pull/64199) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#64254](https://github.com/ClickHouse/ClickHouse/issues/64254): Ignore `text_log` config when using Keeper. [#64218](https://github.com/ClickHouse/ClickHouse/pull/64218) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#64690](https://github.com/ClickHouse/ClickHouse/issues/64690): Fix Query Tree size validation. Closes [#63701](https://github.com/ClickHouse/ClickHouse/issues/63701). [#64377](https://github.com/ClickHouse/ClickHouse/pull/64377) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#64409](https://github.com/ClickHouse/ClickHouse/issues/64409): Fix `Logical error: Bad cast` for `Buffer` table with `PREWHERE`. Fixes [#64172](https://github.com/ClickHouse/ClickHouse/issues/64172). [#64388](https://github.com/ClickHouse/ClickHouse/pull/64388) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#64727](https://github.com/ClickHouse/ClickHouse/issues/64727): Fixed `CREATE TABLE AS` queries for tables with default expressions. [#64455](https://github.com/ClickHouse/ClickHouse/pull/64455) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#64623](https://github.com/ClickHouse/ClickHouse/issues/64623): Fix an error `Cannot find column` in distributed queries with constant CTE in the `GROUP BY` key. [#64519](https://github.com/ClickHouse/ClickHouse/pull/64519) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#64680](https://github.com/ClickHouse/ClickHouse/issues/64680): Fix [#64612](https://github.com/ClickHouse/ClickHouse/issues/64612). Do not rewrite aggregation if `-If` combinator is already used. [#64638](https://github.com/ClickHouse/ClickHouse/pull/64638) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#64942](https://github.com/ClickHouse/ClickHouse/issues/64942): Fix OrderByLimitByDuplicateEliminationVisitor across subqueries. [#64766](https://github.com/ClickHouse/ClickHouse/pull/64766) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#64871](https://github.com/ClickHouse/ClickHouse/issues/64871): Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via http protocol, asynchronous inserts. [#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)).
#### CI Fix or Improvement (changelog entry is not required)
* Backported in [#63364](https://github.com/ClickHouse/ClickHouse/issues/63364): Implement cumulative A Sync status. [#61464](https://github.com/ClickHouse/ClickHouse/pull/61464) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#63338](https://github.com/ClickHouse/ClickHouse/issues/63338): Use `/commit/` to have the URLs in [reports](https://play.clickhouse.com/play?user=play#c2VsZWN0IGRpc3RpbmN0IGNvbW1pdF91cmwgZnJvbSBjaGVja3Mgd2hlcmUgY2hlY2tfc3RhcnRfdGltZSA+PSBub3coKSAtIGludGVydmFsIDEgbW9udGggYW5kIHB1bGxfcmVxdWVzdF9udW1iZXI9NjA1MzI=) like https://github.com/ClickHouse/ClickHouse/commit/44f8bc5308b53797bec8cccc3bd29fab8a00235d and not like https://github.com/ClickHouse/ClickHouse/commits/44f8bc5308b53797bec8cccc3bd29fab8a00235d. [#63331](https://github.com/ClickHouse/ClickHouse/pull/63331) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#63376](https://github.com/ClickHouse/ClickHouse/issues/63376):. [#63366](https://github.com/ClickHouse/ClickHouse/pull/63366) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#63571](https://github.com/ClickHouse/ClickHouse/issues/63571):. [#63551](https://github.com/ClickHouse/ClickHouse/pull/63551) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#63651](https://github.com/ClickHouse/ClickHouse/issues/63651): Fix 02362_part_log_merge_algorithm flaky test. [#63635](https://github.com/ClickHouse/ClickHouse/pull/63635) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Backported in [#63828](https://github.com/ClickHouse/ClickHouse/issues/63828): Fix test_odbc_interaction from aarch64 [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63787](https://github.com/ClickHouse/ClickHouse/pull/63787) ([alesapin](https://github.com/alesapin)).
* Backported in [#63897](https://github.com/ClickHouse/ClickHouse/issues/63897): Fix test `test_catboost_evaluate` for aarch64. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63789](https://github.com/ClickHouse/ClickHouse/pull/63789) ([alesapin](https://github.com/alesapin)).
* Backported in [#63889](https://github.com/ClickHouse/ClickHouse/issues/63889): Remove HDFS from disks config for one integration test for arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63832](https://github.com/ClickHouse/ClickHouse/pull/63832) ([alesapin](https://github.com/alesapin)).
* Backported in [#63881](https://github.com/ClickHouse/ClickHouse/issues/63881): Bump version for old image in test_short_strings_aggregation to make it work on arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63836](https://github.com/ClickHouse/ClickHouse/pull/63836) ([alesapin](https://github.com/alesapin)).
* Backported in [#63919](https://github.com/ClickHouse/ClickHouse/issues/63919): Disable test `test_non_default_compression/test.py::test_preconfigured_deflateqpl_codec` on arm. [#61457](https://github.com/ClickHouse/ClickHouse/issues/61457). [#63839](https://github.com/ClickHouse/ClickHouse/pull/63839) ([alesapin](https://github.com/alesapin)).
* Backported in [#63971](https://github.com/ClickHouse/ClickHouse/issues/63971): Fix 02124_insert_deduplication_token_multiple_blocks. [#63950](https://github.com/ClickHouse/ClickHouse/pull/63950) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#64049](https://github.com/ClickHouse/ClickHouse/issues/64049): Add `ClickHouseVersion.copy` method. Create a branch release in advance without spinning out the release to increase the stability. [#64039](https://github.com/ClickHouse/ClickHouse/pull/64039) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#64078](https://github.com/ClickHouse/ClickHouse/issues/64078): The mime type is not 100% reliable for Python and shell scripts without shebangs; add a check for file extension. [#64062](https://github.com/ClickHouse/ClickHouse/pull/64062) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#64161](https://github.com/ClickHouse/ClickHouse/issues/64161): Add retries in git submodule update. [#64125](https://github.com/ClickHouse/ClickHouse/pull/64125) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#64589](https://github.com/ClickHouse/ClickHouse/issues/64589): Disabled `enable_vertical_final` setting by default. This feature should not be used because it has a bug: [#64543](https://github.com/ClickHouse/ClickHouse/issues/64543). [#64544](https://github.com/ClickHouse/ClickHouse/pull/64544) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#64880](https://github.com/ClickHouse/ClickHouse/issues/64880): This PR fixes an error when a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)).
#### NO CL CATEGORY
* Backported in [#63306](https://github.com/ClickHouse/ClickHouse/issues/63306):. [#63297](https://github.com/ClickHouse/ClickHouse/pull/63297) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#63710](https://github.com/ClickHouse/ClickHouse/issues/63710):. [#63415](https://github.com/ClickHouse/ClickHouse/pull/63415) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#64363](https://github.com/ClickHouse/ClickHouse/issues/64363) to 24.4: Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts"'. [#64905](https://github.com/ClickHouse/ClickHouse/pull/64905) ([Raúl Marín](https://github.com/Algunenano)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* group_by_use_nulls strikes back [#62922](https://github.com/ClickHouse/ClickHouse/pull/62922) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add `FROM` keyword to `TRUNCATE ALL TABLES` [#63241](https://github.com/ClickHouse/ClickHouse/pull/63241) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* More checks for concurrently deleted files and dirs in system.remote_data_paths [#63274](https://github.com/ClickHouse/ClickHouse/pull/63274) ([Alexander Gololobov](https://github.com/davenger)).
* Try fix segfault in `MergeTreeReadPoolBase::createTask` [#63323](https://github.com/ClickHouse/ClickHouse/pull/63323) ([Antonio Andelic](https://github.com/antonio2368)).
* Skip unaccessible table dirs in system.remote_data_paths [#63330](https://github.com/ClickHouse/ClickHouse/pull/63330) ([Alexander Gololobov](https://github.com/davenger)).
* Workaround for `oklch()` inside canvas bug for firefox [#63404](https://github.com/ClickHouse/ClickHouse/pull/63404) ([Sergei Trifonov](https://github.com/serxa)).
* Cancel S3 reads properly when parallel reads are used [#63687](https://github.com/ClickHouse/ClickHouse/pull/63687) ([Antonio Andelic](https://github.com/antonio2368)).
* Userspace page cache: don't collect stats if cache is unused [#63730](https://github.com/ClickHouse/ClickHouse/pull/63730) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix sanitizers [#64090](https://github.com/ClickHouse/ClickHouse/pull/64090) ([Azat Khuzhin](https://github.com/azat)).
* Split tests 03039_dynamic_all_merge_algorithms to avoid timeouts [#64363](https://github.com/ClickHouse/ClickHouse/pull/64363) ([Kruglov Pavel](https://github.com/Avogar)).
* CI: Critical bugfix category in PR template [#64480](https://github.com/ClickHouse/ClickHouse/pull/64480) ([Max K.](https://github.com/maxknv)).

View File

@@ -91,6 +91,9 @@ cd ./utils/check-style
# Check python type hinting with mypy
./check-mypy
# Check python with flake8
./check-flake8
# Check code with codespell
./check-typos

View File

@@ -54,6 +54,7 @@ SELECT * FROM test_table;
- `_path` — Path to the file. Type: `LowCardinality(String)`.
- `_file` — Name of the file. Type: `LowCardinality(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
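For illustration, a minimal sketch (the table name and schema are hypothetical, and `_time` assumes a server version that already exposes it):
```sql
-- Hypothetical File-engine table; the final SELECT reads the virtual columns listed above
CREATE TABLE file_example (x UInt64) ENGINE = File(CSV);
INSERT INTO file_example VALUES (1);
SELECT _path, _file, _size, _time FROM file_example;
```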
## See also

View File

@@ -235,6 +235,7 @@ libhdfs3 support HDFS namenode HA.
- `_path` — Path to the file. Type: `LowCardinality(String)`.
- `_file` — Name of the file. Type: `LowCardinality(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
## Storage Settings {#storage-settings}

View File

@@ -53,14 +53,14 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
This example uses the [docker compose recipe](https://github.com/ClickHouse/examples/tree/5fdc6ff72f4e5137e23ea075c88d3f44b0202490/docker-compose-recipes/recipes/ch-and-minio-S3), which integrates ClickHouse and MinIO. You should be able to reproduce the same queries using S3 by replacing the endpoint and authentication values.
Notice that the S3 endpoint in the `ENGINE` configuration uses the parameter token `{_partition_id}` as part of the S3 object (filename), and that the SELECT queries select against those resulting object names (e.g., `test_3.csv`).
:::note
As shown in the example, querying from S3 tables that are partitioned is
not directly supported at this time, but can be accomplished by querying the individual partitions
using the S3 table function.
The primary use-case for writing
partitioned data in S3 is to enable transferring that data into another
ClickHouse system (for example, moving from on-prem systems to ClickHouse
Cloud). Because ClickHouse datasets are often very large, and network
@@ -78,9 +78,9 @@ CREATE TABLE p
)
ENGINE = S3(
# highlight-next-line
'http://minio:10000/clickhouse//test_{_partition_id}.csv',
'minioadmin',
'minioadminpassword',
'CSV')
PARTITION BY column3
```
@@ -145,6 +145,7 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr
- `_path` — Path to the file. Type: `LowCardinality(String)`.
- `_file` — Name of the file. Type: `LowCardinality(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).

View File

@@ -267,7 +267,7 @@ For introspection use `system.s3queue` stateless table and `system.s3queue_log`
`exception` String
)
ENGINE = SystemS3Queue
COMMENT 'SYSTEM TABLE is built on the fly.' │
COMMENT 'Contains in-memory state of S3Queue metadata and currently processed rows per file.' │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

View File

@@ -6,35 +6,26 @@ sidebar_label: MergeTree
# MergeTree
The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most commonly used and most robust ClickHouse table engines.
The `MergeTree` engine and other engines of the `MergeTree` family (e.g. `ReplacingMergeTree`, `AggregatingMergeTree`) are the most commonly used and most robust table engines in ClickHouse.
Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert.
`MergeTree`-family table engines are designed for high data ingest rates and huge data volumes.
Insert operations create table parts which are merged by a background process with other table parts.
Main features:
Main features of `MergeTree`-family table engines.
- Stores data sorted by primary key.
- The table's primary key determines the sort order within each table part (clustered index). The primary key also does not reference individual rows but blocks of 8192 rows called granules. This makes primary keys of huge data sets small enough to remain loaded in main memory, while still providing fast access to on-disk data.
This allows you to create a small sparse index that helps find data faster.
- Tables can be partitioned using an arbitrary partition expression. Partition pruning ensures partitions are omitted from reading when the query allows it.
- Partitions can be used if the [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified.
- Data can be replicated across multiple cluster nodes for high availability, failover, and zero downtime upgrades. See [Data replication](/docs/en/engines/table-engines/mergetree-family/replication.md).
ClickHouse supports certain operations with partitions that are more efficient than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query.
- `MergeTree` table engines support various statistics kinds and sampling methods to help query optimization.
- Data replication support.
:::note
Despite a similar name, the [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine is different from `*MergeTree` engines.
The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](/docs/en/engines/table-engines/mergetree-family/replication.md).
- Data sampling support.
If necessary, you can set the data sampling method in the table.
:::info
The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family.
:::
If you need to update rows frequently, we recommend using the [`ReplacingMergeTree`](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) table engine. Using `ALTER TABLE my_table UPDATE` to update rows triggers a mutation, which causes parts to be re-written and uses IO/resources. With `ReplacingMergeTree`, you can simply insert the updated rows and the old rows will be replaced according to the table sorting key.
## Creating Tables {#table_engine-mergetree-creating-a-table}
## Creating a Table {#table_engine-mergetree-creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
@@ -59,23 +50,24 @@ ORDER BY expr
[SETTINGS name = value, ...]
```
For a description of parameters, see the [CREATE query description](/docs/en/sql-reference/statements/create/table.md).
For a detailed description of the parameters, see the [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md) statement.
### Query Clauses {#mergetree-query-clauses}
#### ENGINE
`ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine does not have parameters.
`ENGINE` — Name and parameters of the engine. `ENGINE = MergeTree()`. The `MergeTree` engine has no parameters.
#### ORDER_BY
`ORDER BY` — The sorting key.
A tuple of column names or arbitrary expressions. Example: `ORDER BY (CounterID, EventDate)`.
A tuple of column names or arbitrary expressions. Example: `ORDER BY (CounterID + 1, EventDate)`.
ClickHouse uses the sorting key as a primary key if the primary key is not defined explicitly by the `PRIMARY KEY` clause.
If no primary key is defined (i.e. `PRIMARY KEY` was not specified), ClickHouse uses the sorting key as the primary key.
Use the `ORDER BY tuple()` syntax, if you do not need sorting, or set `create_table_empty_primary_key_by_default` to `true` to use the `ORDER BY tuple()` syntax by default. See [Selecting the Primary Key](#selecting-the-primary-key).
If no sorting is required, you can use the syntax `ORDER BY tuple()`.
Alternatively, if setting `create_table_empty_primary_key_by_default` is enabled, `ORDER BY tuple()` is implicitly added to `CREATE TABLE` statements. See [Selecting a Primary Key](#selecting-a-primary-key).
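As a minimal sketch (hypothetical table name), a table that does not need a sort order:
```sql
-- No sorting key: rows are stored in insertion order within each part
CREATE TABLE events_unsorted
(
    message String
)
ENGINE = MergeTree
ORDER BY tuple();
```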
#### PARTITION BY #### PARTITION BY
@@ -87,100 +79,32 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
`PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional.
By default the primary key is the same as the sorting key (which is specified by the `ORDER BY` clause). Thus in most cases it is unnecessary to specify a separate `PRIMARY KEY` clause.
Specifying a sorting key (using the `ORDER BY` clause) implicitly specifies a primary key.
It is usually not necessary to specify the primary key in addition to the sorting key.
#### SAMPLE BY
`SAMPLE BY` — An expression for sampling. Optional.
`SAMPLE BY` — A sampling expression. Optional.
If a sampling expression is used, the primary key must contain it. The result of a sampling expression must be an unsigned integer. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
If specified, it must be contained in the primary key.
The sampling expression must result in an unsigned integer.
Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.
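A sketch putting the clause in context (hypothetical table name; the sampling expression is part of the sorting key, which here also serves as the primary key):
```sql
CREATE TABLE hits_sampled
(
    CounterID UInt32,
    EventDate Date,
    UserID UInt64
)
ENGINE = MergeTree
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID);
```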
#### TTL
`TTL` — A list of rules specifying storage duration of rows and defining logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional.
`TTL` — A list of rules that specify the storage duration of rows and the logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional.
Expression must have one `Date` or `DateTime` column as a result. Example:
Expression must result in a `Date` or `DateTime`, e.g. `TTL date + INTERVAL 1 DAY`.
```
TTL date + INTERVAL 1 DAY
```
Type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'|GROUP BY` specifies an action to be done with the part if the expression is satisfied (reaches current time): removal of expired rows, moving a part (if expression is satisfied for all rows in a part) to specified disk (`TO DISK 'xxx'`) or to volume (`TO VOLUME 'xxx'`), or aggregating values in expired rows. Default type of the rule is removal (`DELETE`). List of multiple rules can be specified, but there should be no more than one `DELETE` rule.
For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl)
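A sketch combining a move rule and a delete rule (hypothetical table and volume names; `TO VOLUME 'cold'` assumes a storage policy that defines such a volume):
```sql
CREATE TABLE ttl_example
(
    event_date Date,
    value UInt64
)
ENGINE = MergeTree
ORDER BY event_date
TTL event_date + INTERVAL 7 DAY TO VOLUME 'cold',
    event_date + INTERVAL 30 DAY DELETE;
```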
### SETTINGS
#### SETTINGS
Additional parameters that control the behavior of the `MergeTree` (optional):
#### index_granularity
See [MergeTree Settings](../../../operations/settings/merge-tree-settings.md).
`index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage).
#### index_granularity_bytes
`index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Data Storage](#mergetree-data-storage).
#### min_index_granularity_bytes
`min_index_granularity_bytes` — Min allowed size of data granules in bytes. Default value: 1024b. To provide a safeguard against accidentally creating tables with very low index_granularity_bytes. See [Data Storage](#mergetree-data-storage).
#### enable_mixed_granularity_parts
`enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries.
#### use_minimalistic_part_header_in_zookeeper
`use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”.
#### min_merge_bytes_to_use_direct_io
`min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
#### merge_with_ttl_timeout
`merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with delete TTL. Default value: `14400` seconds (4 hours).
#### merge_with_recompression_ttl_timeout
`merge_with_recompression_ttl_timeout` — Minimum delay in seconds before repeating a merge with recompression TTL. Default value: `14400` seconds (4 hours).
#### try_fetch_recompressed_part_timeout
`try_fetch_recompressed_part_timeout` — Timeout (in seconds) before starting merge with recompression. During this time ClickHouse tries to fetch recompressed part from replica which assigned this merge with recompression. Default value: `7200` seconds (2 hours).
#### write_final_mark
`write_final_mark` — Enables or disables writing the final index mark at the end of data part (after the last byte). Default value: 1. Don't turn it off.
#### merge_max_block_size
`merge_max_block_size` — Maximum number of rows in block for merge operations. Default value: 8192.
#### storage_policy
`storage_policy` — Storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).
#### min_bytes_for_wide_part
`min_bytes_for_wide_part`, `min_rows_for_wide_part` — Minimum number of bytes/rows in a data part that can be stored in `Wide` format. You can set one, both or none of these settings. See [Data Storage](#mergetree-data-storage).
#### max_parts_in_total
`max_parts_in_total` — Maximum number of parts in all partitions.
#### max_compress_block_size
`max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. You can also specify this setting in the global settings (see [max_compress_block_size](/docs/en/operations/settings/settings.md/#max-compress-block-size) setting). The value specified when table is created overrides the global value for this setting.
#### min_compress_block_size
`min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting). The value specified when table is created overrides the global value for this setting.
#### max_partitions_to_read
`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting.
#### allow_experimental_optimized_row_order
`allow_experimental_optimized_row_order` - Experimental. Enables the optimization of the row order during inserts to improve the compressibility of the data for compression codecs (e.g. LZ4). Analyzes and reorders the data, and thus increases the CPU overhead of inserts.
**Example of Sections Setting**
@@ -270,7 +194,7 @@ ClickHouse does not require a unique primary key. You can insert multiple rows w
You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](/docs/en/operations/settings/settings.md/#allow-nullable-key) setting. The [NULLS_LAST](/docs/en/sql-reference/statements/select/order-by.md/#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause.
### Selecting the Primary Key {#selecting-the-primary-key}
### Selecting a Primary Key {#selecting-a-primary-key}
The number of columns in the primary key is not explicitly limited. Depending on the data structure, you can include more or fewer columns in the primary key. This may:

View File

@@ -102,6 +102,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
- `_path` — Path to the file. Type: `LowCardinality(String)`.
- `_file` — Name of the file. Type: `LowCardinality(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
## Settings {#settings}

View File

@@ -108,6 +108,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
- `_path` — Path to the `URL`. Type: `LowCardinality(String)`.
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
## Storage Settings {#storage-settings}

View File

@@ -0,0 +1,194 @@
---
slug: /en/operations/analyzer
sidebar_label: Analyzer
title: Analyzer
description: Details about ClickHouse's query analyzer
keywords: [analyzer]
---
# Analyzer
<BetaBadge />
## Known incompatibilities
In ClickHouse version `24.3`, the new query analyzer was enabled by default.
Despite fixing a large number of bugs and introducing new optimizations, it also introduces some breaking changes in ClickHouse behaviour. Please read the following changes to determine how to rewrite your queries for the new analyzer.
### Invalid queries are no longer optimized
The previous query planning infrastructure applied AST-level optimizations before the query validation step.
Optimizations could rewrite the initial query so it becomes valid and can be executed.
In the new analyzer, query validation takes place before the optimization step.
This means that invalid queries that could previously be executed are now rejected.
In such cases, the query must be fixed manually.
**Example 1:**
```sql
SELECT number
FROM numbers(1)
GROUP BY toString(number)
```
This query uses column `number` in the projection list, while only `toString(number)` is available after the aggregation.
In the old analyzer, `GROUP BY toString(number)` was optimized into `GROUP BY number`, making the query valid.
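One possible manual fix is to select the grouped expression itself:
```sql
SELECT toString(number)
FROM numbers(1)
GROUP BY toString(number)
```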
**Example 2:**
```sql
SELECT
number % 2 AS n,
sum(number)
FROM numbers(10)
GROUP BY n
HAVING number > 5
```
The same problem occurs in this query: column `number` is used after aggregation with another key.
The previous query analyzer fixed this query by moving the `number > 5` filter from the `HAVING` clause to the `WHERE` clause.
To fix the query, you should move all conditions that apply to non-aggregated columns to the `WHERE` section to conform to standard SQL syntax:
```sql
SELECT
number % 2 AS n,
sum(number)
FROM numbers(10)
WHERE number > 5
GROUP BY n
```
### CREATE VIEW with invalid query
The new analyzer always performs type-checking.
Previously, it was possible to create a `VIEW` with an invalid `SELECT` query. It would then fail during the first `SELECT` or `INSERT` (in the case of `MATERIALIZED VIEW`).
Now, it's not possible to create such `VIEW`s anymore.
**Example:**
```sql
CREATE TABLE source (data String) ENGINE=MergeTree ORDER BY tuple();
CREATE VIEW some_view
AS SELECT JSONExtract(data, 'test', 'DateTime64(3)')
FROM source;
```
### Known incompatibilities of the `JOIN` clause
#### Join using column from projection
An alias from the `SELECT` list cannot be used as a `JOIN USING` key by default.
A new setting, `analyzer_compatibility_join_using_top_level_identifier`, when enabled, alters the behavior of `JOIN USING` to prefer to resolve identifiers based on expressions from the projection list of the `SELECT` query, rather than using the columns from the left table directly.
**Example:**
```sql
SELECT a + 1 AS b, t2.s
FROM Values('a UInt64, b UInt64', (1, 1)) AS t1
JOIN Values('b UInt64, s String', (1, 'one'), (2, 'two')) t2
USING (b);
```
With `analyzer_compatibility_join_using_top_level_identifier` set to `true`, the join condition is interpreted as `t1.a + 1 = t2.b`, matching the behavior of earlier versions. So, the result will be `2, 'two'`.
When the setting is `false`, the join condition defaults to `t1.b = t2.b`, and the query will return `2, 'one'`.
If `b` is not present in `t1`, the query will fail with an error.
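For instance, a sketch of restoring the earlier behavior for the query above by enabling the compatibility setting mentioned in this section:
```sql
SET analyzer_compatibility_join_using_top_level_identifier = 1;

SELECT a + 1 AS b, t2.s
FROM Values('a UInt64, b UInt64', (1, 1)) AS t1
JOIN Values('b UInt64, s String', (1, 'one'), (2, 'two')) t2
USING (b);
-- returns 2, 'two'
```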
#### Changes in behavior with `JOIN USING` and `ALIAS`/`MATERIALIZED` columns
In the new analyzer, using `*` in a `JOIN USING` query that involves `ALIAS` or `MATERIALIZED` columns will include those columns in the result set by default.
**Example:**
```sql
CREATE TABLE t1 (id UInt64, payload ALIAS sipHash64(id)) ENGINE = MergeTree ORDER BY id;
INSERT INTO t1 VALUES (1), (2);
CREATE TABLE t2 (id UInt64, payload ALIAS sipHash64(id)) ENGINE = MergeTree ORDER BY id;
INSERT INTO t2 VALUES (2), (3);
SELECT * FROM t1
FULL JOIN t2 USING (payload);
```
In the new analyzer, the result of this query will include the `payload` column along with `id` from both tables. In contrast, the previous analyzer would only include these `ALIAS` columns if specific settings (`asterisk_include_alias_columns` or `asterisk_include_materialized_columns`) were enabled, and the columns might appear in a different order.
To ensure consistent and expected results, especially when migrating old queries to the new analyzer, it is advisable to specify columns explicitly in the `SELECT` clause rather than using `*`.
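For the tables from the example above, that advice could look like this (a sketch):
```sql
-- Name the columns you need instead of relying on `*`
SELECT t1.id, t2.id, payload
FROM t1
FULL JOIN t2 USING (payload);
```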
#### Handling of Type Modifiers for columns in `USING` Clause
In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`.
- `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier.
- `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved.
**Example:**
```sql
SELECT id, toTypeName(id) FROM Values('id LowCardinality(String)', ('a')) AS t1
FULL OUTER JOIN Values('id String', ('b')) AS t2
USING (id);
```
In this query, the common supertype for `id` is determined as `String`, discarding the `LowCardinality` modifier from `t1`.
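A similar sketch for the `Nullable(T)` and `T` case, where the resulting `id` keeps the `Nullable` modifier:
```sql
SELECT id, toTypeName(id) FROM Values('id Nullable(String)', ('a')) AS t1
FULL OUTER JOIN Values('id String', ('b')) AS t2
USING (id);
```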
### Projection column names changes
During projection names computation, aliases are not substituted.
```sql
SELECT
1 + 1 AS x,
x + 1
SETTINGS allow_experimental_analyzer = 0
FORMAT PrettyCompact
┌─x─┬─plus(plus(1, 1), 1)─┐
1. │ 2 │ 3 │
└───┴─────────────────────┘
SELECT
1 + 1 AS x,
x + 1
SETTINGS allow_experimental_analyzer = 1
FORMAT PrettyCompact
┌─x─┬─plus(x, 1)─┐
1. │ 2 │ 3 │
└───┴────────────┘
```
### Incompatible function arguments types
In the new analyzer, type inference happens during initial query analysis.
This change means that type checks are done before short-circuit evaluation; thus, `if` function arguments must always have a common supertype.
**Example:**
The following query fails with `There is no supertype for types Array(UInt8), String because some of them are Array and some of them are not`:
```sql
SELECT toTypeName(if(0, [2, 3, 4], 'String'))
```
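One possible rewrite that gives both branches a common supertype (a sketch; the array is converted to its string representation):
```sql
SELECT toTypeName(if(0, toString([2, 3, 4]), 'String'))
```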
### Heterogeneous clusters
The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values.
### Mutations are interpreted by previous analyzer
Mutations are still using the old analyzer.
This means some new ClickHouse SQL features can't be used in mutations. For example, the `QUALIFY` clause.
Status can be checked [here](https://github.com/ClickHouse/ClickHouse/issues/61563).
### Unsupported features
The list of features the new analyzer currently does not support:
- Annoy index.
- Hypothesis index. Work in progress [here](https://github.com/ClickHouse/ClickHouse/pull/48381).
- Window view is not supported. There are no plans to support it in the future.

View File

@@ -443,3 +443,59 @@ SELECT dictGet('dict', 'b', 1);
│ a │
└─────────────────────────┘
```
## Named collections for accessing Kafka
For a description of the parameters, see [Kafka](../engines/table-engines/integrations/kafka.md).
### DDL example
```sql
CREATE NAMED COLLECTION my_kafka_cluster AS
kafka_broker_list = 'localhost:9092',
kafka_topic_list = 'kafka_topic',
kafka_group_name = 'consumer_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = '1048576';
```
### XML example
```xml
<clickhouse>
<named_collections>
<my_kafka_cluster>
<kafka_broker_list>localhost:9092</kafka_broker_list>
<kafka_topic_list>kafka_topic</kafka_topic_list>
<kafka_group_name>consumer_group</kafka_group_name>
<kafka_format>JSONEachRow</kafka_format>
<kafka_max_block_size>1048576</kafka_max_block_size>
</my_kafka_cluster>
</named_collections>
</clickhouse>
```
### Example of using named collections with a Kafka table
Both of the following examples use the same named collection `my_kafka_cluster`:
```sql
CREATE TABLE queue
(
timestamp UInt64,
level String,
message String
)
ENGINE = Kafka(my_kafka_cluster);
CREATE TABLE queue
(
timestamp UInt64,
level String,
message String
)
ENGINE = Kafka(my_kafka_cluster)
SETTINGS kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
```

View File

@@ -3,9 +3,126 @@ slug: /en/operations/settings/merge-tree-settings
title: "MergeTree tables settings"
---
The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`, they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table.
System table `system.merge_tree_settings` shows the globally set MergeTree settings.
These are example overrides for `max_suspicious_broken_parts`:
MergeTree settings can be set in the `merge_tree` section of the server config file, or specified for each `MergeTree` table individually in
the `SETTINGS` clause of the `CREATE TABLE` statement.
Example of customizing the setting `max_suspicious_broken_parts`:
Configure the default for all `MergeTree` tables in the server configuration file:
``` text
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
```
Set for a particular table:
``` sql
CREATE TABLE tab
(
`A` Int64
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS max_suspicious_broken_parts = 500;
```
Change the settings for a particular table using `ALTER TABLE ... MODIFY SETTING`:
```sql
ALTER TABLE tab MODIFY SETTING max_suspicious_broken_parts = 100;
-- reset to global default (value from system.merge_tree_settings)
ALTER TABLE tab RESET SETTING max_suspicious_broken_parts;
```
## index_granularity
Maximum number of data rows between the marks of an index.
Default value: 8192.
## index_granularity_bytes
Maximum size of data granules in bytes.
Default value: 10 MiB.
To restrict the granule size only by the number of rows, set it to 0 (not recommended).
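For instance, a minimal sketch (hypothetical table) that sets both granule limits explicitly at table creation:
```sql
-- Sketch: explicit granule limits on a hypothetical table.
CREATE TABLE granularity_example
(
    id UInt64,
    payload String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 8192, index_granularity_bytes = 10485760;
```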
## min_index_granularity_bytes
Minimum allowed size of data granules in bytes.
Default value: 1024 bytes.
Provides a safeguard against accidentally creating tables with a very low `index_granularity_bytes` value.
## enable_mixed_granularity_parts
Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries.
## use_minimalistic_part_header_in_zookeeper
Storage method of the data parts headers in ZooKeeper. If enabled, ZooKeeper stores less data. For details, see [here](../server-configuration-parameters/settings.md/#server-settings-use_minimalistic_part_header_in_zookeeper).
## min_merge_bytes_to_use_direct_io
The minimum data volume for merge operation that is required for using direct I/O access to the storage disk.
When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged.
If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option).
If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled.
Default value: `10 * 1024 * 1024 * 1024` bytes.
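As a sketch (hypothetical table name), lowering the threshold so that merges above roughly 1 GiB use direct I/O:
```sql
-- Sketch: use O_DIRECT for merges whose total size exceeds 1 GiB.
ALTER TABLE example_table MODIFY SETTING min_merge_bytes_to_use_direct_io = 1073741824;
```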
## merge_with_ttl_timeout
Minimum delay in seconds before repeating a merge with delete TTL.
Default value: `14400` seconds (4 hours).
## merge_with_recompression_ttl_timeout
Minimum delay in seconds before repeating a merge with recompression TTL.
Default value: `14400` seconds (4 hours).
## write_final_mark
Enables or disables writing the final index mark at the end of data part (after the last byte).
Default value: 1.
Don't change it, or bad things will happen.
## storage_policy
Storage policy.
## min_bytes_for_wide_part
Minimum number of bytes/rows in a data part that can be stored in `Wide` format. This setting works together with `min_rows_for_wide_part`: parts below both thresholds are stored in `Compact` format.
You can set one, both, or neither of these settings.
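A minimal sketch (hypothetical table and thresholds) that sets both companion settings at table creation:
```sql
-- Sketch: switch to Wide format once a part reaches ~10 MiB or 100000 rows.
CREATE TABLE wide_format_example
(
    id UInt64,
    payload String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS min_bytes_for_wide_part = 10485760, min_rows_for_wide_part = 100000;
```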
## max_compress_block_size
Maximum size of blocks of uncompressed data before compressing for writing to a table.
You can also specify this setting in the global settings (see [max_compress_block_size](/docs/en/operations/settings/settings.md/#max-compress-block-size) setting).
The value specified when the table is created overrides the global value for this setting.
## min_compress_block_size
Minimum size of blocks of uncompressed data required for compression when writing the next mark.
You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting).
The value specified when the table is created overrides the global value for this setting.
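For example, a sketch (hypothetical table and values) that overrides both compression block thresholds for one table:
```sql
-- Sketch: per-table compression block sizes.
CREATE TABLE compress_block_example
(
    id UInt64,
    payload String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS min_compress_block_size = 65536, max_compress_block_size = 1048576;
```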
## max_partitions_to_read
Limits the maximum number of partitions that can be accessed in one query.
You can also specify the [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) setting globally.
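A sketch (hypothetical table) showing both ways to apply the limit:
```sql
-- Table-level default:
ALTER TABLE example_table MODIFY SETTING max_partitions_to_read = 10;

-- Per-query override:
SELECT count() FROM example_table SETTINGS max_partitions_to_read = 10;
```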
## max_suspicious_broken_parts ## max_suspicious_broken_parts
@ -17,37 +134,6 @@ Possible values:
Default value: 100. Default value: 100.
Override example in `config.xml`:
``` text
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
```
An example to set in `SETTINGS` for a particular table:
``` sql
CREATE TABLE foo
(
`A` Int64
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS max_suspicious_broken_parts = 500;
```
An example of changing the settings for a specific table with the `ALTER TABLE ... MODIFY SETTING` command:
``` sql
ALTER TABLE foo
MODIFY SETTING max_suspicious_broken_parts = 100;
-- reset to default (use value from system.merge_tree_settings)
ALTER TABLE foo
RESET SETTING max_suspicious_broken_parts;
```
## parts_to_throw_insert {#parts-to-throw-insert} ## parts_to_throw_insert {#parts-to-throw-insert}
If the number of active parts in a single partition exceeds the `parts_to_throw_insert` value, `INSERT` is interrupted with the `Too many parts (N). Merges are processing significantly slower than inserts` exception. If the number of active parts in a single partition exceeds the `parts_to_throw_insert` value, `INSERT` is interrupted with the `Too many parts (N). Merges are processing significantly slower than inserts` exception.
@ -301,6 +387,8 @@ Default value: 10800
## try_fetch_recompressed_part_timeout ## try_fetch_recompressed_part_timeout
Timeout (in seconds) before starting a merge with recompression. During this time, ClickHouse tries to fetch the recompressed part from the replica that was assigned this merge with recompression.
Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression. Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression.
Possible values: Possible values:

View File

@ -1590,6 +1590,22 @@ Possible values:
Default value: `default`. Default value: `default`.
## parallel_replicas_custom_key_range_lower {#parallel_replicas_custom_key_range_lower}
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.
When used in conjunction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
Note: This setting will not cause any additional data to be filtered during query processing; rather, it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
## parallel_replicas_custom_key_range_upper {#parallel_replicas_custom_key_range_upper}
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[0, parallel_replicas_custom_key_range_upper]`. A value of 0 disables the upper bound, setting it to the maximum value of the custom key expression.
When used in conjunction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
Note: This setting will not cause any additional data to be filtered during query processing; rather, it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
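A hedged sketch (hypothetical table `example_table` with a `user_id` column used as the custom key; the exact set of companion settings required may vary by version and cluster setup):
```sql
-- Sketch: range-based custom key split over the key range [0, 1000000].
SELECT count()
FROM example_table
SETTINGS
    max_parallel_replicas = 3,
    parallel_replicas_custom_key = 'user_id',
    parallel_replicas_custom_key_filter_type = 'range',
    parallel_replicas_custom_key_range_lower = 0,
    parallel_replicas_custom_key_range_upper = 1000000;
```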
## allow_experimental_parallel_reading_from_replicas ## allow_experimental_parallel_reading_from_replicas
Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table. Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table.
@ -3862,6 +3878,10 @@ Possible values:
Default value: 30. Default value: 30.
:::note
It's applicable only to the default profile. A server reboot is required for the changes to take effect.
:::
## http_receive_timeout {#http_receive_timeout} ## http_receive_timeout {#http_receive_timeout}
HTTP receive timeout (in seconds). HTTP receive timeout (in seconds).

View File

@ -24,6 +24,8 @@ Alias: `lttb`.
- `x` — x coordinate. [Integer](../../../sql-reference/data-types/int-uint.md) , [Float](../../../sql-reference/data-types/float.md) , [Decimal](../../../sql-reference/data-types/decimal.md) , [Date](../../../sql-reference/data-types/date.md), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md). - `x` — x coordinate. [Integer](../../../sql-reference/data-types/int-uint.md) , [Float](../../../sql-reference/data-types/float.md) , [Decimal](../../../sql-reference/data-types/decimal.md) , [Date](../../../sql-reference/data-types/date.md), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md).
- `y` — y coordinate. [Integer](../../../sql-reference/data-types/int-uint.md) , [Float](../../../sql-reference/data-types/float.md) , [Decimal](../../../sql-reference/data-types/decimal.md) , [Date](../../../sql-reference/data-types/date.md), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md). - `y` — y coordinate. [Integer](../../../sql-reference/data-types/int-uint.md) , [Float](../../../sql-reference/data-types/float.md) , [Decimal](../../../sql-reference/data-types/decimal.md) , [Date](../../../sql-reference/data-types/date.md), [Date32](../../../sql-reference/data-types/date32.md), [DateTime](../../../sql-reference/data-types/datetime.md), [DateTime64](../../../sql-reference/data-types/datetime64.md).
NaNs are ignored in the provided series, meaning that any NaN values will be excluded from the analysis. This ensures that the function operates only on valid numerical data.
**Parameters** **Parameters**
- `n` — number of points in the resulting series. [UInt64](../../../sql-reference/data-types/int-uint.md). - `n` — number of points in the resulting series. [UInt64](../../../sql-reference/data-types/int-uint.md).
@ -61,7 +63,7 @@ Result:
``` text ``` text
┌────────largestTriangleThreeBuckets(4)(x, y)───────────┐ ┌────────largestTriangleThreeBuckets(4)(x, y)───────────┐
│ [(1,10),(3,15),(5,40),(10,70)] │ │ [(1,10),(3,15),(9,55),(10,70)] │
└───────────────────────────────────────────────────────┘ └───────────────────────────────────────────────────────┘
``` ```
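The NaN handling noted above can be illustrated with a minimal sketch (inline data via the `values` table function is assumed for illustration; rows with NaN coordinates are skipped before bucketing):
```sql
SELECT largestTriangleThreeBuckets(3)(x, y)
FROM values('x Float64, y Float64', (1, 10), (2, nan), (3, 15), (4, nan), (5, 40));
```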

View File

@ -142,6 +142,34 @@ SELECT readWKTPoint('POINT (1.2 3.4)');
(1.2,3.4) (1.2,3.4)
``` ```
## readWKTLineString
Parses a Well-Known Text (WKT) representation of a LineString geometry and returns it in the internal ClickHouse format.
### Syntax
```sql
readWKTLineString(wkt_string)
```
### Arguments
- `wkt_string`: The input WKT string representing a LineString geometry.
### Returned value
The function returns a ClickHouse internal representation of the linestring geometry.
### Example
```sql
SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)');
```
```response
[(1,1),(2,2),(3,3),(1,1)]
```
## readWKTRing ## readWKTRing
Parses a Well-Known Text (WKT) representation of a Polygon geometry and returns a ring (closed linestring) in the internal ClickHouse format. Parses a Well-Known Text (WKT) representation of a Polygon geometry and returns a ring (closed linestring) in the internal ClickHouse format.
@ -163,7 +191,7 @@ The function returns a ClickHouse internal representation of the ring (closed li
### Example ### Example
```sql ```sql
SELECT readWKTRing('LINESTRING (1 1, 2 2, 3 3, 1 1)'); SELECT readWKTRing('POLYGON ((1 1, 2 2, 3 3, 1 1))');
``` ```
```response ```response

View File

@ -212,7 +212,7 @@ toTypeName(x)
## blockSize {#blockSize} ## blockSize {#blockSize}
In ClickHouse, queries are processed in blocks (chunks). In ClickHouse, queries are processed in [blocks](../../development/architecture.md/#block-block) (chunks).
This function returns the size (row count) of the block the function is called on. This function returns the size (row count) of the block the function is called on.
**Syntax** **Syntax**
@ -221,6 +221,33 @@ This function returns the size (row count) of the block the function is called o
blockSize() blockSize()
``` ```
**Example**
Query:
```sql
DROP TABLE IF EXISTS test;
CREATE TABLE test (n UInt8) ENGINE = Memory;
INSERT INTO test
SELECT * FROM system.numbers LIMIT 5;
SELECT blockSize()
FROM test;
```
Result:
```response
┌─blockSize()─┐
1. │ 5 │
2. │ 5 │
3. │ 5 │
4. │ 5 │
5. │ 5 │
└─────────────┘
```
## byteSize ## byteSize
Returns an estimation of uncompressed byte size of its arguments in memory. Returns an estimation of uncompressed byte size of its arguments in memory.
@ -3688,3 +3715,108 @@ Result:
```response ```response
{'version':'1','serial_number':'2D9071D64530052D48308473922C7ADAFA85D6C5','signature_algo':'sha256WithRSAEncryption','issuer':'/CN=marsnet.local CA','not_before':'May 7 17:01:21 2024 GMT','not_after':'May 7 17:01:21 2025 GMT','subject':'/CN=chnode1','pkey_algo':'rsaEncryption'} {'version':'1','serial_number':'2D9071D64530052D48308473922C7ADAFA85D6C5','signature_algo':'sha256WithRSAEncryption','issuer':'/CN=marsnet.local CA','not_before':'May 7 17:01:21 2024 GMT','not_after':'May 7 17:01:21 2025 GMT','subject':'/CN=chnode1','pkey_algo':'rsaEncryption'}
``` ```
## lowCardinalityIndices
Returns the position of a value in the dictionary of a [LowCardinality](../data-types/lowcardinality.md) column. Positions start at 1. Since LowCardinality columns have per-part dictionaries, this function may return different positions for the same value in different parts.
**Syntax**
```sql
lowCardinalityIndices(col)
```
**Arguments**
- `col` — a low cardinality column. [LowCardinality](../data-types/lowcardinality.md).
**Returned value**
- The position of the value in the dictionary of the current part. [UInt64](../data-types/int-uint.md).
**Example**
Query:
```sql
DROP TABLE IF EXISTS test;
CREATE TABLE test (s LowCardinality(String)) ENGINE = Memory;
-- create two parts:
INSERT INTO test VALUES ('ab'), ('cd'), ('ab'), ('ab'), ('df');
INSERT INTO test VALUES ('ef'), ('cd'), ('ab'), ('cd'), ('ef');
SELECT s, lowCardinalityIndices(s) FROM test;
```
Result:
```response
┌─s──┬─lowCardinalityIndices(s)─┐
1. │ ab │ 1 │
2. │ cd │ 2 │
3. │ ab │ 1 │
4. │ ab │ 1 │
5. │ df │ 3 │
└────┴──────────────────────────┘
┌─s──┬─lowCardinalityIndices(s)─┐
6. │ ef │ 1 │
7. │ cd │ 2 │
8. │ ab │ 3 │
9. │ cd │ 2 │
10. │ ef │ 1 │
└────┴──────────────────────────┘
```
## lowCardinalityKeys
Returns the dictionary values of a [LowCardinality](../data-types/lowcardinality.md) column. If the block is smaller or larger than the dictionary size, the result will be truncated or extended with default values. Since LowCardinality columns have per-part dictionaries, this function may return different dictionary values in different parts.
**Syntax**
```sql
lowCardinalityKeys(col)
```
**Arguments**
- `col` — a low cardinality column. [LowCardinality](../data-types/lowcardinality.md).
**Returned value**
- The dictionary values of the column. The type matches the base (non-`LowCardinality`) type of `col`.
**Example**
Query:
```sql
DROP TABLE IF EXISTS test;
CREATE TABLE test (s LowCardinality(String)) ENGINE = Memory;
-- create two parts:
INSERT INTO test VALUES ('ab'), ('cd'), ('ab'), ('ab'), ('df');
INSERT INTO test VALUES ('ef'), ('cd'), ('ab'), ('cd'), ('ef');
SELECT s, lowCardinalityKeys(s) FROM test;
```
Result:
```response
┌─s──┬─lowCardinalityKeys(s)─┐
1. │ ef │ │
2. │ cd │ ef │
3. │ ab │ cd │
4. │ cd │ ab │
5. │ ef │ │
└────┴───────────────────────┘
┌─s──┬─lowCardinalityKeys(s)─┐
6. │ ab │ │
7. │ cd │ ab │
8. │ ab │ cd │
9. │ ab │ df │
10. │ df │ │
└────┴───────────────────────┘
```

View File

@ -6,49 +6,90 @@ sidebar_label: Rounding
# Rounding Functions # Rounding Functions
## floor(x\[, N\]) ## floor
Returns the largest round number that is less than or equal to `x`. A round number is a multiple of 1/10N, or the nearest number of the appropriate data type if 1 / 10N isnt exact. Returns the largest rounded number less than or equal `x`.
N is an integer constant, optional parameter. By default it is zero, which means to round to an integer. A rounded number is a multiple of 1 / 10 * N, or the nearest number of the appropriate data type if 1 / 10 * N isnt exact.
N may be negative.
Examples: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` Integer arguments may be rounded with negative `N` argument, with non-negative `N` the function returns `x`, i.e. does nothing.
`x` is any numeric type. The result is a number of the same type. If rounding causes an overflow (for example, `floor(-128, -1)`), the result is undefined.
For integer arguments, it makes sense to round with a negative `N` value (for non-negative `N`, the function does not do anything).
If rounding causes overflow (for example, floor(-128, -1)), an implementation-specific result is returned.
## ceil(x\[, N\]), ceiling(x\[, N\]) **Syntax**
Returns the smallest round number that is greater than or equal to `x`. In every other way, it is the same as the `floor` function (see above). ``` sql
floor(x[, N])
```
## trunc(x\[, N\]), truncate(x\[, N\]) **Parameters**
Returns the round number with largest absolute value that has an absolute value less than or equal to `x`s. In every other way, it is the same as the floor function (see above). - `x` - The value to round. [Float*](../data-types/float.md), [Decimal*](../data-types/decimal.md), or [(U)Int*](../data-types/int-uint.md).
- `N` — The number of decimal places to round to. [(U)Int*](../data-types/int-uint.md). The default is zero, which means rounding to an integer. Can be negative.
**Returned value**
A rounded number of the same type as `x`.
**Examples**
Query:
```sql
SELECT floor(123.45, 1) AS rounded
```
Result:
```
┌─rounded─┐
│ 123.4 │
└─────────┘
```
Query:
```sql
SELECT floor(123.45, -1) AS rounded
```
Result:
```
┌─rounded─┐
│ 120 │
└─────────┘
```
## ceiling
Like `floor`, but returns the smallest rounded number greater than or equal to `x`.
**Syntax**
``` sql
ceiling(x[, N])
```
Alias: `ceil`
## truncate
Like `floor`, but returns the rounded number with the largest absolute value that is less than or equal to the absolute value of `x`.
**Syntax** **Syntax**
```sql ```sql
trunc(input, precision) truncate(x[, N])
``` ```
Alias: `truncate`. Alias: `trunc`.
**Parameters**
- `input`: A numeric type ([Float](../data-types/float.md), [Decimal](../data-types/decimal.md) or [Integer](../data-types/int-uint.md)).
- `precision`: An [Integer](../data-types/int-uint.md) type.
**Returned value**
- A data type of `input`.
**Example** **Example**
Query: Query:
```sql ```sql
SELECT trunc(123.499, 1) as res; SELECT truncate(123.499, 1) as res;
``` ```
```response ```response
@ -57,37 +98,40 @@ SELECT trunc(123.499, 1) as res;
└───────┘ └───────┘
``` ```
## round(x\[, N\]) ## round
Rounds a value to a specified number of decimal places. Rounds a value to a specified number of decimal places.
The function returns the nearest number of the specified order. In case when given number has equal distance to surrounding numbers, the function uses bankers rounding for float number types and rounds away from zero for the other number types (Decimal). The function returns the nearest number of the specified order.
If the input value is equidistant from two neighboring numbers, the function uses banker's rounding for [Float*](../data-types/float.md) inputs and rounds away from zero for the other number types ([Decimal*](../data-types/decimal.md)).
**Syntax**
``` sql ``` sql
round(expression [, decimal_places]) round(x[, N])
``` ```
**Arguments** **Arguments**
- `expression` — A number to be rounded. Can be any [expression](../../sql-reference/syntax.md#syntax-expressions) returning the numeric [data type](../data-types/index.md#data_types). - `x` — A number to round. [Float*](../data-types/float.md), [Decimal*](../data-types/decimal.md), or [(U)Int*](../data-types/int-uint.md).
- `decimal-places` — An integer value. - `N` — The number of decimal places to round to. Integer. Defaults to `0`.
- If `decimal-places > 0` then the function rounds the value to the right of the decimal point. - If `N > 0`, the function rounds to the right of the decimal point.
- If `decimal-places < 0` then the function rounds the value to the left of the decimal point. - If `N < 0`, the function rounds to the left of the decimal point.
- If `decimal-places = 0` then the function rounds the value to integer. In this case the argument can be omitted. - If `N = 0`, the function rounds to the next integer.
**Returned value:** **Returned value:**
The rounded number of the same type as the input number. A rounded number of the same type as `x`.
**Examples** **Examples**
Example of usage with Float: Example with `Float` inputs:
``` sql ```sql
SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3; SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
``` ```
``` text ```
┌───x─┬─round(divide(number, 2))─┐ ┌───x─┬─round(divide(number, 2))─┐
│ 0 │ 0 │ │ 0 │ 0 │
│ 0.5 │ 0 │ │ 0.5 │ 0 │
@ -95,13 +139,13 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
└─────┴──────────────────────────┘ └─────┴──────────────────────────┘
``` ```
Example of usage with Decimal: Example with `Decimal` inputs:
``` sql ```sql
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3; SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3;
``` ```
``` text ```
┌───x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐ ┌───x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
│ 0 │ 0 │ │ 0 │ 0 │
│ 0.5 │ 1 │ │ 0.5 │ 1 │
@ -109,14 +153,14 @@ SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIM
└─────┴──────────────────────────────────────────────────┘ └─────┴──────────────────────────────────────────────────┘
``` ```
If you want to keep the trailing zeros, you need to enable `output_format_decimal_trailing_zeros` To retain trailing zeros, enable setting `output_format_decimal_trailing_zeros`:
``` sql ```sql
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3 settings output_format_decimal_trailing_zeros=1; SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3 settings output_format_decimal_trailing_zeros=1;
``` ```
``` text ```
┌──────x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐ ┌──────x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
│ 0.0000 │ 0.0000 │ │ 0.0000 │ 0.0000 │
│ 0.5000 │ 1.0000 │ │ 0.5000 │ 1.0000 │
@ -151,9 +195,15 @@ round(3.65, 1) = 3.6
Rounds a number to a specified decimal position. Rounds a number to a specified decimal position.
- If the rounding number is halfway between two numbers, the function uses bankers rounding. Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`. If the rounding number is halfway between two numbers, the function uses bankers rounding.
Banker's rounding is a method of rounding fractional numbers.
When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position.
For example: 3.5 rounds up to 4, 2.5 rounds down to 2.
It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest).
The [round](#rounding_functions-round) function performs the same rounding for floating point numbers.
The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`.
- In other cases, the function rounds numbers to the nearest integer. In other cases, the function rounds numbers to the nearest integer.
Using bankers rounding, you can reduce the effect that rounding numbers has on the results of summing or subtracting these numbers. Using bankers rounding, you can reduce the effect that rounding numbers has on the results of summing or subtracting these numbers.
@ -166,16 +216,20 @@ For example, sum numbers 1.5, 2.5, 3.5, 4.5 with different rounding:
**Syntax** **Syntax**
``` sql ``` sql
roundBankers(expression [, decimal_places]) roundBankers(x [, N])
``` ```
**Arguments** **Arguments**
- `expression` — A number to be rounded. Can be any [expression](../../sql-reference/syntax.md#syntax-expressions) returning the numeric [data type](../data-types/index.md#data_types). - `N > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`.
- `decimal-places` — Decimal places. An integer number. - `N < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`.
- `decimal-places > 0` — The function rounds the number to the given position right of the decimal point. Example: `roundBankers(3.55, 1) = 3.6`. - `N = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`.
- `decimal-places < 0` — The function rounds the number to the given position left of the decimal point. Example: `roundBankers(24.55, -1) = 20`.
- `decimal-places = 0` — The function rounds the number to an integer. In this case the argument can be omitted. Example: `roundBankers(2.5) = 2`. - `x` — A number to round. [Float*](../data-types/float.md), [Decimal*](../data-types/decimal.md), or [(U)Int*](../data-types/int-uint.md).
- `N` — The number of decimal places to round to. Integer. Defaults to `0`.
- If `N > 0`, the function rounds to the right of the decimal point.
- If `N < 0`, the function rounds to the left of the decimal point.
- If `N = 0`, the function rounds to the next integer.
**Returned value** **Returned value**
@ -185,13 +239,13 @@ A value rounded by the bankers rounding method.
Query: Query:
``` sql ```sql
SELECT number / 2 AS x, roundBankers(x, 0) AS b fROM system.numbers limit 10 SELECT number / 2 AS x, roundBankers(x, 0) AS b fROM system.numbers limit 10
``` ```
Result: Result:
``` text ```
┌───x─┬─b─┐ ┌───x─┬─b─┐
│ 0 │ 0 │ │ 0 │ 0 │
│ 0.5 │ 0 │ │ 0.5 │ 0 │
@ -208,7 +262,7 @@ Result:
Examples of Bankers rounding: Examples of Bankers rounding:
``` text ```
roundBankers(0.4) = 0 roundBankers(0.4) = 0
roundBankers(-3.5) = -4 roundBankers(-3.5) = -4
roundBankers(4.5) = 4 roundBankers(4.5) = 4
@ -264,7 +318,7 @@ Result:
## roundDuration ## roundDuration
Accepts a number. If the number is less than one, it returns `0`. Otherwise, it rounds the number down to numbers from the set of commonly used durations: `1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000`. Accepts a number. If the number is less than one, it returns `0`. Otherwise, it rounds the number down to numbers from the set of commonly used durations: `1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000`.
**Syntax** **Syntax**

View File

@ -2423,11 +2423,7 @@ Result:
## toUnixTimestamp64Milli ## toUnixTimestamp64Milli
## toUnixTimestamp64Micro Converts a `DateTime64` to a `Int64` value with fixed millisecond precision. The input value is scaled up or down appropriately depending on its precision.
## toUnixTimestamp64Nano
Converts a `DateTime64` to a `Int64` value with fixed sub-second precision. Input value is scaled up or down appropriately depending on it precision.
:::note :::note
The output value is a timestamp in UTC, not in the timezone of `DateTime64`. The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
@ -2437,24 +2433,22 @@ The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
```sql ```sql
toUnixTimestamp64Milli(value) toUnixTimestamp64Milli(value)
toUnixTimestamp64Micro(value)
toUnixTimestamp64Nano(value)
``` ```
**Arguments** **Arguments**
- `value` — DateTime64 value with any precision. - `value` — DateTime64 value with any precision. [DateTime64](../data-types/datetime64.md).
**Returned value** **Returned value**
- `value` converted to the `Int64` data type. - `value` converted to the `Int64` data type. [Int64](../data-types/int-uint.md).
**Examples** **Example**
Query: Query:
```sql ```sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64 WITH toDateTime64('2009-02-13 23:31:31.011', 3, 'UTC') AS dt64
SELECT toUnixTimestamp64Milli(dt64); SELECT toUnixTimestamp64Milli(dt64);
``` ```
@ -2462,14 +2456,77 @@ Result:
```response ```response
┌─toUnixTimestamp64Milli(dt64)─┐ ┌─toUnixTimestamp64Milli(dt64)─┐
│ 1568650812345 │ 1234567891011
└──────────────────────────────┘ └──────────────────────────────┘
``` ```
## toUnixTimestamp64Micro
Converts a `DateTime64` to an `Int64` value with fixed microsecond precision. The input value is scaled up or down appropriately depending on its precision.
:::note
The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
:::
**Syntax**
```sql
toUnixTimestamp64Micro(value)
```
**Arguments**
- `value` — DateTime64 value with any precision. [DateTime64](../data-types/datetime64.md).
**Returned value**
- `value` converted to the `Int64` data type. [Int64](../data-types/int-uint.md).
**Example**
Query: Query:
``` sql ```sql
WITH toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64 WITH toDateTime64('1970-01-15 06:56:07.891011', 6, 'UTC') AS dt64
SELECT toUnixTimestamp64Micro(dt64);
```
Result:
```response
┌─toUnixTimestamp64Micro(dt64)─┐
│ 1234567891011 │
└──────────────────────────────┘
```
## toUnixTimestamp64Nano
Converts a `DateTime64` to an `Int64` value with fixed nanosecond precision. The input value is scaled up or down appropriately depending on its precision.
:::note
The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
:::
**Syntax**
```sql
toUnixTimestamp64Nano(value)
```
**Arguments**
- `value` — DateTime64 value with any precision. [DateTime64](../data-types/datetime64.md).
**Returned value**
- `value` converted to the `Int64` data type. [Int64](../data-types/int-uint.md).
**Example**
Query:
```sql
WITH toDateTime64('1970-01-01 00:20:34.567891011', 9, 'UTC') AS dt64
SELECT toUnixTimestamp64Nano(dt64); SELECT toUnixTimestamp64Nano(dt64);
``` ```
@ -2477,34 +2534,32 @@ Result:
```response ```response
┌─toUnixTimestamp64Nano(dt64)─┐ ┌─toUnixTimestamp64Nano(dt64)─┐
1568650812345678000 1234567891011
└─────────────────────────────┘ └─────────────────────────────┘
``` ```
## fromUnixTimestamp64Milli ## fromUnixTimestamp64Milli
## fromUnixTimestamp64Micro Converts an `Int64` to a `DateTime64` value with fixed millisecond precision and optional timezone. The input value is scaled up or down appropriately depending on its precision.
## fromUnixTimestamp64Nano :::note
Please note that the input value is treated as a UTC timestamp, not a timestamp at the given (or implicit) timezone.
Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and optional timezone. Input value is scaled up or down appropriately depending on its precision. Please note that input value is treated as UTC timestamp, not timestamp at given (or implicit) timezone. :::
**Syntax** **Syntax**
``` sql ``` sql
fromUnixTimestamp64Milli(value[, timezone]) fromUnixTimestamp64Milli(value[, timezone])
fromUnixTimestamp64Micro(value[, timezone])
fromUnixTimestamp64Nano(value[, timezone])
``` ```
**Arguments** **Arguments**
- `value``Int64` value with any precision. - `value` — value with any precision. [Int64](../data-types/int-uint.md).
- `timezone``String` (optional) timezone name of the result. - `timezone` — (optional) timezone name of the result. [String](../data-types/string.md).
**Returned value** **Returned value**
- `value` converted to the `DateTime64` data type. - `value` converted to DateTime64 with precision `3`. [DateTime64](../data-types/datetime64.md).
**Example** **Example**
@ -2512,15 +2567,101 @@ Query:
``` sql ``` sql
WITH CAST(1234567891011, 'Int64') AS i64 WITH CAST(1234567891011, 'Int64') AS i64
SELECT fromUnixTimestamp64Milli(i64, 'UTC'); SELECT
fromUnixTimestamp64Milli(i64, 'UTC') AS x,
toTypeName(x);
``` ```
Result: Result:
```response ```response
┌─fromUnixTimestamp64Milli(i64, 'UTC')─┐ ┌───────────────────────x─┬─toTypeName(x)────────┐
│ 2009-02-13 23:31:31.011 │ │ 2009-02-13 23:31:31.011 │ DateTime64(3, 'UTC') │
└──────────────────────────────────────┘ └─────────────────────────┴──────────────────────┘
```
## fromUnixTimestamp64Micro
Converts an `Int64` to a `DateTime64` value with fixed microsecond precision and optional timezone. The input value is scaled up or down appropriately depending on its precision.
:::note
Please note that the input value is treated as a UTC timestamp, not a timestamp at the given (or implicit) timezone.
:::
**Syntax**
``` sql
fromUnixTimestamp64Micro(value[, timezone])
```
**Arguments**
- `value` — value with any precision. [Int64](../data-types/int-uint.md).
- `timezone` — (optional) timezone name of the result. [String](../data-types/string.md).
**Returned value**
- `value` converted to DateTime64 with precision `6`. [DateTime64](../data-types/datetime64.md).
**Example**
Query:
``` sql
WITH CAST(1234567891011, 'Int64') AS i64
SELECT
fromUnixTimestamp64Micro(i64, 'UTC') AS x,
toTypeName(x);
```
Result:
```response
┌──────────────────────────x─┬─toTypeName(x)────────┐
│ 1970-01-15 06:56:07.891011 │ DateTime64(6, 'UTC') │
└────────────────────────────┴──────────────────────┘
```
## fromUnixTimestamp64Nano
Converts an `Int64` to a `DateTime64` value with fixed nanosecond precision and optional timezone. The input value is scaled up or down appropriately depending on its precision.
:::note
Please note that the input value is treated as a UTC timestamp, not a timestamp at the given (or implicit) timezone.
:::
**Syntax**
``` sql
fromUnixTimestamp64Nano(value[, timezone])
```
**Arguments**
- `value` — value with any precision. [Int64](../data-types/int-uint.md).
- `timezone` — (optional) timezone name of the result. [String](../data-types/string.md).
**Returned value**
- `value` converted to DateTime64 with precision `9`. [DateTime64](../data-types/datetime64.md).
**Example**
Query:
``` sql
WITH CAST(1234567891011, 'Int64') AS i64
SELECT
fromUnixTimestamp64Nano(i64, 'UTC') AS x,
toTypeName(x);
```
Result:
```response
┌─────────────────────────────x─┬─toTypeName(x)────────┐
│ 1970-01-01 00:20:34.567891011 │ DateTime64(9, 'UTC') │
└───────────────────────────────┴──────────────────────┘
``` ```
## formatRow ## formatRow

View File

@ -139,7 +139,7 @@ For the query to run successfully, the following conditions must be met:
ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1 ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
``` ```
This query copies the data partition from the `table1` to `table2` and replaces existing partition in the `table2`. This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. The operation is atomic.
Note that: Note that:

View File

@ -72,6 +72,7 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam
- `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_path` — Path to the file. Type: `LowCardinalty(String)`.
- `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
**See Also** **See Also**

View File

@ -196,6 +196,7 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3
- `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_path` — Path to the file. Type: `LowCardinalty(String)`.
- `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
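For illustration, a minimal sketch that reads the `_time` virtual column (the path and structure reuse the example above):
```sql
-- Sketch: inspect the modification time of the matched files.
SELECT _file, _time
FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32')
ORDER BY _time DESC;
```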
## Settings {#settings} ## Settings {#settings}

View File

@ -97,6 +97,7 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin
- `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_path` — Path to the file. Type: `LowCardinalty(String)`.
- `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
## Storage Settings {#storage-settings} ## Storage Settings {#storage-settings}

View File

@ -272,6 +272,7 @@ FROM s3(
- `_path` — Path to the file. Type: `LowCardinalty(String)`. - `_path` — Path to the file. Type: `LowCardinalty(String)`.
- `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_file` — Name of the file. Type: `LowCardinalty(String)`.
- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
## Storage Settings {#storage-settings} ## Storage Settings {#storage-settings}

View File

@ -53,6 +53,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
- `_path` — Path to the `URL`. Type: `LowCardinalty(String)`. - `_path` — Path to the `URL`. Type: `LowCardinalty(String)`.
- `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`. - `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
## Storage Settings {#storage-settings} ## Storage Settings {#storage-settings}

View File

@ -10,7 +10,7 @@ sidebar_label: "Функции округления"
Возвращает наибольшее круглое число, которое меньше или равно, чем x. Возвращает наибольшее круглое число, которое меньше или равно, чем x.
Круглым называется число, кратное 1 / 10N или ближайшее к нему число соответствующего типа данных, если 1 / 10N не представимо точно. Круглым называется число, кратное 1 / 10N или ближайшее к нему число соответствующего типа данных, если 1 / 10N не представимо точно.
N - целочисленная константа, не обязательный параметр. По умолчанию - ноль, что означает - округлять до целого числа. N - целочисленный аргумент, не обязательный параметр. По умолчанию - ноль, что означает - округлять до целого числа.
N может быть отрицательным. N может быть отрицательным.
Примеры: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.` Примеры: `floor(123.45, 1) = 123.4, floor(123.45, -1) = 120.`

View File

@ -773,7 +773,27 @@ try
LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info); LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info);
#endif #endif
bool will_have_trace_collector = hasPHDRCache() && config().has("trace_log"); bool has_trace_collector = false;
/// Disable it if we collect test coverage information, because it will work extremely slow.
#if !WITH_COVERAGE
/// Profilers cannot work reliably with any other libunwind or without PHDR cache.
has_trace_collector = hasPHDRCache() && config().has("trace_log");
#endif
/// Describe multiple reasons when query profiler cannot work.
#if WITH_COVERAGE
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage.");
#endif
#if defined(SANITIZER)
LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers"
" when two different stack unwinding methods will interfere with each other.");
#endif
if (!hasPHDRCache())
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created"
" (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe).");
// Initialize global thread pool. Do it before we fetch configs from zookeeper // Initialize global thread pool. Do it before we fetch configs from zookeeper
// nodes (`from_zk`), because ZooKeeper interface uses the pool. We will // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
@ -782,8 +802,27 @@ try
server_settings.max_thread_pool_size, server_settings.max_thread_pool_size,
server_settings.max_thread_pool_free_size, server_settings.max_thread_pool_free_size,
server_settings.thread_pool_queue_size, server_settings.thread_pool_queue_size,
will_have_trace_collector ? server_settings.global_profiler_real_time_period_ns : 0, has_trace_collector ? server_settings.global_profiler_real_time_period_ns : 0,
will_have_trace_collector ? server_settings.global_profiler_cpu_time_period_ns : 0); has_trace_collector ? server_settings.global_profiler_cpu_time_period_ns : 0);
if (has_trace_collector)
{
global_context->createTraceCollector();
/// Set up server-wide memory profiler (for total memory tracker).
if (server_settings.total_memory_profiler_step)
total_memory_tracker.setProfilerStep(server_settings.total_memory_profiler_step);
if (server_settings.total_memory_tracker_sample_probability > 0.0)
total_memory_tracker.setSampleProbability(server_settings.total_memory_tracker_sample_probability);
if (server_settings.total_memory_profiler_sample_min_allocation_size)
total_memory_tracker.setSampleMinAllocationSize(server_settings.total_memory_profiler_sample_min_allocation_size);
if (server_settings.total_memory_profiler_sample_max_allocation_size)
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
}
/// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed). /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed).
SCOPE_EXIT({ SCOPE_EXIT({
Stopwatch watch; Stopwatch watch;
@ -1950,52 +1989,9 @@ try
LOG_DEBUG(log, "Loaded metadata."); LOG_DEBUG(log, "Loaded metadata.");
/// Init trace collector only after trace_log system table was created if (has_trace_collector)
/// Disable it if we collect test coverage information, because it will work extremely slow.
#if !WITH_COVERAGE
/// Profilers cannot work reliably with any other libunwind or without PHDR cache.
if (hasPHDRCache())
{
global_context->initializeTraceCollector(); global_context->initializeTraceCollector();
/// Set up server-wide memory profiler (for total memory tracker).
if (server_settings.total_memory_profiler_step)
{
total_memory_tracker.setProfilerStep(server_settings.total_memory_profiler_step);
}
if (server_settings.total_memory_tracker_sample_probability > 0.0)
{
total_memory_tracker.setSampleProbability(server_settings.total_memory_tracker_sample_probability);
}
if (server_settings.total_memory_profiler_sample_min_allocation_size)
{
total_memory_tracker.setSampleMinAllocationSize(server_settings.total_memory_profiler_sample_min_allocation_size);
}
if (server_settings.total_memory_profiler_sample_max_allocation_size)
{
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
}
}
#endif
/// Describe multiple reasons when query profiler cannot work.
#if WITH_COVERAGE
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they work extremely slow with test coverage.");
#endif
#if defined(SANITIZER)
LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers"
" when two different stack unwinding methods will interfere with each other.");
#endif
if (!hasPHDRCache())
LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created"
" (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe).");
#if defined(OS_LINUX) #if defined(OS_LINUX)
auto tasks_stats_provider = TasksStatsCounters::findBestAvailableProvider(); auto tasks_stats_provider = TasksStatsCounters::findBestAvailableProvider();
if (tasks_stats_provider == TasksStatsCounters::MetricsProvider::None) if (tasks_stats_provider == TasksStatsCounters::MetricsProvider::None)

View File

@ -1561,6 +1561,7 @@
<rocksdb> <rocksdb>
<options> <options>
<max_background_jobs>8</max_background_jobs> <max_background_jobs>8</max_background_jobs>
<info_log_level>DEBUG_LEVEL</info_log_level>
</options> </options>
<column_family_options> <column_family_options>
<num_levels>2</num_levels> <num_levels>2</num_levels>

View File

@ -1,5 +1,6 @@
#include <Analyzer/ArrayJoinNode.h> #include <Analyzer/ArrayJoinNode.h>
#include <Analyzer/ColumnNode.h> #include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h> #include <Analyzer/Utils.h>
#include <IO/Operators.h> #include <IO/Operators.h>
#include <IO/WriteBuffer.h> #include <IO/WriteBuffer.h>
@ -64,7 +65,12 @@ ASTPtr ArrayJoinNode::toASTImpl(const ConvertToASTOptions & options) const
auto * column_node = array_join_expression->as<ColumnNode>(); auto * column_node = array_join_expression->as<ColumnNode>();
if (column_node && column_node->getExpression()) if (column_node && column_node->getExpression())
array_join_expression_ast = column_node->getExpression()->toAST(options); {
if (const auto * function_node = column_node->getExpression()->as<FunctionNode>(); function_node && function_node->getFunctionName() == "nested")
array_join_expression_ast = array_join_expression->toAST(options);
else
array_join_expression_ast = column_node->getExpression()->toAST(options);
}
else else
array_join_expression_ast = array_join_expression->toAST(options); array_join_expression_ast = array_join_expression->toAST(options);

View File

@ -22,6 +22,7 @@ public:
if (query_node->hasOrderBy()) if (query_node->hasOrderBy())
{ {
QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
QueryTreeNodes result_nodes; QueryTreeNodes result_nodes;
auto & query_order_by_nodes = query_node->getOrderBy().getNodes(); auto & query_order_by_nodes = query_node->getOrderBy().getNodes();
@ -45,10 +46,9 @@ public:
query_order_by_nodes = std::move(result_nodes); query_order_by_nodes = std::move(result_nodes);
} }
unique_expressions_nodes_set.clear();
if (query_node->hasLimitBy()) if (query_node->hasLimitBy())
{ {
QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
QueryTreeNodes result_nodes; QueryTreeNodes result_nodes;
auto & query_limit_by_nodes = query_node->getLimitBy().getNodes(); auto & query_limit_by_nodes = query_node->getLimitBy().getNodes();
@ -63,9 +63,6 @@ public:
query_limit_by_nodes = std::move(result_nodes); query_limit_by_nodes = std::move(result_nodes);
} }
} }
private:
QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
}; };
} }

View File

@ -54,9 +54,9 @@ namespace
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration( S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
settings.auth_settings.region, settings.auth_settings.region,
context->getRemoteHostFilter(), context->getRemoteHostFilter(),
static_cast<unsigned>(global_settings.s3_max_redirects), static_cast<unsigned>(local_settings.s3_max_redirects),
static_cast<unsigned>(global_settings.s3_retry_attempts), static_cast<unsigned>(local_settings.backup_restore_s3_retry_attempts),
global_settings.enable_s3_requests_logging, local_settings.enable_s3_requests_logging,
/* for_disk_s3 = */ false, /* for_disk_s3 = */ false,
request_settings.get_request_throttler, request_settings.get_request_throttler,
request_settings.put_request_throttler, request_settings.put_request_throttler,

View File

@ -289,10 +289,14 @@ void executeColumnIfNeeded(ColumnWithTypeAndName & column, bool empty)
if (!column_function) if (!column_function)
return; return;
size_t original_size = column.column->size();
if (!empty) if (!empty)
column = column_function->reduce(); column = column_function->reduce();
else else
column.column = column_function->getResultType()->createColumn(); column.column = column_function->getResultType()->createColumnConstWithDefaultValue(original_size)->convertToFullColumnIfConst();
chassert(column.column->size() == original_size);
} }
int checkShortCircuitArguments(const ColumnsWithTypeAndName & arguments) int checkShortCircuitArguments(const ColumnsWithTypeAndName & arguments)

View File

@ -228,9 +228,9 @@ void Timer::cleanup()
#endif #endif
template <typename ProfilerImpl> template <typename ProfilerImpl>
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase([[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt32 period, [[maybe_unused]] int pause_signal_) QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(
: log(getLogger("QueryProfiler")) [[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt32 period, [[maybe_unused]] int pause_signal_)
, pause_signal(pause_signal_) : log(getLogger("QueryProfiler")), pause_signal(pause_signal_)
{ {
#if defined(SANITIZER) #if defined(SANITIZER)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers"); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");

View File

@ -140,6 +140,18 @@ inline bool isPrintableASCII(char c)
return uc >= 32 && uc <= 126; /// 127 is ASCII DEL. return uc >= 32 && uc <= 126; /// 127 is ASCII DEL.
} }
inline bool isCSIParameterByte(char c)
{
uint8_t uc = c;
return uc >= 0x30 && uc <= 0x3F; /// ASCII 09:;<=>?
}
inline bool isCSIIntermediateByte(char c)
{
uint8_t uc = c;
return uc >= 0x20 && uc <= 0x2F; /// ASCII !"#$%&'()*+,-./
}
inline bool isCSIFinalByte(char c) inline bool isCSIFinalByte(char c)
{ {
uint8_t uc = c; uint8_t uc = c;

View File

@ -103,7 +103,7 @@ template <ComputeWidthMode mode>
size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
{ {
UTF8Decoder decoder; UTF8Decoder decoder;
int isEscapeSequence = false; bool is_escape_sequence = false;
size_t width = 0; size_t width = 0;
size_t rollback = 0; size_t rollback = 0;
for (size_t i = 0; i < size; ++i) for (size_t i = 0; i < size; ++i)
@ -116,6 +116,9 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
while (i + 15 < size) while (i + 15 < size)
{ {
if (is_escape_sequence)
break;
__m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&data[i])); __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&data[i]));
const uint16_t non_regular_width_mask = _mm_movemask_epi8( const uint16_t non_regular_width_mask = _mm_movemask_epi8(
@ -132,25 +135,28 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
} }
else else
{ {
if (isEscapeSequence) i += 16;
{ width += 16;
break;
}
else
{
i += 16;
width += 16;
}
} }
} }
#endif #endif
while (i < size && isPrintableASCII(data[i])) while (i < size && isPrintableASCII(data[i]))
{ {
if (!isEscapeSequence) bool ignore_width = is_escape_sequence && (isCSIParameterByte(data[i]) || isCSIIntermediateByte(data[i]));
if (ignore_width || (data[i] == '[' && is_escape_sequence))
{
/// don't count the width
}
else if (is_escape_sequence && isCSIFinalByte(data[i]))
{
is_escape_sequence = false;
}
else
{
++width; ++width;
else if (isCSIFinalByte(data[i]) && data[i - 1] != '\x1b') }
isEscapeSequence = false; /// end of CSI escape sequence reached
++i; ++i;
} }
@ -178,7 +184,7 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
// special treatment for '\t' and for ESC // special treatment for '\t' and for ESC
size_t next_width = width; size_t next_width = width;
if (decoder.codepoint == '\x1b') if (decoder.codepoint == '\x1b')
isEscapeSequence = true; is_escape_sequence = true;
else if (decoder.codepoint == '\t') else if (decoder.codepoint == '\t')
next_width += 8 - (prefix + width) % 8; next_width += 8 - (prefix + width) % 8;
else else

View File

@ -202,6 +202,8 @@ class IColumn;
M(UInt64, parallel_replica_offset, 0, "This is internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.", 0) \ M(UInt64, parallel_replica_offset, 0, "This is internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.", 0) \
M(String, parallel_replicas_custom_key, "", "Custom key assigning work to replicas when parallel replicas are used.", 0) \ M(String, parallel_replicas_custom_key, "", "Custom key assigning work to replicas when parallel replicas are used.", 0) \
M(ParallelReplicasCustomKeyFilterType, parallel_replicas_custom_key_filter_type, ParallelReplicasCustomKeyFilterType::DEFAULT, "Type of filter to use with custom key for parallel replicas. default - use modulo operation on the custom key, range - use range filter on custom key using all possible values for the value type of custom key.", 0) \ M(ParallelReplicasCustomKeyFilterType, parallel_replicas_custom_key_filter_type, ParallelReplicasCustomKeyFilterType::DEFAULT, "Type of filter to use with custom key for parallel replicas. default - use modulo operation on the custom key, range - use range filter on custom key using all possible values for the value type of custom key.", 0) \
M(UInt64, parallel_replicas_custom_key_range_lower, 0, "Lower bound for the universe that the parallel replicas custom range filter is calculated over", 0) \
M(UInt64, parallel_replicas_custom_key_range_upper, 0, "Upper bound for the universe that the parallel replicas custom range filter is calculated over. A value of 0 disables the upper bound, setting it to the max value of the custom key expression", 0) \
\ \
M(String, cluster_for_parallel_replicas, "", "Cluster for a shard in which current server is located", 0) \ M(String, cluster_for_parallel_replicas, "", "Cluster for a shard in which current server is located", 0) \
M(UInt64, allow_experimental_parallel_reading_from_replicas, 0, "Use all the replicas from a shard for SELECT query execution. Reading is parallelized and coordinated dynamically. 0 - disabled, 1 - enabled, silently disable them in case of failure, 2 - enabled, throw an exception in case of failure", 0) \ M(UInt64, allow_experimental_parallel_reading_from_replicas, 0, "Use all the replicas from a shard for SELECT query execution. Reading is parallelized and coordinated dynamically. 0 - disabled, 1 - enabled, silently disable them in case of failure, 2 - enabled, throw an exception in case of failure", 0) \
@ -515,6 +517,7 @@ class IColumn;
M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \ M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \ M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, "Maximum size of batch for multi request to [Zoo]Keeper during backup or restore", 0) \ M(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, "Maximum size of batch for multi request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_s3_retry_attempts, 1000, "Setting for Aws::Client::RetryStrategy; Aws::Client does retries itself, 0 means no retries. It applies only to backup/restore.", 0) \
M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \ M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
\ \
M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \ M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \
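The two new parallel_replicas_custom_key_range_* settings above define the value universe over which the range filter splits work between replicas. As a rough illustration only (this is not ClickHouse's actual filter construction, and pickReplica is a made-up helper), splitting a [lower, upper) universe into equal per-replica ranges could look like:

#include <cstdint>

/// Illustrative sketch: map a custom key value onto one of `replica_count` equal
/// sub-ranges of [range_lower, range_upper). range_upper == 0 stands for "use the
/// maximum of the key type", mirroring the setting description above.
uint64_t pickReplica(uint64_t key, uint64_t range_lower, uint64_t range_upper, uint64_t replica_count)
{
    if (replica_count == 0)
        return 0;
    if (range_upper == 0)
        range_upper = UINT64_MAX;
    if (key < range_lower)
        return 0;
    if (key >= range_upper)
        return replica_count - 1;
    uint64_t range_size = (range_upper - range_lower) / replica_count;
    if (range_size == 0)
        return 0;
    uint64_t idx = (key - range_lower) / range_size;
    return idx < replica_count ? idx : replica_count - 1;
}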

View File

@ -83,7 +83,7 @@ namespace SettingsChangesHistory
/// For newly added setting choose the most appropriate previous_value (for example, if new setting /// For newly added setting choose the most appropriate previous_value (for example, if new setting
/// controls new feature and it's 'true' by default, use 'false' as previous_value). /// controls new feature and it's 'true' by default, use 'false' as previous_value).
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972) /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history = static const std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{ {
{"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
{"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},
@ -100,7 +100,9 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."},
{"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."},
{"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."},
{"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."} {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."},
{"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"},
{"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"},
}}, }},
{"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"},
{"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."},
@ -111,6 +113,7 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"http_max_chunk_size", 0, 0, "Internal limitation"}, {"http_max_chunk_size", 0, 0, "Internal limitation"},
{"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."},
{"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"},
{"backup_restore_s3_retry_attempts", 0, 1000, "A new setting."},
{"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"},
{"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"},
{"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."},

View File

@ -17,6 +17,13 @@ void registerDataTypeDomainGeo(DataTypeFactory & factory)
std::make_unique<DataTypeCustomDesc>(std::make_unique<DataTypePointName>())); std::make_unique<DataTypeCustomDesc>(std::make_unique<DataTypePointName>()));
}); });
// Custom type for a simple line which consists of several segments.
factory.registerSimpleDataTypeCustom("LineString", []
{
return std::make_pair(DataTypeFactory::instance().get("Array(Point)"),
std::make_unique<DataTypeCustomDesc>(std::make_unique<DataTypeLineStringName>()));
});
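The new LineString type registered above is purely an alias over Array(Point): the storage is an ordered list of (Float64, Float64) pairs, and only the name and the geometry semantics differ. A toy sketch of working with such a representation, using plain standard-library types rather than the boost geometry models ClickHouse actually uses:

#include <cmath>
#include <cstddef>
#include <utility>
#include <vector>

/// Illustrative only: a LineString value seen as a flat list of XY pairs,
/// with a naive Cartesian length computation over consecutive segments.
using PointXY = std::pair<double, double>;
using LineStringXY = std::vector<PointXY>;

double lineStringLength(const LineStringXY & ls)
{
    double total = 0.0;
    for (size_t i = 1; i < ls.size(); ++i)
        total += std::hypot(ls[i].first - ls[i - 1].first, ls[i].second - ls[i - 1].second);
    return total;
}
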
// Custom type for simple polygon without holes stored as Array(Point) // Custom type for simple polygon without holes stored as Array(Point)
factory.registerSimpleDataTypeCustom("Ring", [] factory.registerSimpleDataTypeCustom("Ring", []
{ {

View File

@ -11,6 +11,12 @@ public:
DataTypePointName() : DataTypeCustomFixedName("Point") {} DataTypePointName() : DataTypeCustomFixedName("Point") {}
}; };
class DataTypeLineStringName : public DataTypeCustomFixedName
{
public:
DataTypeLineStringName() : DataTypeCustomFixedName("LineString") {}
};
class DataTypeRingName : public DataTypeCustomFixedName class DataTypeRingName : public DataTypeCustomFixedName
{ {
public: public:

View File

@ -543,6 +543,7 @@ template <typename DataType> constexpr bool IsDataTypeNumber = false;
template <typename DataType> constexpr bool IsDataTypeDateOrDateTime = false; template <typename DataType> constexpr bool IsDataTypeDateOrDateTime = false;
template <typename DataType> constexpr bool IsDataTypeDate = false; template <typename DataType> constexpr bool IsDataTypeDate = false;
template <typename DataType> constexpr bool IsDataTypeEnum = false; template <typename DataType> constexpr bool IsDataTypeEnum = false;
template <typename DataType> constexpr bool IsDataTypeStringOrFixedString = false;
template <typename DataType> constexpr bool IsDataTypeDecimalOrNumber = IsDataTypeDecimal<DataType> || IsDataTypeNumber<DataType>; template <typename DataType> constexpr bool IsDataTypeDecimalOrNumber = IsDataTypeDecimal<DataType> || IsDataTypeNumber<DataType>;
@ -556,6 +557,8 @@ class DataTypeDate;
class DataTypeDate32; class DataTypeDate32;
class DataTypeDateTime; class DataTypeDateTime;
class DataTypeDateTime64; class DataTypeDateTime64;
class DataTypeString;
class DataTypeFixedString;
template <is_decimal T> constexpr bool IsDataTypeDecimal<DataTypeDecimal<T>> = true; template <is_decimal T> constexpr bool IsDataTypeDecimal<DataTypeDecimal<T>> = true;
@ -572,6 +575,9 @@ template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDate32> = tru
template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime> = true; template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime> = true;
template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime64> = true; template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime64> = true;
template <> inline constexpr bool IsDataTypeStringOrFixedString<DataTypeString> = true;
template <> inline constexpr bool IsDataTypeStringOrFixedString<DataTypeFixedString> = true;
template <typename T> template <typename T>
class DataTypeEnum; class DataTypeEnum;

View File

@ -1,20 +1,21 @@
#include <filesystem>
#include <Databases/DatabaseAtomic.h> #include <Databases/DatabaseAtomic.h>
#include <Databases/DatabaseFactory.h>
#include <Databases/DatabaseOnDisk.h> #include <Databases/DatabaseOnDisk.h>
#include <Databases/DatabaseReplicated.h> #include <Databases/DatabaseReplicated.h>
#include <Databases/DatabaseFactory.h> #include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h> #include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <IO/ReadBufferFromFile.h> #include <Interpreters/Context.h>
#include <Interpreters/DDLTask.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Parsers/formatAST.h> #include <Parsers/formatAST.h>
#include <Storages/StorageMaterializedView.h>
#include "Common/logger_useful.h"
#include <Common/PoolId.h> #include <Common/PoolId.h>
#include <Common/atomicRename.h> #include <Common/atomicRename.h>
#include <Common/filesystemHelpers.h> #include <Common/filesystemHelpers.h>
#include <Storages/StorageMaterializedView.h>
#include <Interpreters/Context.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Interpreters/ExternalDictionariesLoader.h>
#include <filesystem>
#include <Interpreters/DDLTask.h>
namespace fs = std::filesystem; namespace fs = std::filesystem;
@ -393,6 +394,7 @@ DatabaseAtomic::DetachedTables DatabaseAtomic::cleanupDetachedTables()
{ {
DetachedTables not_in_use; DetachedTables not_in_use;
auto it = detached_tables.begin(); auto it = detached_tables.begin();
LOG_DEBUG(log, "There are {} detached tables. Start searching non used tables.", detached_tables.size());
while (it != detached_tables.end()) while (it != detached_tables.end())
{ {
if (it->second.unique()) if (it->second.unique())
@ -403,6 +405,7 @@ DatabaseAtomic::DetachedTables DatabaseAtomic::cleanupDetachedTables()
else else
++it; ++it;
} }
LOG_DEBUG(log, "Found {} non used tables in detached tables.", not_in_use.size());
/// It should be destroyed in caller with released database mutex /// It should be destroyed in caller with released database mutex
return not_in_use; return not_in_use;
} }
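The new log lines document what cleanupDetachedTables already does: walk the detached-tables map and move out the entries nobody references any more, so they can be destroyed outside the database mutex. A self-contained sketch of that pattern (names are made up; use_count() == 1 stands in for the unique() check above):

#include <map>
#include <memory>
#include <string>

/// Illustrative only: extract entries whose shared_ptr is held solely by the map,
/// so the caller can let them be destroyed after the mutex is released.
template <typename Table>
std::map<std::string, std::shared_ptr<Table>> takeNotInUse(std::map<std::string, std::shared_ptr<Table>> & detached)
{
    std::map<std::string, std::shared_ptr<Table>> not_in_use;
    for (auto it = detached.begin(); it != detached.end();)
    {
        if (it->second.use_count() == 1)   /// only the map itself still holds it
        {
            not_in_use.emplace(it->first, std::move(it->second));
            it = detached.erase(it);
        }
        else
            ++it;
    }
    return not_in_use;
}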

View File

@ -794,7 +794,7 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromStorage(const String & table_name, cons
throw_on_error); throw_on_error);
create_table_query->set(create_table_query->as<ASTCreateQuery>()->comment, create_table_query->set(create_table_query->as<ASTCreateQuery>()->comment,
std::make_shared<ASTLiteral>("SYSTEM TABLE is built on the fly.")); std::make_shared<ASTLiteral>(storage->getInMemoryMetadata().comment));
return create_table_query; return create_table_query;
} }

View File

@ -122,6 +122,13 @@ DatabaseReplicated::DatabaseReplicated(
fillClusterAuthInfo(db_settings.collection_name.value, context_->getConfigRef()); fillClusterAuthInfo(db_settings.collection_name.value, context_->getConfigRef());
replica_group_name = context_->getConfigRef().getString("replica_group_name", ""); replica_group_name = context_->getConfigRef().getString("replica_group_name", "");
if (!replica_group_name.empty() && database_name.starts_with(DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX))
{
context_->addWarningMessage(fmt::format("There's a Replicated database with a name starting with '{}', "
"and replica_group_name is configured. It may cause collisions in cluster names.",
ALL_GROUPS_CLUSTER_PREFIX));
}
} }
String DatabaseReplicated::getFullReplicaName(const String & shard, const String & replica) String DatabaseReplicated::getFullReplicaName(const String & shard, const String & replica)
@ -173,13 +180,40 @@ ClusterPtr DatabaseReplicated::tryGetCluster() const
return cluster; return cluster;
} }
void DatabaseReplicated::setCluster(ClusterPtr && new_cluster) ClusterPtr DatabaseReplicated::tryGetAllGroupsCluster() const
{ {
std::lock_guard lock{mutex}; std::lock_guard lock{mutex};
cluster = std::move(new_cluster); if (replica_group_name.empty())
return nullptr;
if (cluster_all_groups)
return cluster_all_groups;
/// Database is probably not created or not initialized yet, it's ok to return nullptr
if (is_readonly)
return cluster_all_groups;
try
{
cluster_all_groups = getClusterImpl(/*all_groups*/ true);
}
catch (...)
{
tryLogCurrentException(log);
}
return cluster_all_groups;
} }
ClusterPtr DatabaseReplicated::getClusterImpl() const void DatabaseReplicated::setCluster(ClusterPtr && new_cluster, bool all_groups)
{
std::lock_guard lock{mutex};
if (all_groups)
cluster_all_groups = std::move(new_cluster);
else
cluster = std::move(new_cluster);
}
ClusterPtr DatabaseReplicated::getClusterImpl(bool all_groups) const
{ {
Strings unfiltered_hosts; Strings unfiltered_hosts;
Strings hosts; Strings hosts;
@ -199,17 +233,24 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const
"It's possible if the first replica is not fully created yet " "It's possible if the first replica is not fully created yet "
"or if the last replica was just dropped or due to logical error", zookeeper_path); "or if the last replica was just dropped or due to logical error", zookeeper_path);
hosts.clear(); if (all_groups)
std::vector<String> paths;
for (const auto & host : unfiltered_hosts)
paths.push_back(zookeeper_path + "/replicas/" + host + "/replica_group");
auto replica_groups = zookeeper->tryGet(paths);
for (size_t i = 0; i < paths.size(); ++i)
{ {
if (replica_groups[i].data == replica_group_name) hosts = unfiltered_hosts;
hosts.push_back(unfiltered_hosts[i]); }
else
{
hosts.clear();
std::vector<String> paths;
for (const auto & host : unfiltered_hosts)
paths.push_back(zookeeper_path + "/replicas/" + host + "/replica_group");
auto replica_groups = zookeeper->tryGet(paths);
for (size_t i = 0; i < paths.size(); ++i)
{
if (replica_groups[i].data == replica_group_name)
hosts.push_back(unfiltered_hosts[i]);
}
} }
Int32 cversion = stat.cversion; Int32 cversion = stat.cversion;
@ -274,6 +315,11 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const
bool treat_local_as_remote = false; bool treat_local_as_remote = false;
bool treat_local_port_as_remote = getContext()->getApplicationType() == Context::ApplicationType::LOCAL; bool treat_local_port_as_remote = getContext()->getApplicationType() == Context::ApplicationType::LOCAL;
String cluster_name = TSA_SUPPRESS_WARNING_FOR_READ(database_name); /// FIXME
if (all_groups)
cluster_name = ALL_GROUPS_CLUSTER_PREFIX + cluster_name;
ClusterConnectionParameters params{ ClusterConnectionParameters params{
cluster_auth_info.cluster_username, cluster_auth_info.cluster_username,
cluster_auth_info.cluster_password, cluster_auth_info.cluster_password,
@ -282,7 +328,7 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const
treat_local_port_as_remote, treat_local_port_as_remote,
cluster_auth_info.cluster_secure_connection, cluster_auth_info.cluster_secure_connection,
Priority{1}, Priority{1},
TSA_SUPPRESS_WARNING_FOR_READ(database_name), /// FIXME cluster_name,
cluster_auth_info.cluster_secret}; cluster_auth_info.cluster_secret};
return std::make_shared<Cluster>(getContext()->getSettingsRef(), shards, params); return std::make_shared<Cluster>(getContext()->getSettingsRef(), shards, params);
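The core of the all_groups change is the host selection: the per-group cluster keeps only replicas whose replica_group matches, while the new "all_groups.<database>" cluster keeps every replica. A reduced sketch of that filtering, with made-up container types and no ZooKeeper involved:

#include <string>
#include <utility>
#include <vector>

/// Illustrative only: pick the hosts that belong to the requested cluster flavour.
std::vector<std::string> selectHosts(
    const std::vector<std::pair<std::string, std::string>> & host_and_group,  /// (host, replica_group)
    const std::string & replica_group_name,
    bool all_groups)
{
    std::vector<std::string> hosts;
    for (const auto & [host, group] : host_and_group)
        if (all_groups || group == replica_group_name)
            hosts.push_back(host);
    return hosts;
}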

View File

@ -20,6 +20,8 @@ using ClusterPtr = std::shared_ptr<Cluster>;
class DatabaseReplicated : public DatabaseAtomic class DatabaseReplicated : public DatabaseAtomic
{ {
public: public:
static constexpr auto ALL_GROUPS_CLUSTER_PREFIX = "all_groups.";
DatabaseReplicated(const String & name_, const String & metadata_path_, UUID uuid, DatabaseReplicated(const String & name_, const String & metadata_path_, UUID uuid,
const String & zookeeper_path_, const String & shard_name_, const String & replica_name_, const String & zookeeper_path_, const String & shard_name_, const String & replica_name_,
DatabaseReplicatedSettings db_settings_, DatabaseReplicatedSettings db_settings_,
@ -65,6 +67,7 @@ public:
/// Returns cluster consisting of database replicas /// Returns cluster consisting of database replicas
ClusterPtr tryGetCluster() const; ClusterPtr tryGetCluster() const;
ClusterPtr tryGetAllGroupsCluster() const;
void drop(ContextPtr /*context*/) override; void drop(ContextPtr /*context*/) override;
@ -113,8 +116,8 @@ private:
ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query); ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query);
String readMetadataFile(const String & table_name) const; String readMetadataFile(const String & table_name) const;
ClusterPtr getClusterImpl() const; ClusterPtr getClusterImpl(bool all_groups = false) const;
void setCluster(ClusterPtr && new_cluster); void setCluster(ClusterPtr && new_cluster, bool all_groups = false);
void createEmptyLogEntry(const ZooKeeperPtr & current_zookeeper); void createEmptyLogEntry(const ZooKeeperPtr & current_zookeeper);
@ -155,6 +158,7 @@ private:
UInt64 tables_metadata_digest TSA_GUARDED_BY(metadata_mutex); UInt64 tables_metadata_digest TSA_GUARDED_BY(metadata_mutex);
mutable ClusterPtr cluster; mutable ClusterPtr cluster;
mutable ClusterPtr cluster_all_groups;
LoadTaskPtr startup_replicated_database_task TSA_GUARDED_BY(mutex); LoadTaskPtr startup_replicated_database_task TSA_GUARDED_BY(mutex);
}; };

View File

@ -421,6 +421,8 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
{ {
/// Some replica is added or removed, let's update cached cluster /// Some replica is added or removed, let's update cached cluster
database->setCluster(database->getClusterImpl()); database->setCluster(database->getClusterImpl());
if (!database->replica_group_name.empty())
database->setCluster(database->getClusterImpl(/*all_groups*/ true), /*all_groups*/ true);
out_reason = fmt::format("Entry {} is a dummy task", entry_name); out_reason = fmt::format("Entry {} is a dummy task", entry_name);
return {}; return {};
} }

View File

@ -19,11 +19,15 @@ namespace ProfileEvents
namespace DB namespace DB
{ {
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
struct WriteBufferFromAzureBlobStorage::PartData struct WriteBufferFromAzureBlobStorage::PartData
{ {
Memory<> memory; Memory<> memory;
size_t data_size = 0; size_t data_size = 0;
std::string block_id;
}; };
BufferAllocationPolicyPtr createBufferAllocationPolicy(const AzureObjectStorageSettings & settings) BufferAllocationPolicyPtr createBufferAllocationPolicy(const AzureObjectStorageSettings & settings)
@ -119,22 +123,30 @@ void WriteBufferFromAzureBlobStorage::preFinalize()
// This function should not be run again // This function should not be run again
is_prefinalized = true; is_prefinalized = true;
hidePartialData();
if (hidden_size > 0)
detachBuffer();
setFakeBufferWhenPreFinalized();
/// If there is only one block and size is less than or equal to max_single_part_upload_size /// If there is only one block and size is less than or equal to max_single_part_upload_size
/// then we use single part upload instead of multi part upload /// then we use single part upload instead of multi part upload
if (buffer_allocation_policy->getBufferNumber() == 1) if (block_ids.empty() && detached_part_data.size() == 1 && detached_part_data.front().data_size <= max_single_part_upload_size)
{ {
size_t data_size = size_t(position() - memory.data()); auto part_data = std::move(detached_part_data.front());
if (data_size <= max_single_part_upload_size) auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
{ Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(part_data.memory.data()), part_data.data_size);
auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path); execWithRetry([&](){ block_blob_client.Upload(memory_stream); }, max_unexpected_write_error_retries, part_data.data_size);
Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(memory.data()), data_size); LOG_TRACE(log, "Committed single block for blob `{}`", blob_path);
execWithRetry([&](){ block_blob_client.Upload(memory_stream); }, max_unexpected_write_error_retries, data_size);
LOG_TRACE(log, "Committed single block for blob `{}`", blob_path);
return;
}
}
writePart(); detached_part_data.pop_front();
return;
}
else
{
writeMultipartUpload();
}
} }
void WriteBufferFromAzureBlobStorage::finalizeImpl() void WriteBufferFromAzureBlobStorage::finalizeImpl()
@ -144,9 +156,13 @@ void WriteBufferFromAzureBlobStorage::finalizeImpl()
if (!is_prefinalized) if (!is_prefinalized)
preFinalize(); preFinalize();
chassert(offset() == 0);
chassert(hidden_size == 0);
task_tracker->waitAll();
if (!block_ids.empty()) if (!block_ids.empty())
{ {
task_tracker->waitAll();
auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path); auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
execWithRetry([&](){ block_blob_client.CommitBlockList(block_ids); }, max_unexpected_write_error_retries); execWithRetry([&](){ block_blob_client.CommitBlockList(block_ids); }, max_unexpected_write_error_retries);
LOG_TRACE(log, "Committed {} blocks for blob `{}`", block_ids.size(), blob_path); LOG_TRACE(log, "Committed {} blocks for blob `{}`", block_ids.size(), blob_path);
@ -155,14 +171,66 @@ void WriteBufferFromAzureBlobStorage::finalizeImpl()
void WriteBufferFromAzureBlobStorage::nextImpl() void WriteBufferFromAzureBlobStorage::nextImpl()
{ {
if (is_prefinalized)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Cannot write to prefinalized buffer for Azure Blob Storage, the file could have been created");
task_tracker->waitIfAny(); task_tracker->waitIfAny();
writePart();
hidePartialData();
reallocateFirstBuffer();
if (available() > 0)
return;
detachBuffer();
if (detached_part_data.size() > 1)
writeMultipartUpload();
allocateBuffer(); allocateBuffer();
} }
void WriteBufferFromAzureBlobStorage::hidePartialData()
{
if (write_settings.remote_throttler)
write_settings.remote_throttler->add(offset(), ProfileEvents::RemoteWriteThrottlerBytes, ProfileEvents::RemoteWriteThrottlerSleepMicroseconds);
chassert(memory.size() >= hidden_size + offset());
hidden_size += offset();
chassert(memory.data() + hidden_size == working_buffer.begin() + offset());
chassert(memory.data() + hidden_size == position());
WriteBuffer::set(memory.data() + hidden_size, memory.size() - hidden_size);
chassert(offset() == 0);
}
void WriteBufferFromAzureBlobStorage::reallocateFirstBuffer()
{
chassert(offset() == 0);
if (buffer_allocation_policy->getBufferNumber() > 1 || available() > 0)
return;
const size_t max_first_buffer = buffer_allocation_policy->getBufferSize();
if (memory.size() == max_first_buffer)
return;
size_t size = std::min(memory.size() * 2, max_first_buffer);
memory.resize(size);
WriteBuffer::set(memory.data() + hidden_size, memory.size() - hidden_size);
chassert(offset() == 0);
}
void WriteBufferFromAzureBlobStorage::allocateBuffer() void WriteBufferFromAzureBlobStorage::allocateBuffer()
{ {
buffer_allocation_policy->nextBuffer(); buffer_allocation_policy->nextBuffer();
chassert(0 == hidden_size);
auto size = buffer_allocation_policy->getBufferSize(); auto size = buffer_allocation_policy->getBufferSize();
if (buffer_allocation_policy->getBufferNumber() == 1) if (buffer_allocation_policy->getBufferNumber() == 1)
@ -172,30 +240,56 @@ void WriteBufferFromAzureBlobStorage::allocateBuffer()
WriteBuffer::set(memory.data(), memory.size()); WriteBuffer::set(memory.data(), memory.size());
} }
void WriteBufferFromAzureBlobStorage::writePart() void WriteBufferFromAzureBlobStorage::detachBuffer()
{ {
auto data_size = size_t(position() - memory.data()); size_t data_size = size_t(position() - memory.data());
if (data_size == 0) if (data_size == 0)
return; return;
const std::string & block_id = block_ids.emplace_back(getRandomASCIIString(64)); chassert(data_size == hidden_size);
std::shared_ptr<PartData> part_data = std::make_shared<PartData>(std::move(memory), data_size, block_id);
WriteBuffer::set(nullptr, 0);
auto upload_worker = [this, part_data] () auto buf = std::move(memory);
WriteBuffer::set(nullptr, 0);
total_size += hidden_size;
hidden_size = 0;
detached_part_data.push_back({std::move(buf), data_size});
WriteBuffer::set(nullptr, 0);
}
void WriteBufferFromAzureBlobStorage::writePart(WriteBufferFromAzureBlobStorage::PartData && part_data)
{
const std::string & block_id = block_ids.emplace_back(getRandomASCIIString(64));
auto worker_data = std::make_shared<std::tuple<std::string, WriteBufferFromAzureBlobStorage::PartData>>(block_id, std::move(part_data));
auto upload_worker = [this, worker_data] ()
{ {
auto & data_size = std::get<1>(*worker_data).data_size;
auto & data_block_id = std::get<0>(*worker_data);
auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path); auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(part_data->memory.data()), part_data->data_size); Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(std::get<1>(*worker_data).memory.data()), data_size);
execWithRetry([&](){ block_blob_client.StageBlock(part_data->block_id, memory_stream); }, max_unexpected_write_error_retries, part_data->data_size); execWithRetry([&](){ block_blob_client.StageBlock(data_block_id, memory_stream); }, max_unexpected_write_error_retries, data_size);
if (write_settings.remote_throttler)
write_settings.remote_throttler->add(part_data->data_size, ProfileEvents::RemoteWriteThrottlerBytes, ProfileEvents::RemoteWriteThrottlerSleepMicroseconds);
}; };
task_tracker->add(std::move(upload_worker)); task_tracker->add(std::move(upload_worker));
} }
void WriteBufferFromAzureBlobStorage::setFakeBufferWhenPreFinalized()
{
WriteBuffer::set(fake_buffer_when_prefinalized, sizeof(fake_buffer_when_prefinalized));
}
void WriteBufferFromAzureBlobStorage::writeMultipartUpload()
{
while (!detached_part_data.empty())
{
writePart(std::move(detached_part_data.front()));
detached_part_data.pop_front();
}
}
} }
#endif #endif
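The rewrite above separates "hide what was written", "detach the filled buffer" and "upload", and then chooses between a single Upload() for one small part and a staged-blocks-plus-commit multipart upload otherwise. A toy model of just that decision; uploadWhole, stageBlock and commitBlockList are placeholders, not the Azure SDK calls:

#include <cstddef>
#include <deque>
#include <string>
#include <vector>

struct Part { std::vector<char> data; };

void uploadWhole(const Part &) {}                          /// placeholder: single-shot blob upload
std::string stageBlock(const Part &) { return "block_id"; }/// placeholder: stage one block, return its id
void commitBlockList(const std::vector<std::string> &) {}  /// placeholder: commit all staged blocks

void finalizeBlob(std::deque<Part> & detached_parts, std::vector<std::string> & block_ids, size_t max_single_part_upload_size)
{
    /// One small part and nothing staged yet: a single upload call is enough.
    if (block_ids.empty() && detached_parts.size() == 1
        && detached_parts.front().data.size() <= max_single_part_upload_size)
    {
        uploadWhole(detached_parts.front());
        detached_parts.pop_front();
        return;
    }

    /// Otherwise stage every remaining part as a block and commit the block list.
    while (!detached_parts.empty())
    {
        block_ids.push_back(stageBlock(detached_parts.front()));
        detached_parts.pop_front();
    }
    if (!block_ids.empty())
        commitBlockList(block_ids);
}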

View File

@ -48,8 +48,13 @@ public:
private: private:
struct PartData; struct PartData;
void writePart(); void writeMultipartUpload();
void writePart(PartData && part_data);
void detachBuffer();
void reallocateFirstBuffer();
void allocateBuffer(); void allocateBuffer();
void hidePartialData();
void setFakeBufferWhenPreFinalized();
void finalizeImpl() override; void finalizeImpl() override;
void execWithRetry(std::function<void()> func, size_t num_tries, size_t cost = 0); void execWithRetry(std::function<void()> func, size_t num_tries, size_t cost = 0);
@ -77,9 +82,16 @@ private:
MemoryBufferPtr allocateBuffer() const; MemoryBufferPtr allocateBuffer() const;
char fake_buffer_when_prefinalized[1] = {};
bool first_buffer = true; bool first_buffer = true;
size_t total_size = 0;
size_t hidden_size = 0;
std::unique_ptr<TaskTracker> task_tracker; std::unique_ptr<TaskTracker> task_tracker;
std::deque<PartData> detached_part_data;
}; };
} }

View File

@ -166,6 +166,8 @@ public:
return client.get(); return client.get();
} }
bool supportParallelWrite() const override { return true; }
private: private:
using SharedAzureClientPtr = std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient>; using SharedAzureClientPtr = std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient>;
void removeObjectImpl(const StoredObject & object, const SharedAzureClientPtr & client_ptr, bool if_exists); void removeObjectImpl(const StoredObject & object, const SharedAzureClientPtr & client_ptr, bool if_exists);

View File

@ -314,7 +314,7 @@ void checkFunctionArgumentSizes(const ColumnsWithTypeAndName & arguments, size_t
if (current_size != input_rows_count) if (current_size != input_rows_count)
throw Exception( throw Exception(
ErrorCodes::LOGICAL_ERROR, ErrorCodes::LOGICAL_ERROR,
"Expected the argument nº#{} ('{}' of type {}) to have {} rows, but it has {}", "Expected the argument {} ('{}' of type {}) to have {} rows, but it has {}",
i + 1, i + 1,
arguments[i].name, arguments[i].name,
arguments[i].type->getName(), arguments[i].type->getName(),

View File

@ -709,7 +709,7 @@ bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateL
else else
return tryReadFloatTextFast(x, rb); return tryReadFloatTextFast(x, rb);
} }
else /*if constexpr (is_integer_v<typename DataType::FieldType>)*/ else /*if constexpr (is_integral_v<typename DataType::FieldType>)*/
return tryReadIntText(x, rb); return tryReadIntText(x, rb);
} }
@ -814,6 +814,16 @@ enum class ConvertFromStringParsingMode : uint8_t
BestEffortUS BestEffortUS
}; };
struct AccurateConvertStrategyAdditions
{
UInt32 scale { 0 };
};
struct AccurateOrNullConvertStrategyAdditions
{
UInt32 scale { 0 };
};
template <typename FromDataType, typename ToDataType, typename Name, template <typename FromDataType, typename ToDataType, typename Name,
ConvertFromStringExceptionMode exception_mode, ConvertFromStringParsingMode parsing_mode> ConvertFromStringExceptionMode exception_mode, ConvertFromStringParsingMode parsing_mode>
struct ConvertThroughParsing struct ConvertThroughParsing
@ -1020,7 +1030,13 @@ struct ConvertThroughParsing
break; break;
} }
} }
parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing); if constexpr (std::is_same_v<Additions, AccurateConvertStrategyAdditions>)
{
if (!tryParseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing))
throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse string to type {}", TypeName<typename ToDataType::FieldType>);
}
else
parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing);
} while (false); } while (false);
} }
} }
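With the AccurateConvertStrategyAdditions branch above, an accurate CAST parses the string and throws when parsing fails, whereas the OrNull variant yields NULL. The same contrast expressed in plain standard C++; std::from_chars replaces the ReadBuffer-based parsers, so this is only an analogy:

#include <charconv>
#include <optional>
#include <stdexcept>
#include <string>

/// accurateCastOrNull-style result: nullopt when the whole string is not an integer.
std::optional<long long> tryParseInt(const std::string & s)
{
    long long value = 0;
    auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
    if (ec != std::errc() || ptr != s.data() + s.size())
        return std::nullopt;
    return value;
}

/// accurateCast-style result: throw instead of silently producing a default.
long long parseIntOrThrow(const std::string & s)
{
    if (auto v = tryParseInt(s))
        return *v;
    throw std::invalid_argument("Cannot parse string as integer");
}
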
@ -1120,16 +1136,6 @@ struct ConvertThroughParsing
/// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. /// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type.
struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; }; struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; };
struct AccurateConvertStrategyAdditions
{
UInt32 scale { 0 };
};
struct AccurateOrNullConvertStrategyAdditions
{
UInt32 scale { 0 };
};
enum class BehaviourOnErrorFromString : uint8_t enum class BehaviourOnErrorFromString : uint8_t
{ {
ConvertDefaultBehaviorTag, ConvertDefaultBehaviorTag,
@ -3174,8 +3180,11 @@ private:
{ {
TypeIndex from_type_index = from_type->getTypeId(); TypeIndex from_type_index = from_type->getTypeId();
WhichDataType which(from_type_index); WhichDataType which(from_type_index);
TypeIndex to_type_index = to_type->getTypeId();
WhichDataType to(to_type_index);
bool can_apply_accurate_cast = (cast_type == CastType::accurate || cast_type == CastType::accurateOrNull) bool can_apply_accurate_cast = (cast_type == CastType::accurate || cast_type == CastType::accurateOrNull)
&& (which.isInt() || which.isUInt() || which.isFloat()); && (which.isInt() || which.isUInt() || which.isFloat());
can_apply_accurate_cast |= cast_type == CastType::accurate && which.isStringOrFixedString() && to.isNativeInteger();
FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior = default_date_time_overflow_behavior; FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior = default_date_time_overflow_behavior;
if (context) if (context)
@ -3260,6 +3269,20 @@ private:
return true; return true;
} }
} }
else if constexpr (IsDataTypeStringOrFixedString<LeftDataType>)
{
if constexpr (IsDataTypeNumber<RightDataType>)
{
chassert(wrapper_cast_type == CastType::accurate);
result_column = ConvertImpl<LeftDataType, RightDataType, FunctionCastName>::execute(
arguments,
result_type,
input_rows_count,
BehaviourOnErrorFromString::ConvertDefaultBehaviorTag,
AccurateConvertStrategyAdditions());
}
return true;
}
return false; return false;
}); });

View File

@ -31,7 +31,6 @@ namespace DB
namespace ErrorCodes namespace ErrorCodes
{ {
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND; extern const int ARGUMENT_OUT_OF_BOUND;
extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_COLUMN;
@ -40,26 +39,22 @@ namespace ErrorCodes
} }
/** Rounding Functions: /// Rounding Functions:
* round(x, N) - rounding to nearest (N = 0 by default). Use banker's rounding for floating point numbers. /// - round(x, N) - rounding to nearest (N = 0 by default). Use banker's rounding for floating point numbers.
* roundBankers(x, N) - rounding to nearest (N = 0 by default). Use banker's rounding for all numbers. /// - roundBankers(x, N) - rounding to nearest (N = 0 by default). Use banker's rounding for all numbers.
* floor(x, N) is the largest number <= x (N = 0 by default). /// - floor(x, N) is the largest number <= x (N = 0 by default).
* ceil(x, N) is the smallest number >= x (N = 0 by default). /// - ceil(x, N) is the smallest number >= x (N = 0 by default).
* trunc(x, N) - is the largest by absolute value number that is not greater than x by absolute value (N = 0 by default). /// - trunc(x, N) - is the largest by absolute value number that is not greater than x by absolute value (N = 0 by default).
*
* The value of the parameter N (scale):
* - N > 0: round to the number with N decimal places after the decimal point
* - N < 0: round to an integer with N zero characters
* - N = 0: round to an integer
*
* Type of the result is the type of argument.
* For integer arguments, when passing negative scale, overflow can occur.
* In that case, the behavior is implementation specific.
*/
/// The value of the parameter N (scale):
/// - N > 0: round to the number with N decimal places after the decimal point
/// - N < 0: round to an integer with N zero characters
/// - N = 0: round to an integer
/** This parameter controls the behavior of the rounding functions. /// Type of the result is the type of argument.
*/ /// For integer arguments, when passing negative scale, overflow can occur. In that case, the behavior is undefined.
/// Controls the behavior of the rounding functions.
enum class ScaleMode : uint8_t enum class ScaleMode : uint8_t
{ {
Positive, // round to a number with N decimal places after the decimal point Positive, // round to a number with N decimal places after the decimal point
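The reworked comment spells out the scale convention: N > 0 rounds to N decimal places, N < 0 rounds to a multiple of 10^-N, and N = 0 rounds to an integer. A one-function illustration of that convention, ignoring the banker's-rounding tie-breaking and the integer overflow caveat described above:

#include <cmath>

/// roundToScale(3.14159, 2) == 3.14, roundToScale(1234.0, -2) == 1200.0, roundToScale(2.0, 0) == 2.0
/// (std::round breaks ties away from zero, unlike the banker's rounding used for floats above).
double roundToScale(double x, int n)
{
    double scale = std::pow(10.0, n);
    return std::round(x * scale) / scale;
}
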
@ -75,7 +70,7 @@ enum class RoundingMode : uint8_t
Ceil = _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC, Ceil = _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC,
Trunc = _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC, Trunc = _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC,
#else #else
Round = 8, /// Values are correspond to above just in case. Round = 8, /// Values correspond to above values, just in case.
Floor = 9, Floor = 9,
Ceil = 10, Ceil = 10,
Trunc = 11, Trunc = 11,
@ -84,16 +79,21 @@ enum class RoundingMode : uint8_t
enum class TieBreakingMode : uint8_t enum class TieBreakingMode : uint8_t
{ {
Auto, // use banker's rounding for floating point numbers, round up otherwise Auto, /// banker's rounding for floating point numbers, round up otherwise
Bankers, // use banker's rounding Bankers, /// banker's rounding
};
enum class Vectorize : uint8_t
{
No,
Yes
}; };
/// For N, no more than the number of digits in the largest type. /// For N, no more than the number of digits in the largest type.
using Scale = Int16; using Scale = Int16;
/** Rounding functions for integer values. /// Rounding functions for integer values.
*/
template <typename T, RoundingMode rounding_mode, ScaleMode scale_mode, TieBreakingMode tie_breaking_mode> template <typename T, RoundingMode rounding_mode, ScaleMode scale_mode, TieBreakingMode tie_breaking_mode>
struct IntegerRoundingComputation struct IntegerRoundingComputation
{ {
@ -149,6 +149,8 @@ struct IntegerRoundingComputation
return x; return x;
} }
} }
std::unreachable();
} }
static ALWAYS_INLINE T compute(T x, T scale) static ALWAYS_INLINE T compute(T x, T scale)
@ -161,9 +163,12 @@ struct IntegerRoundingComputation
case ScaleMode::Negative: case ScaleMode::Negative:
return computeImpl(x, scale); return computeImpl(x, scale);
} }
std::unreachable();
} }
static ALWAYS_INLINE void compute(const T * __restrict in, size_t scale, T * __restrict out) requires std::integral<T> static ALWAYS_INLINE void compute(const T * __restrict in, size_t scale, T * __restrict out)
requires std::integral<T>
{ {
if constexpr (sizeof(T) <= sizeof(scale) && scale_mode == ScaleMode::Negative) if constexpr (sizeof(T) <= sizeof(scale) && scale_mode == ScaleMode::Negative)
{ {
@ -176,20 +181,23 @@ struct IntegerRoundingComputation
*out = compute(*in, static_cast<T>(scale)); *out = compute(*in, static_cast<T>(scale));
} }
static ALWAYS_INLINE void compute(const T * __restrict in, T scale, T * __restrict out) requires(!std::integral<T>) static ALWAYS_INLINE void compute(const T * __restrict in, T scale, T * __restrict out)
requires(!std::integral<T>)
{ {
*out = compute(*in, scale); *out = compute(*in, scale);
} }
}; };
template <typename T, Vectorize vectorize>
class FloatRoundingComputationBase;
#ifdef __SSE4_1__ #ifdef __SSE4_1__
template <typename T> /// Vectorized implementation for x86.
class BaseFloatRoundingComputation;
template <> template <>
class BaseFloatRoundingComputation<Float32> class FloatRoundingComputationBase<Float32, Vectorize::Yes>
{ {
public: public:
using ScalarType = Float32; using ScalarType = Float32;
@ -210,7 +218,7 @@ public:
}; };
template <> template <>
class BaseFloatRoundingComputation<Float64> class FloatRoundingComputationBase<Float64, Vectorize::Yes>
{ {
public: public:
using ScalarType = Float64; using ScalarType = Float64;
@ -230,9 +238,9 @@ public:
} }
}; };
#else #endif
/// Implementation for ARM. Not vectorized. /// Sequential implementation for ARM. Also used for scalar arguments.
inline float roundWithMode(float x, RoundingMode mode) inline float roundWithMode(float x, RoundingMode mode)
{ {
@ -243,6 +251,8 @@ inline float roundWithMode(float x, RoundingMode mode)
case RoundingMode::Ceil: return ceilf(x); case RoundingMode::Ceil: return ceilf(x);
case RoundingMode::Trunc: return truncf(x); case RoundingMode::Trunc: return truncf(x);
} }
std::unreachable();
} }
inline double roundWithMode(double x, RoundingMode mode) inline double roundWithMode(double x, RoundingMode mode)
@ -254,10 +264,12 @@ inline double roundWithMode(double x, RoundingMode mode)
case RoundingMode::Ceil: return ceil(x); case RoundingMode::Ceil: return ceil(x);
case RoundingMode::Trunc: return trunc(x); case RoundingMode::Trunc: return trunc(x);
} }
std::unreachable();
} }
template <typename T> template <typename T>
class BaseFloatRoundingComputation class FloatRoundingComputationBase<T, Vectorize::No>
{ {
public: public:
using ScalarType = T; using ScalarType = T;
@ -277,15 +289,13 @@ public:
} }
}; };
#endif
/** Implementation of low-level round-off functions for floating-point values. /** Implementation of low-level round-off functions for floating-point values.
*/ */
template <typename T, RoundingMode rounding_mode, ScaleMode scale_mode> template <typename T, RoundingMode rounding_mode, ScaleMode scale_mode, Vectorize vectorize>
class FloatRoundingComputation : public BaseFloatRoundingComputation<T> class FloatRoundingComputation : public FloatRoundingComputationBase<T, vectorize>
{ {
using Base = BaseFloatRoundingComputation<T>; using Base = FloatRoundingComputationBase<T, vectorize>;
public: public:
static void compute(const T * __restrict in, const typename Base::VectorType & scale, T * __restrict out) static void compute(const T * __restrict in, const typename Base::VectorType & scale, T * __restrict out)
@ -317,15 +327,22 @@ struct FloatRoundingImpl
private: private:
static_assert(!is_decimal<T>); static_assert(!is_decimal<T>);
using Op = FloatRoundingComputation<T, rounding_mode, scale_mode>; template <Vectorize vectorize =
using Data = std::array<T, Op::data_count>; #ifdef __SSE4_1__
Vectorize::Yes
#else
Vectorize::No
#endif
>
using Op = FloatRoundingComputation<T, rounding_mode, scale_mode, vectorize>;
using Data = std::array<T, Op<>::data_count>;
using ColumnType = ColumnVector<T>; using ColumnType = ColumnVector<T>;
using Container = typename ColumnType::Container; using Container = typename ColumnType::Container;
public: public:
static NO_INLINE void apply(const Container & in, size_t scale, Container & out) static NO_INLINE void apply(const Container & in, size_t scale, Container & out)
{ {
auto mm_scale = Op::prepare(scale); auto mm_scale = Op<>::prepare(scale);
const size_t data_count = std::tuple_size<Data>(); const size_t data_count = std::tuple_size<Data>();
@ -337,7 +354,7 @@ public:
while (p_in < limit) while (p_in < limit)
{ {
Op::compute(p_in, mm_scale, p_out); Op<>::compute(p_in, mm_scale, p_out);
p_in += data_count; p_in += data_count;
p_out += data_count; p_out += data_count;
} }
@ -350,10 +367,17 @@ public:
size_t tail_size_bytes = (end_in - p_in) * sizeof(*p_in); size_t tail_size_bytes = (end_in - p_in) * sizeof(*p_in);
memcpy(&tmp_src, p_in, tail_size_bytes); memcpy(&tmp_src, p_in, tail_size_bytes);
Op::compute(reinterpret_cast<T *>(&tmp_src), mm_scale, reinterpret_cast<T *>(&tmp_dst)); Op<>::compute(reinterpret_cast<T *>(&tmp_src), mm_scale, reinterpret_cast<T *>(&tmp_dst));
memcpy(p_out, &tmp_dst, tail_size_bytes); memcpy(p_out, &tmp_dst, tail_size_bytes);
} }
} }
static void applyOne(T in, size_t scale, T& out)
{
using ScalarOp = Op<Vectorize::No>;
auto s = ScalarOp::prepare(scale);
ScalarOp::compute(&in, s, &out);
}
}; };
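FloatRoundingImpl::apply above processes the column in fixed-size chunks (SSE register width when available) and handles the trailing elements by copying them into a temporary chunk so the same kernel can be reused. The generic shape of that tail handling, with the rounding kernel abstracted away and all names illustrative:

#include <cstddef>
#include <cstring>
#include <vector>

/// Illustrative only, for arithmetic T: run `op` over full chunks of `data_count`
/// elements, then over a zero-padded temporary holding the tail.
template <size_t data_count, typename T, typename ChunkOp>
void applyChunked(const std::vector<T> & in, std::vector<T> & out, ChunkOp op)
{
    out.resize(in.size());
    const T * p_in = in.data();
    T * p_out = out.data();
    const T * limit = in.data() + in.size() / data_count * data_count;

    while (p_in < limit)
    {
        op(p_in, p_out);          /// processes exactly data_count elements
        p_in += data_count;
        p_out += data_count;
    }

    if (p_in < in.data() + in.size())
    {
        T tmp_src[data_count]{};
        T tmp_dst[data_count]{};
        size_t tail = static_cast<size_t>(in.data() + in.size() - p_in);
        std::memcpy(tmp_src, p_in, tail * sizeof(T));
        op(tmp_src, tmp_dst);
        std::memcpy(p_out, tmp_dst, tail * sizeof(T));
    }
}
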
template <typename T, RoundingMode rounding_mode, ScaleMode scale_mode, TieBreakingMode tie_breaking_mode> template <typename T, RoundingMode rounding_mode, ScaleMode scale_mode, TieBreakingMode tie_breaking_mode>
@ -409,6 +433,11 @@ public:
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected 'scale' parameter passed to function"); throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected 'scale' parameter passed to function");
} }
} }
static void applyOne(T in, size_t scale, T& out)
{
Op::compute(&in, scale, &out);
}
}; };
@ -444,11 +473,40 @@ public:
memcpy(out.data(), in.data(), in.size() * sizeof(T)); memcpy(out.data(), in.data(), in.size() * sizeof(T));
} }
} }
static void applyOne(NativeType in, UInt32 in_scale, NativeType& out, Scale scale_arg)
{
scale_arg = in_scale - scale_arg;
if (scale_arg > 0)
{
auto scale = intExp10OfSize<NativeType>(scale_arg);
Op::compute(&in, scale, &out);
}
else
{
memcpy(&out, &in, sizeof(T));
}
}
}; };
/// Select the appropriate processing algorithm depending on the scale.
inline void validateScale(Int64 scale64)
{
if (scale64 > std::numeric_limits<Scale>::max() || scale64 < std::numeric_limits<Scale>::min())
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale argument for rounding function is too large");
}
/** Select the appropriate processing algorithm depending on the scale. inline Scale getScaleArg(const ColumnConst* scale_col)
*/ {
const auto & scale_field = scale_col->getField();
Int64 scale64 = scale_field.get<Int64>();
validateScale(scale64);
return scale64;
}
/// Generic dispatcher
template <typename T, RoundingMode rounding_mode, TieBreakingMode tie_breaking_mode> template <typename T, RoundingMode rounding_mode, TieBreakingMode tie_breaking_mode>
struct Dispatcher struct Dispatcher
{ {
@ -457,30 +515,65 @@ struct Dispatcher
FloatRoundingImpl<T, rounding_mode, scale_mode>, FloatRoundingImpl<T, rounding_mode, scale_mode>,
IntegerRoundingImpl<T, rounding_mode, scale_mode, tie_breaking_mode>>; IntegerRoundingImpl<T, rounding_mode, scale_mode, tie_breaking_mode>>;
static ColumnPtr apply(const IColumn * col_general, Scale scale_arg) template <typename ScaleType>
static ColumnPtr apply(const IColumn * value_col, const IColumn * scale_col = nullptr)
{ {
const auto & col = checkAndGetColumn<ColumnVector<T>>(*col_general); const auto & value_col_typed = checkAndGetColumn<ColumnVector<T>>(*value_col);
auto col_res = ColumnVector<T>::create(); auto col_res = ColumnVector<T>::create();
typename ColumnVector<T>::Container & vec_res = col_res->getData(); typename ColumnVector<T>::Container & vec_res = col_res->getData();
vec_res.resize(col.getData().size()); vec_res.resize(value_col_typed.getData().size());
if (!vec_res.empty()) if (!vec_res.empty())
{ {
if (scale_arg == 0) if (scale_col == nullptr || isColumnConst(*scale_col))
{ {
size_t scale = 1; auto scale_arg = (scale_col == nullptr) ? 0 : getScaleArg(checkAndGetColumnConst<ColumnVector<ScaleType>>(scale_col));
FunctionRoundingImpl<ScaleMode::Zero>::apply(col.getData(), scale, vec_res); if (scale_arg == 0)
{
size_t scale = 1;
FunctionRoundingImpl<ScaleMode::Zero>::apply(value_col_typed.getData(), scale, vec_res);
}
else if (scale_arg > 0)
{
size_t scale = intExp10(scale_arg);
FunctionRoundingImpl<ScaleMode::Positive>::apply(value_col_typed.getData(), scale, vec_res);
}
else
{
size_t scale = intExp10(-scale_arg);
FunctionRoundingImpl<ScaleMode::Negative>::apply(value_col_typed.getData(), scale, vec_res);
}
} }
else if (scale_arg > 0) /// Non-const scale argument:
else if (const auto * scale_col_typed = checkAndGetColumn<ColumnVector<ScaleType>>(scale_col))
{ {
size_t scale = intExp10(scale_arg); const auto & value_data = value_col_typed.getData();
FunctionRoundingImpl<ScaleMode::Positive>::apply(col.getData(), scale, vec_res); const auto & scale_data = scale_col_typed->getData();
} const size_t rows = value_data.size();
else
{ for (size_t i = 0; i < rows; ++i)
size_t scale = intExp10(-scale_arg); {
FunctionRoundingImpl<ScaleMode::Negative>::apply(col.getData(), scale, vec_res); Int64 scale64 = scale_data[i];
validateScale(scale64);
Scale raw_scale = scale64;
if (raw_scale == 0)
{
size_t scale = 1;
FunctionRoundingImpl<ScaleMode::Zero>::applyOne(value_data[i], scale, vec_res[i]);
}
else if (raw_scale > 0)
{
size_t scale = intExp10(raw_scale);
FunctionRoundingImpl<ScaleMode::Positive>::applyOne(value_data[i], scale, vec_res[i]);
}
else
{
size_t scale = intExp10(-raw_scale);
FunctionRoundingImpl<ScaleMode::Negative>::applyOne(value_data[i], scale, vec_res[i]);
}
}
} }
} }
@ -488,28 +581,51 @@ struct Dispatcher
} }
}; };
/// Dispatcher for Decimal inputs
template <is_decimal T, RoundingMode rounding_mode, TieBreakingMode tie_breaking_mode> template <is_decimal T, RoundingMode rounding_mode, TieBreakingMode tie_breaking_mode>
struct Dispatcher<T, rounding_mode, tie_breaking_mode> struct Dispatcher<T, rounding_mode, tie_breaking_mode>
{ {
public: public:
static ColumnPtr apply(const IColumn * col_general, Scale scale_arg) template <typename ScaleType>
static ColumnPtr apply(const IColumn * value_col, const IColumn * scale_col = nullptr)
{ {
const auto & col = checkAndGetColumn<ColumnDecimal<T>>(*col_general); const auto & value_col_typed = checkAndGetColumn<ColumnDecimal<T>>(*value_col);
const typename ColumnDecimal<T>::Container & vec_src = col.getData(); const typename ColumnDecimal<T>::Container & vec_src = value_col_typed.getData();
auto col_res = ColumnDecimal<T>::create(vec_src.size(), col.getScale()); auto col_res = ColumnDecimal<T>::create(vec_src.size(), value_col_typed.getScale());
auto & vec_res = col_res->getData(); auto & vec_res = col_res->getData();
if (!vec_res.empty()) if (!vec_res.empty())
DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::apply(col.getData(), col.getScale(), vec_res, scale_arg); {
if (scale_col == nullptr || isColumnConst(*scale_col))
{
auto scale_arg = scale_col == nullptr ? 0 : getScaleArg(checkAndGetColumnConst<ColumnVector<ScaleType>>(scale_col));
DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::apply(value_col_typed.getData(), value_col_typed.getScale(), vec_res, scale_arg);
}
/// Non-const scale argument
else if (const auto * scale_col_typed = checkAndGetColumn<ColumnVector<ScaleType>>(scale_col))
{
const auto & scale = scale_col_typed->getData();
const size_t rows = vec_src.size();
for (size_t i = 0; i < rows; ++i)
{
Int64 scale64 = scale[i];
validateScale(scale64);
Scale raw_scale = scale64;
DecimalRoundingImpl<T, rounding_mode, tie_breaking_mode>::applyOne(value_col_typed.getElement(i), value_col_typed.getScale(),
reinterpret_cast<ColumnDecimal<T>::NativeT&>(col_res->getElement(i)), raw_scale);
}
}
}
return col_res; return col_res;
} }
}; };
/** A template for functions that round the value of an input parameter of type /// Functions that round the value of an input parameter of type (U)Int8/16/32/64, Float32/64 or Decimal32/64/128.
* (U)Int8/16/32/64, Float32/64 or Decimal32/64/128, and accept an additional optional parameter (default is 0). /// Accept an additional optional parameter of type (U)Int8/16/32/64 (0 by default).
*/
template <typename Name, RoundingMode rounding_mode, TieBreakingMode tie_breaking_mode> template <typename Name, RoundingMode rounding_mode, TieBreakingMode tie_breaking_mode>
class FunctionRounding : public IFunction class FunctionRounding : public IFunction
{ {
@ -517,75 +633,58 @@ public:
static constexpr auto name = Name::name; static constexpr auto name = Name::name;
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionRounding>(); } static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionRounding>(); }
String getName() const override String getName() const override { return name; }
{
return name;
}
bool isVariadic() const override { return true; } bool isVariadic() const override { return true; }
size_t getNumberOfArguments() const override { return 0; } size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
/// Get result types by argument types. If the function does not apply to these arguments, throw an exception.
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
if ((arguments.empty()) || (arguments.size() > 2))
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 1 or 2.",
getName(), arguments.size());
for (const auto & type : arguments)
if (!isNumber(type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}",
arguments[0]->getName(), getName());
return arguments[0];
}
static Scale getScaleArg(const ColumnsWithTypeAndName & arguments)
{
if (arguments.size() == 2)
{
const IColumn & scale_column = *arguments[1].column;
if (!isColumnConst(scale_column))
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale argument for rounding functions must be constant");
Field scale_field = assert_cast<const ColumnConst &>(scale_column).getField();
if (scale_field.getType() != Field::Types::UInt64
&& scale_field.getType() != Field::Types::Int64)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale argument for rounding functions must have integer type");
Int64 scale64 = scale_field.get<Int64>();
if (scale64 > std::numeric_limits<Scale>::max()
|| scale64 < std::numeric_limits<Scale>::min())
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale argument for rounding function is too large");
return scale64;
}
return 0;
}
bool useDefaultImplementationForConstants() const override { return true; } bool useDefaultImplementationForConstants() const override { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
FunctionArgumentDescriptors mandatory_args{
{"x", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNumber), nullptr, "A number to round"},
};
FunctionArgumentDescriptors optional_args{
{"N", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), nullptr, "The number of decimal places to round to"},
};
validateFunctionArgumentTypes(*this, arguments, mandatory_args, optional_args);
return arguments[0].type;
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{ {
const ColumnWithTypeAndName & column = arguments[0]; const ColumnWithTypeAndName & value_arg = arguments[0];
Scale scale_arg = getScaleArg(arguments);
ColumnPtr res; ColumnPtr res;
auto call = [&](const auto & types) -> bool auto call_data = [&](const auto & types) -> bool
{ {
using Types = std::decay_t<decltype(types)>; using Types = std::decay_t<decltype(types)>;
using DataType = typename Types::LeftType; using DataType = typename Types::RightType;
if constexpr (IsDataTypeNumber<DataType> || IsDataTypeDecimal<DataType>) if (arguments.size() > 1)
{ {
using FieldType = typename DataType::FieldType; const ColumnWithTypeAndName & scale_column = arguments[1];
res = Dispatcher<FieldType, rounding_mode, tie_breaking_mode>::apply(column.column.get(), scale_arg);
auto call_scale = [&](const auto & scaleTypes) -> bool
{
using ScaleTypes = std::decay_t<decltype(scaleTypes)>;
using ScaleType = typename ScaleTypes::RightType;
if (isColumnConst(*value_arg.column) && !isColumnConst(*scale_column.column))
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale column must be const for const data column");
res = Dispatcher<DataType, rounding_mode, tie_breaking_mode>::template apply<ScaleType>(value_arg.column.get(), scale_column.column.get());
return true;
};
TypeIndex right_index = scale_column.type->getTypeId();
if (!callOnBasicType<void, true, false, false, false>(right_index, call_scale))
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Scale argument for rounding functions must have integer type");
return true; return true;
} }
return false; res = Dispatcher<DataType, rounding_mode, tie_breaking_mode>::template apply<int>(value_arg.column.get());
return true;
}; };
#if !defined(__SSE4_1__) #if !defined(__SSE4_1__)
@ -597,10 +696,9 @@ public:
throw Exception(ErrorCodes::CANNOT_SET_ROUNDING_MODE, "Cannot set floating point rounding mode"); throw Exception(ErrorCodes::CANNOT_SET_ROUNDING_MODE, "Cannot set floating point rounding mode");
#endif #endif
if (!callOnIndexAndDataType<void>(column.type->getTypeId(), call)) TypeIndex left_index = value_arg.type->getTypeId();
{ if (!callOnBasicType<void, true, true, true, false>(left_index, call_data))
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", column.name, getName()); throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", value_arg.name, getName());
}
return res; return res;
} }
@ -617,9 +715,8 @@ public:
}; };
/** Rounds down to a number within explicitly specified array. /// Rounds down to a number within explicitly specified array.
* If the value is less than the minimal bound - returns the minimal bound. /// If the value is less than the minimal bound - returns the minimal bound.
*/
class FunctionRoundDown : public IFunction class FunctionRoundDown : public IFunction
{ {
public: public:
@ -627,7 +724,6 @@ public:
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionRoundDown>(); } static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionRoundDown>(); }
String getName() const override { return name; } String getName() const override { return name; }
bool isVariadic() const override { return false; } bool isVariadic() const override { return false; }
size_t getNumberOfArguments() const override { return 2; } size_t getNumberOfArguments() const override { return 2; }
bool useDefaultImplementationForConstants() const override { return true; } bool useDefaultImplementationForConstants() const override { return true; }

View File

@ -28,6 +28,9 @@ namespace ErrorCodes
extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_TYPE_OF_ARGUMENT;
} }
template <typename Point>
using LineString = boost::geometry::model::linestring<Point>;
template <typename Point> template <typename Point>
using Ring = boost::geometry::model::ring<Point>; using Ring = boost::geometry::model::ring<Point>;
@ -38,11 +41,13 @@ template <typename Point>
using MultiPolygon = boost::geometry::model::multi_polygon<Polygon<Point>>; using MultiPolygon = boost::geometry::model::multi_polygon<Polygon<Point>>;
using CartesianPoint = boost::geometry::model::d2::point_xy<Float64>; using CartesianPoint = boost::geometry::model::d2::point_xy<Float64>;
using CartesianLineString = LineString<CartesianPoint>;
using CartesianRing = Ring<CartesianPoint>; using CartesianRing = Ring<CartesianPoint>;
using CartesianPolygon = Polygon<CartesianPoint>; using CartesianPolygon = Polygon<CartesianPoint>;
using CartesianMultiPolygon = MultiPolygon<CartesianPoint>; using CartesianMultiPolygon = MultiPolygon<CartesianPoint>;
using SphericalPoint = boost::geometry::model::point<Float64, 2, boost::geometry::cs::spherical_equatorial<boost::geometry::degree>>; using SphericalPoint = boost::geometry::model::point<Float64, 2, boost::geometry::cs::spherical_equatorial<boost::geometry::degree>>;
using SphericalLineString = LineString<SphericalPoint>;
using SphericalRing = Ring<SphericalPoint>; using SphericalRing = Ring<SphericalPoint>;
using SphericalPolygon = Polygon<SphericalPoint>; using SphericalPolygon = Polygon<SphericalPoint>;
using SphericalMultiPolygon = MultiPolygon<SphericalPoint>; using SphericalMultiPolygon = MultiPolygon<SphericalPoint>;
@ -85,6 +90,29 @@ struct ColumnToPointsConverter
} }
}; };
/**
* Class which converts Column with type Array(Tuple(Float64, Float64)) to a vector of boost linestring type.
*/
template <typename Point>
struct ColumnToLineStringsConverter
{
static std::vector<LineString<Point>> convert(ColumnPtr col)
{
const IColumn::Offsets & offsets = typeid_cast<const ColumnArray &>(*col).getOffsets();
size_t prev_offset = 0;
std::vector<LineString<Point>> answer;
answer.reserve(offsets.size());
auto tmp = ColumnToPointsConverter<Point>::convert(typeid_cast<const ColumnArray &>(*col).getDataPtr());
for (size_t offset : offsets)
{
answer.emplace_back(tmp.begin() + prev_offset, tmp.begin() + offset);
prev_offset = offset;
}
return answer;
}
};
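The converter above cuts one flat array of points into per-row linestrings using the offsets column. The same idea outside ClickHouse, assuming only boost::geometry and using plain vectors in place of the ColumnArray internals:

#include <boost/geometry.hpp>
#include <cstddef>
#include <iostream>
#include <vector>

namespace bg = boost::geometry;
using Point = bg::model::d2::point_xy<double>;
using LineString = bg::model::linestring<Point>;

int main()
{
    // Flattened Array(Tuple(Float64, Float64)) data: the points of all rows, back to back.
    std::vector<Point> flat_points = {{0, 0}, {1, 1}, {2, 2}, {5, 5}, {6, 7}};
    // Cumulative end offsets, one per row (same meaning as IColumn::Offsets).
    std::vector<std::size_t> offsets = {3, 5};

    std::vector<LineString> line_strings;
    std::size_t prev = 0;
    for (std::size_t off : offsets)
    {
        line_strings.emplace_back(flat_points.begin() + prev, flat_points.begin() + off);
        prev = off;
    }

    for (const auto & ls : line_strings)
        std::cout << bg::wkt(ls) << " length=" << bg::length(ls) << '\n';
}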
/** /**
* Class which converts Column with type Array(Tuple(Float64, Float64)) to a vector of boost ring type. * Class which converts Column with type Array(Tuple(Float64, Float64)) to a vector of boost ring type.
*/ */
@ -208,6 +236,39 @@ private:
ColumnFloat64::Container & second_container; ColumnFloat64::Container & second_container;
}; };
/// Serialize Point, LineString as LineString
template <typename Point>
class LineStringSerializer
{
public:
LineStringSerializer()
: offsets(ColumnUInt64::create())
{}
explicit LineStringSerializer(size_t n)
: offsets(ColumnUInt64::create(n))
{}
void add(const LineString<Point> & line_string)
{
size += line_string.size();
offsets->insertValue(size);
for (const auto & point : line_string)
point_serializer.add(point);
}
ColumnPtr finalize()
{
return ColumnArray::create(point_serializer.finalize(), std::move(offsets));
}
private:
size_t size = 0;
PointSerializer<Point> point_serializer;
ColumnUInt64::MutablePtr offsets;
};
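The serializer is the inverse: append all points of a row and record the running end offset. A minimal stand-alone version of that bookkeeping (plain vectors instead of ClickHouse columns, names invented for illustration):

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

using Point = std::pair<double, double>;
using LineString = std::vector<Point>;

struct LineStringSerializerSketch
{
    std::vector<Point> flat_points;     // becomes the nested Tuple(Float64, Float64) data
    std::vector<std::size_t> offsets;   // becomes the ColumnArray offsets
    std::size_t size = 0;

    void add(const LineString & ls)
    {
        size += ls.size();
        offsets.push_back(size);        // cumulative end offset of this row
        flat_points.insert(flat_points.end(), ls.begin(), ls.end());
    }
};

int main()
{
    LineStringSerializerSketch serializer;
    serializer.add({{1, 1}, {2, 2}, {3, 3}});
    serializer.add({{0, 0}, {5, 5}});

    for (std::size_t off : serializer.offsets)
        std::cout << off << ' ';        // prints: 3 5
    std::cout << '\n' << serializer.flat_points.size() << " points total\n";
}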
/// Almost the same as LineStringSerializer
/// Serialize Point, Ring as Ring /// Serialize Point, Ring as Ring
template <typename Point> template <typename Point>
class RingSerializer class RingSerializer
@ -344,8 +405,16 @@ static void callOnGeometryDataType(DataTypePtr type, F && f)
/// There is no Point type, because for most of geometry functions it is useless. /// There is no Point type, because for most of geometry functions it is useless.
if (factory.get("Point")->equals(*type)) if (factory.get("Point")->equals(*type))
return f(ConverterType<ColumnToPointsConverter<Point>>()); return f(ConverterType<ColumnToPointsConverter<Point>>());
/// We have to take the custom type name into account to avoid ambiguity,
/// because, for example, both Ring and LineString resolve to Array(Tuple(Point)).
else if (factory.get("LineString")->equals(*type) && type->getCustomName() && type->getCustomName()->getName() == "LineString")
return f(ConverterType<ColumnToLineStringsConverter<Point>>());
/// For backward compatibility, if this function is called on a non-custom type, Array(Tuple(Point)) is treated as Ring.
else if (factory.get("Ring")->equals(*type)) else if (factory.get("Ring")->equals(*type))
return f(ConverterType<ColumnToRingsConverter<Point>>()); return f(ConverterType<ColumnToRingsConverter<Point>>());
else if (factory.get("Polygon")->equals(*type)) else if (factory.get("Polygon")->equals(*type))
return f(ConverterType<ColumnToPolygonsConverter<Point>>()); return f(ConverterType<ColumnToPolygonsConverter<Point>>());
else if (factory.get("MultiPolygon")->equals(*type)) else if (factory.get("MultiPolygon")->equals(*type))

View File

@ -73,6 +73,8 @@ public:
if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>) if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName());
else if constexpr (std::is_same_v<ColumnToLineStringsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToLineStringsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName());
else else
{ {
auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst());

View File

@ -71,6 +71,8 @@ public:
if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>) if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName());
else if constexpr (std::is_same_v<ColumnToLineStringsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToLineStringsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName());
else else
{ {
auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst());

View File

@ -71,6 +71,8 @@ public:
if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>) if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName());
else if constexpr (std::is_same_v<ColumnToLineStringsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToLineStringsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName());
else else
{ {
auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst());

View File

@ -75,6 +75,8 @@ public:
if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>) if constexpr (std::is_same_v<ColumnToPointsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToPointsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName());
else if constexpr (std::is_same_v<ColumnToLineStringsConverter<Point>, LeftConverter> || std::is_same_v<ColumnToLineStringsConverter<Point>, RightConverter>)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName());
else else
{ {
auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst());

View File

@ -82,6 +82,11 @@ struct ReadWKTPointNameHolder
static constexpr const char * name = "readWKTPoint"; static constexpr const char * name = "readWKTPoint";
}; };
struct ReadWKTLineStringNameHolder
{
static constexpr const char * name = "readWKTLineString";
};
struct ReadWKTRingNameHolder struct ReadWKTRingNameHolder
{ {
static constexpr const char * name = "readWKTRing"; static constexpr const char * name = "readWKTRing";
@ -102,6 +107,30 @@ struct ReadWKTMultiPolygonNameHolder
REGISTER_FUNCTION(ReadWKT) REGISTER_FUNCTION(ReadWKT)
{ {
factory.registerFunction<FunctionReadWKT<DataTypePointName, CartesianPoint, PointSerializer<CartesianPoint>, ReadWKTPointNameHolder>>(); factory.registerFunction<FunctionReadWKT<DataTypePointName, CartesianPoint, PointSerializer<CartesianPoint>, ReadWKTPointNameHolder>>();
factory.registerFunction<FunctionReadWKT<DataTypeLineStringName, CartesianLineString, LineStringSerializer<CartesianPoint>, ReadWKTLineStringNameHolder>>(FunctionDocumentation
{
.description=R"(
Parses a Well-Known Text (WKT) representation of a LineString geometry and returns it in the internal ClickHouse format.
)",
.syntax = "readWKTLineString(wkt_string)",
.arguments{
{"wkt_string", "The input WKT string representing a LineString geometry."}
},
.returned_value = "The function returns a ClickHouse internal representation of the linestring geometry.",
.examples{
{"first call", "SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)');", R"(
readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)')
[(1,1),(2,2),(3,3),(1,1)]
)"},
{"second call", "SELECT toTypeName(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'));", R"(
toTypeName(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'))
LineString
)"},
},
.categories{"Geo"}
});
factory.registerFunction<FunctionReadWKT<DataTypeRingName, CartesianRing, RingSerializer<CartesianPoint>, ReadWKTRingNameHolder>>(); factory.registerFunction<FunctionReadWKT<DataTypeRingName, CartesianRing, RingSerializer<CartesianPoint>, ReadWKTRingNameHolder>>();
factory.registerFunction<FunctionReadWKT<DataTypePolygonName, CartesianPolygon, PolygonSerializer<CartesianPoint>, ReadWKTPolygonNameHolder>>(); factory.registerFunction<FunctionReadWKT<DataTypePolygonName, CartesianPolygon, PolygonSerializer<CartesianPoint>, ReadWKTPolygonNameHolder>>();
factory.registerFunction<FunctionReadWKT<DataTypeMultiPolygonName, CartesianMultiPolygon, MultiPolygonSerializer<CartesianPoint>, ReadWKTMultiPolygonNameHolder>>(); factory.registerFunction<FunctionReadWKT<DataTypeMultiPolygonName, CartesianMultiPolygon, MultiPolygonSerializer<CartesianPoint>, ReadWKTMultiPolygonNameHolder>>();
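The readWKT* functions registered here are built on boost::geometry, so the underlying parsing step looks roughly like the stand-alone sketch below (only boost::geometry is assumed; it mirrors the documentation example above):

#include <boost/geometry.hpp>
#include <iostream>

namespace bg = boost::geometry;
using Point = bg::model::d2::point_xy<double>;
using LineString = bg::model::linestring<Point>;

int main()
{
    LineString ls;
    // Parse the same WKT string as in the documentation example.
    bg::read_wkt("LINESTRING (1 1, 2 2, 3 3, 1 1)", ls);

    for (const Point & p : ls)
        std::cout << '(' << bg::get<0>(p) << ',' << bg::get<1>(p) << ") ";
    std::cout << '\n';   // (1,1) (2,2) (3,3) (1,1)
}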

View File

@ -61,7 +61,7 @@ public:
return std::make_shared<DataTypeTuple>(tuple_arg_types); return std::make_shared<DataTypeTuple>(tuple_arg_types);
} }
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{ {
const size_t num_arguments = arguments.size(); const size_t num_arguments = arguments.size();
Columns columns; Columns columns;
@ -92,6 +92,9 @@ public:
columns.push_back(inner_col); columns.push_back(inner_col);
} }
if (columns.empty())
return ColumnTuple::create(input_rows_count);
return ColumnTuple::create(columns); return ColumnTuple::create(columns);
} }
}; };

View File

@ -41,6 +41,14 @@ public:
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
/*
* Functions like recursiveRemoveLowCardinality don't pay enough attention to custom types and simply erase
* that information during type conversions.
* While that is a broader problem, the quick solution is to disable the default LowCardinality implementation here,
* because it doesn't make much sense for geo types anyway.
*/
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override
{ {
auto res_column = ColumnString::create(); auto res_column = ColumnString::create();

View File

@ -5,6 +5,7 @@
#include <functional> #include <functional>
#include <memory> #include <memory>
#include <Poco/Timestamp.h>
namespace DB namespace DB
{ {
@ -25,6 +26,7 @@ public:
{ {
UInt64 uncompressed_size; UInt64 uncompressed_size;
UInt64 compressed_size; UInt64 compressed_size;
Poco::Timestamp last_modified;
bool is_encrypted; bool is_encrypted;
}; };

View File

@ -157,6 +157,7 @@ public:
file_info.emplace(); file_info.emplace();
file_info->uncompressed_size = archive_entry_size(current_entry); file_info->uncompressed_size = archive_entry_size(current_entry);
file_info->compressed_size = archive_entry_size(current_entry); file_info->compressed_size = archive_entry_size(current_entry);
file_info->last_modified = archive_entry_mtime(current_entry);
file_info->is_encrypted = false; file_info->is_encrypted = false;
} }

View File

@ -162,7 +162,7 @@ public:
class RetryStrategy : public Aws::Client::RetryStrategy class RetryStrategy : public Aws::Client::RetryStrategy
{ {
public: public:
explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 90000); explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 5000);
/// NOLINTNEXTLINE(google-runtime-int) /// NOLINTNEXTLINE(google-runtime-int)
bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override; bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override;
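Only the delay cap changes here (90000 ms down to 5000 ms). The exact backoff formula of RetryStrategy is not part of this diff, so the sketch below just shows a generic capped exponential backoff to make the effect of the cap visible:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Generic capped exponential backoff: delay = min(maxDelayMs, scaleFactor * 2^attempt).
uint32_t backoffDelayMs(uint32_t attempt, uint32_t scale_factor_ms, uint32_t max_delay_ms)
{
    uint64_t delay = static_cast<uint64_t>(scale_factor_ms) << std::min<uint32_t>(attempt, 31);
    return static_cast<uint32_t>(std::min<uint64_t>(delay, max_delay_ms));
}

int main()
{
    for (uint32_t attempt = 0; attempt < 10; ++attempt)
        std::cout << "attempt " << attempt
                  << ": old cap " << backoffDelayMs(attempt, 25, 90000)
                  << " ms, new cap " << backoffDelayMs(attempt, 25, 5000) << " ms\n";
}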

View File

@ -596,6 +596,34 @@ void ActionsDAG::removeUnusedActions(const std::unordered_set<const Node *> & us
std::erase_if(inputs, [&](const Node * node) { return !visited_nodes.contains(node); }); std::erase_if(inputs, [&](const Node * node) { return !visited_nodes.contains(node); });
} }
void ActionsDAG::removeAliasesForFilter(const std::string & filter_name)
{
const auto & filter_node = findInOutputs(filter_name);
std::stack<Node *> stack;
stack.push(const_cast<Node *>(&filter_node));
std::unordered_set<const Node *> visited;
visited.insert(stack.top());
while (!stack.empty())
{
auto * node = stack.top();
stack.pop();
for (auto & child : node->children)
{
while (child->type == ActionType::ALIAS)
child = child->children.front();
if (!visited.contains(child))
{
stack.push(const_cast<Node *>(child));
visited.insert(child);
}
}
}
}
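removeAliasesForFilter walks the DAG and rewires every child edge past chains of ALIAS nodes. A stripped-down model of that walk, with a hand-rolled Node type instead of the real ActionsDAG node (purely illustrative):

#include <iostream>
#include <stack>
#include <string>
#include <unordered_set>
#include <vector>

struct Node
{
    enum class Type { Input, Function, Alias } type;
    std::string name;
    std::vector<Node *> children;
};

// Rewire every child edge so that chains of Alias nodes are skipped.
void removeAliases(Node & root)
{
    std::stack<Node *> stack;
    std::unordered_set<const Node *> visited;
    stack.push(&root);
    visited.insert(&root);
    while (!stack.empty())
    {
        Node * node = stack.top();
        stack.pop();
        for (Node *& child : node->children)
        {
            while (child->type == Node::Type::Alias)
                child = child->children.front();
            if (visited.insert(child).second)
                stack.push(child);
        }
    }
}

int main()
{
    Node input{Node::Type::Input, "x", {}};
    Node alias{Node::Type::Alias, "x_alias", {&input}};
    Node filter{Node::Type::Function, "equals(x_alias, 42)", {&alias}};

    removeAliases(filter);
    std::cout << filter.children.front()->name << '\n';   // prints: x
}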
ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases) ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases)
{ {
auto actions = std::make_shared<ActionsDAG>(); auto actions = std::make_shared<ActionsDAG>();
@ -1704,7 +1732,7 @@ void ActionsDAG::mergeNodes(ActionsDAG && second, NodeRawConstPtrs * out_outputs
} }
} }
ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split_nodes, bool create_split_nodes_mapping) const ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split_nodes, bool create_split_nodes_mapping, bool avoid_duplicate_inputs) const
{ {
/// Split DAG into two parts. /// Split DAG into two parts.
/// (first_nodes, first_outputs) is a part which will have split_list in result. /// (first_nodes, first_outputs) is a part which will have split_list in result.
@ -1718,6 +1746,14 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split
/// List of nodes from current actions which are not inputs, but will be in second part. /// List of nodes from current actions which are not inputs, but will be in second part.
NodeRawConstPtrs new_inputs; NodeRawConstPtrs new_inputs;
/// Avoid new inputs to have the same name as existing inputs.
/// It's allowed for DAG but may break Block invariant 'columns with identical name must have identical structure'.
std::unordered_set<std::string_view> duplicate_inputs;
size_t duplicate_counter = 0;
if (avoid_duplicate_inputs)
for (const auto * input : inputs)
duplicate_inputs.insert(input->result_name);
struct Frame struct Frame
{ {
const Node * node = nullptr; const Node * node = nullptr;
@ -1830,7 +1866,8 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split
input_node.result_name = child->result_name; input_node.result_name = child->result_name;
child_data.to_second = &second_nodes.emplace_back(std::move(input_node)); child_data.to_second = &second_nodes.emplace_back(std::move(input_node));
new_inputs.push_back(child); if (child->type != ActionType::INPUT)
new_inputs.push_back(child);
} }
} }
@ -1886,7 +1923,32 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set<const Node *> split
for (const auto * input : new_inputs) for (const auto * input : new_inputs)
{ {
const auto & cur = data[input]; auto & cur = data[input];
if (avoid_duplicate_inputs)
{
bool is_name_updated = false;
while (!duplicate_inputs.insert(cur.to_first->result_name).second)
{
is_name_updated = true;
cur.to_first->result_name = fmt::format("{}_{}", input->result_name, duplicate_counter);
++duplicate_counter;
}
if (is_name_updated)
{
Node input_node;
input_node.type = ActionType::INPUT;
input_node.result_type = cur.to_first->result_type;
input_node.result_name = cur.to_first->result_name;
auto * new_input = &second_nodes.emplace_back(std::move(input_node));
cur.to_second->type = ActionType::ALIAS;
cur.to_second->children = {new_input};
cur.to_second = new_input;
}
}
second_inputs.push_back(cur.to_second); second_inputs.push_back(cur.to_second);
first_outputs.push_back(cur.to_first); first_outputs.push_back(cur.to_first);
} }
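The renaming above is a plain "append a counter until the name is free" scheme; isolated from the DAG bookkeeping it reduces to this (the suffix format mirrors the fmt::format call, the helper itself is invented for illustration):

#include <iostream>
#include <string>
#include <unordered_set>

// Return a name not yet present in `taken`, registering it as taken.
std::string uniqueInputName(const std::string & name, std::unordered_set<std::string> & taken, size_t & counter)
{
    std::string candidate = name;
    while (!taken.insert(candidate).second)
    {
        candidate = name + "_" + std::to_string(counter);
        ++counter;
    }
    return candidate;
}

int main()
{
    std::unordered_set<std::string> taken = {"x", "y"};
    size_t counter = 0;
    std::cout << uniqueInputName("x", taken, counter) << '\n';  // x_0
    std::cout << uniqueInputName("x", taken, counter) << '\n';  // x_1
    std::cout << uniqueInputName("z", taken, counter) << '\n';  // z
}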

View File

@ -195,6 +195,8 @@ public:
/// Remove actions that are not needed to compute output nodes with required names /// Remove actions that are not needed to compute output nodes with required names
void removeUnusedActions(const NameSet & required_names, bool allow_remove_inputs = true, bool allow_constant_folding = true); void removeUnusedActions(const NameSet & required_names, bool allow_remove_inputs = true, bool allow_constant_folding = true);
void removeAliasesForFilter(const std::string & filter_name);
/// Transform the current DAG in a way that leaf nodes get folded into their parents. It's done /// Transform the current DAG in a way that leaf nodes get folded into their parents. It's done
/// because each projection can provide some columns as inputs to substitute certain sub-DAGs /// because each projection can provide some columns as inputs to substitute certain sub-DAGs
/// (expressions). Consider the following example: /// (expressions). Consider the following example:
@ -343,7 +345,7 @@ public:
/// initial DAG : (a, b, c, d, e) -> (w, x, y, z) | 1 a 2 b 3 c 4 d 5 e 6 -> 1 2 3 4 5 6 w x y z /// initial DAG : (a, b, c, d, e) -> (w, x, y, z) | 1 a 2 b 3 c 4 d 5 e 6 -> 1 2 3 4 5 6 w x y z
/// split (first) : (a, c, d) -> (i, j, k, w, y) | 1 a 2 b 3 c 4 d 5 e 6 -> 1 2 b 3 4 5 e 6 i j k w y /// split (first) : (a, c, d) -> (i, j, k, w, y) | 1 a 2 b 3 c 4 d 5 e 6 -> 1 2 b 3 4 5 e 6 i j k w y
/// split (second) : (i, j, k, y, b, e) -> (x, y, z) | 1 2 b 3 4 5 e 6 i j k w y -> 1 2 3 4 5 6 w x y z /// split (second) : (i, j, k, y, b, e) -> (x, y, z) | 1 2 b 3 4 5 e 6 i j k w y -> 1 2 3 4 5 6 w x y z
SplitResult split(std::unordered_set<const Node *> split_nodes, bool create_split_nodes_mapping = false) const; SplitResult split(std::unordered_set<const Node *> split_nodes, bool create_split_nodes_mapping = false, bool avoid_duplicate_inputs = false) const;
/// Splits actions into two parts. Returned first half may be swapped with ARRAY JOIN. /// Splits actions into two parts. Returned first half may be swapped with ARRAY JOIN.
SplitResult splitActionsBeforeArrayJoin(const NameSet & array_joined_columns) const; SplitResult splitActionsBeforeArrayJoin(const NameSet & array_joined_columns) const;

View File

@ -740,12 +740,18 @@ struct ContextSharedPart : boost::noncopyable
void initializeTraceCollector(std::shared_ptr<TraceLog> trace_log) void initializeTraceCollector(std::shared_ptr<TraceLog> trace_log)
{ {
if (!trace_log) if (!trace_collector.has_value())
return; throw Exception(ErrorCodes::LOGICAL_ERROR, "TraceCollector needs to be first created before initialization");
trace_collector->initialize(trace_log);
}
void createTraceCollector()
{
if (hasTraceCollector()) if (hasTraceCollector())
return; return;
trace_collector.emplace(std::move(trace_log)); trace_collector.emplace();
} }
void addWarningMessage(const String & message) TSA_REQUIRES(mutex) void addWarningMessage(const String & message) TSA_REQUIRES(mutex)
@ -3891,6 +3897,11 @@ void Context::initializeSystemLogs()
}); });
} }
void Context::createTraceCollector()
{
shared->createTraceCollector();
}
void Context::initializeTraceCollector() void Context::initializeTraceCollector()
{ {
shared->initializeTraceCollector(getTraceLog()); shared->initializeTraceCollector(getTraceLog());

View File

@ -1077,6 +1077,8 @@ public:
void initializeSystemLogs(); void initializeSystemLogs();
/// Call after initialization before using trace collector. /// Call after initialization before using trace collector.
void createTraceCollector();
void initializeTraceCollector(); void initializeTraceCollector();
/// Call after unexpected crash happen. /// Call after unexpected crash happen.

View File

@ -568,8 +568,21 @@ void ZooKeeperMetadataTransaction::commit()
ClusterPtr tryGetReplicatedDatabaseCluster(const String & cluster_name) ClusterPtr tryGetReplicatedDatabaseCluster(const String & cluster_name)
{ {
if (const auto * replicated_db = dynamic_cast<const DatabaseReplicated *>(DatabaseCatalog::instance().tryGetDatabase(cluster_name).get())) String name = cluster_name;
return replicated_db->tryGetCluster(); bool all_groups = false;
if (name.starts_with(DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX))
{
name = name.substr(strlen(DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX));
all_groups = true;
}
if (const auto * replicated_db = dynamic_cast<const DatabaseReplicated *>(DatabaseCatalog::instance().tryGetDatabase(name).get()))
{
if (all_groups)
return replicated_db->tryGetAllGroupsCluster();
else
return replicated_db->tryGetCluster();
}
return {}; return {};
} }

View File

@ -240,4 +240,34 @@ bool SplitTokenExtractor::nextInStringLike(const char * data, size_t length, siz
return !bad_token && !token.empty(); return !bad_token && !token.empty();
} }
void SplitTokenExtractor::substringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter, bool is_prefix, bool is_suffix) const
{
size_t cur = 0;
size_t token_start = 0;
size_t token_len = 0;
while (cur < length && nextInString(data, length, &cur, &token_start, &token_len))
// To avoid updating the filter with incomplete tokens,
// the first token is ignored unless the substring is a prefix,
// and the last token is ignored unless the substring is a suffix.
if ((token_start > 0 || is_prefix) && (token_start + token_len < length || is_suffix))
bloom_filter.add(data + token_start, token_len);
}
void SplitTokenExtractor::substringToGinFilter(const char * data, size_t length, GinFilter & gin_filter, bool is_prefix, bool is_suffix) const
{
gin_filter.setQueryString(data, length);
size_t cur = 0;
size_t token_start = 0;
size_t token_len = 0;
while (cur < length && nextInString(data, length, &cur, &token_start, &token_len))
// To avoid updating the filter with incomplete tokens,
// the first token is ignored unless the substring is a prefix,
// and the last token is ignored unless the substring is a suffix.
if ((token_start > 0 || is_prefix) && (token_start + token_len < length || is_suffix))
gin_filter.addTerm(data + token_start, token_len);
}
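Both substringTo* methods drop the first token unless the substring is a prefix and the last token unless it is a suffix, because those edge tokens may be cut off. A self-contained sketch with a simple alphanumeric tokenizer (not the real SplitTokenExtractor) showing which tokens survive:

#include <cctype>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Split on non-alphanumeric characters, like the default token extractor does conceptually.
std::vector<std::pair<size_t, std::string>> tokenize(const std::string & s)
{
    std::vector<std::pair<size_t, std::string>> tokens;
    size_t i = 0;
    while (i < s.size())
    {
        while (i < s.size() && !std::isalnum(static_cast<unsigned char>(s[i])))
            ++i;
        size_t start = i;
        while (i < s.size() && std::isalnum(static_cast<unsigned char>(s[i])))
            ++i;
        if (i > start)
            tokens.emplace_back(start, s.substr(start, i - start));
    }
    return tokens;
}

// Keep only tokens that are guaranteed to be complete inside the substring.
std::vector<std::string> tokensForSubstring(const std::string & s, bool is_prefix, bool is_suffix)
{
    std::vector<std::string> result;
    for (const auto & [start, token] : tokenize(s))
    {
        bool complete_at_front = start > 0 || is_prefix;
        bool complete_at_back = start + token.size() < s.size() || is_suffix;
        if (complete_at_front && complete_at_back)
            result.push_back(token);
    }
    return result;
}

int main()
{
    // A substring from the middle of a larger string: both edge tokens may be cut off.
    for (const auto & t : tokensForSubstring("ello world goodb", /*is_prefix=*/false, /*is_suffix=*/false))
        std::cout << t << '\n';   // prints only: world
}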
} }

View File

@ -28,8 +28,22 @@ struct ITokenExtractor
/// It skips unescaped `%` and `_` and supports escaping symbols, but it is less lightweight. /// It skips unescaped `%` and `_` and supports escaping symbols, but it is less lightweight.
virtual bool nextInStringLike(const char * data, size_t length, size_t * pos, String & out) const = 0; virtual bool nextInStringLike(const char * data, size_t length, size_t * pos, String & out) const = 0;
/// Updates Bloom filter from exact-match string filter value
virtual void stringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0; virtual void stringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0;
/// Updates Bloom filter from substring-match string filter value.
/// An `ITokenExtractor` implementation may decide to skip certain
/// tokens depending on whether the substring is a prefix or a suffix.
virtual void substringToBloomFilter(
const char * data,
size_t length,
BloomFilter & bloom_filter,
bool is_prefix [[maybe_unused]],
bool is_suffix [[maybe_unused]]) const
{
stringToBloomFilter(data, length, bloom_filter);
}
virtual void stringPaddedToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const virtual void stringPaddedToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const
{ {
stringToBloomFilter(data, length, bloom_filter); stringToBloomFilter(data, length, bloom_filter);
@ -37,8 +51,22 @@ struct ITokenExtractor
virtual void stringLikeToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0; virtual void stringLikeToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0;
/// Updates GIN filter from exact-match string filter value
virtual void stringToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const = 0; virtual void stringToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const = 0;
/// Updates GIN filter from substring-match string filter value.
/// An `ITokenExtractor` implementation may decide to skip certain
/// tokens depending on whether the substring is a prefix or a suffix.
virtual void substringToGinFilter(
const char * data,
size_t length,
GinFilter & gin_filter,
bool is_prefix [[maybe_unused]],
bool is_suffix [[maybe_unused]]) const
{
stringToGinFilter(data, length, gin_filter);
}
virtual void stringPaddedToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const virtual void stringPaddedToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const
{ {
stringToGinFilter(data, length, gin_filter); stringToGinFilter(data, length, gin_filter);
@ -148,6 +176,11 @@ struct SplitTokenExtractor final : public ITokenExtractorHelper<SplitTokenExtrac
bool nextInStringLike(const char * data, size_t length, size_t * __restrict pos, String & token) const override; bool nextInStringLike(const char * data, size_t length, size_t * __restrict pos, String & token) const override;
void substringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter, bool is_prefix, bool is_suffix) const override;
void substringToGinFilter(const char * data, size_t length, GinFilter & gin_filter, bool is_prefix, bool is_suffix) const override;
}; };
} }

View File

@ -279,6 +279,8 @@ Chain InterpreterInsertQuery::buildChain(
std::atomic_uint64_t * elapsed_counter_ms, std::atomic_uint64_t * elapsed_counter_ms,
bool check_access) bool check_access)
{ {
IInterpreter::checkStorageSupportsTransactionsIfNeeded(table, getContext());
ProfileEvents::increment(ProfileEvents::InsertQueriesWithSubqueries); ProfileEvents::increment(ProfileEvents::InsertQueriesWithSubqueries);
ProfileEvents::increment(ProfileEvents::QueriesWithSubqueries); ProfileEvents::increment(ProfileEvents::QueriesWithSubqueries);

View File

@ -578,7 +578,9 @@ InterpreterSelectQuery::InterpreterSelectQuery(
settings.parallel_replicas_count, settings.parallel_replicas_count,
settings.parallel_replica_offset, settings.parallel_replica_offset,
std::move(custom_key_ast), std::move(custom_key_ast),
settings.parallel_replicas_custom_key_filter_type, {settings.parallel_replicas_custom_key_filter_type,
settings.parallel_replicas_custom_key_range_lower,
settings.parallel_replicas_custom_key_range_upper},
storage->getInMemoryMetadataPtr()->columns, storage->getInMemoryMetadataPtr()->columns,
context); context);
} }

View File

@ -1,5 +1,4 @@
#include "TraceCollector.h" #include <Interpreters/TraceCollector.h>
#include <Core/Field.h> #include <Core/Field.h>
#include <IO/ReadBufferFromFileDescriptor.h> #include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h> #include <IO/ReadHelpers.h>
@ -14,8 +13,12 @@
namespace DB namespace DB
{ {
TraceCollector::TraceCollector(std::shared_ptr<TraceLog> trace_log_) namespace ErrorCodes
: trace_log(std::move(trace_log_)) {
extern const int LOGICAL_ERROR;
}
TraceCollector::TraceCollector()
{ {
TraceSender::pipe.open(); TraceSender::pipe.open();
@ -28,6 +31,23 @@ TraceCollector::TraceCollector(std::shared_ptr<TraceLog> trace_log_)
thread = ThreadFromGlobalPool(&TraceCollector::run, this); thread = ThreadFromGlobalPool(&TraceCollector::run, this);
} }
void TraceCollector::initialize(std::shared_ptr<TraceLog> trace_log_)
{
if (is_trace_log_initialized)
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "TraceCollector is already initialized");
trace_log_ptr = trace_log_;
is_trace_log_initialized.store(true, std::memory_order_release);
}
std::shared_ptr<TraceLog> TraceCollector::getTraceLog()
{
if (!is_trace_log_initialized.load(std::memory_order_acquire))
return nullptr;
return trace_log_ptr;
}
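initialize/getTraceLog follow a publish-once pattern: the shared_ptr is written first and an atomic flag is stored with release afterwards, so readers that observe the flag with acquire also see the pointer. A minimal model of that pattern (names invented, not the real TraceCollector API):

#include <atomic>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

struct Log { std::string name; };

class Collector
{
public:
    void initialize(std::shared_ptr<Log> log)
    {
        if (initialized.load(std::memory_order_acquire))
            throw std::logic_error("already initialized");
        log_ptr = std::move(log);                              // write happens-before...
        initialized.store(true, std::memory_order_release);    // ...the release store
    }

    std::shared_ptr<Log> getLog() const
    {
        if (!initialized.load(std::memory_order_acquire))      // acquire pairs with the release
            return nullptr;
        return log_ptr;
    }

private:
    std::atomic<bool> initialized{false};
    std::shared_ptr<Log> log_ptr;
};

int main()
{
    Collector collector;
    std::cout << (collector.getLog() ? "ready" : "not ready") << '\n';   // not ready
    collector.initialize(std::make_shared<Log>(Log{"trace_log"}));
    std::cout << collector.getLog()->name << '\n';                       // trace_log
}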
void TraceCollector::tryClosePipe() void TraceCollector::tryClosePipe()
{ {
try try
@ -120,7 +140,7 @@ void TraceCollector::run()
ProfileEvents::Count increment; ProfileEvents::Count increment;
readPODBinary(increment, in); readPODBinary(increment, in);
if (trace_log) if (auto trace_log = getTraceLog())
{ {
// time and time_in_microseconds are both being constructed from the same timespec so that the // time and time_in_microseconds are both being constructed from the same timespec so that the
// times will be equal up to the precision of a second. // times will be equal up to the precision of a second.

View File

@ -1,4 +1,5 @@
#pragma once #pragma once
#include <atomic>
#include <Common/ThreadPool.h> #include <Common/ThreadPool.h>
class StackTrace; class StackTrace;
@ -16,11 +17,17 @@ class TraceLog;
class TraceCollector class TraceCollector
{ {
public: public:
explicit TraceCollector(std::shared_ptr<TraceLog> trace_log_); TraceCollector();
~TraceCollector(); ~TraceCollector();
void initialize(std::shared_ptr<TraceLog> trace_log_);
private: private:
std::shared_ptr<TraceLog> trace_log; std::shared_ptr<TraceLog> getTraceLog();
std::atomic<bool> is_trace_log_initialized = false;
std::shared_ptr<TraceLog> trace_log_ptr;
ThreadFromGlobalPool thread; ThreadFromGlobalPool thread;
void tryClosePipe(); void tryClosePipe();

View File

@ -7,7 +7,6 @@
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
#include <DataTypes/DataTypesNumber.h>
#include <boost/rational.hpp> #include <boost/rational.hpp>
@ -18,18 +17,19 @@ namespace DB
namespace ErrorCodes namespace ErrorCodes
{ {
extern const int ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER; extern const int ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER;
extern const int INVALID_SETTING_VALUE;
} }
ASTPtr getCustomKeyFilterForParallelReplica( ASTPtr getCustomKeyFilterForParallelReplica(
size_t replicas_count, size_t replicas_count,
size_t replica_num, size_t replica_num,
ASTPtr custom_key_ast, ASTPtr custom_key_ast,
ParallelReplicasCustomKeyFilterType filter_type, ParallelReplicasCustomKeyFilter filter,
const ColumnsDescription & columns, const ColumnsDescription & columns,
const ContextPtr & context) const ContextPtr & context)
{ {
chassert(replicas_count > 1); chassert(replicas_count > 1);
if (filter_type == ParallelReplicasCustomKeyFilterType::DEFAULT) if (filter.filter_type == ParallelReplicasCustomKeyFilterType::DEFAULT)
{ {
// first we do modulo with replica count // first we do modulo with replica count
auto modulo_function = makeASTFunction("positiveModulo", custom_key_ast, std::make_shared<ASTLiteral>(replicas_count)); auto modulo_function = makeASTFunction("positiveModulo", custom_key_ast, std::make_shared<ASTLiteral>(replicas_count));
@ -40,35 +40,80 @@ ASTPtr getCustomKeyFilterForParallelReplica(
return equals_function; return equals_function;
} }
assert(filter_type == ParallelReplicasCustomKeyFilterType::RANGE); chassert(filter.filter_type == ParallelReplicasCustomKeyFilterType::RANGE);
KeyDescription custom_key_description KeyDescription custom_key_description
= KeyDescription::getKeyFromAST(custom_key_ast, columns, context); = KeyDescription::getKeyFromAST(custom_key_ast, columns, context);
using RelativeSize = boost::rational<ASTSampleRatio::BigNum>; using RelativeSize = boost::rational<ASTSampleRatio::BigNum>;
RelativeSize size_of_universum = 0; RelativeSize range_upper = RelativeSize(0);
RelativeSize range_lower = RelativeSize(filter.range_lower);
DataTypePtr custom_key_column_type = custom_key_description.data_types[0]; DataTypePtr custom_key_column_type = custom_key_description.data_types[0];
size_of_universum = RelativeSize(std::numeric_limits<UInt32>::max()) + RelativeSize(1);
if (custom_key_description.data_types.size() == 1) if (custom_key_description.data_types.size() == 1)
{ {
if (typeid_cast<const DataTypeUInt64 *>(custom_key_column_type.get())) if (typeid_cast<const DataTypeUInt64 *>(custom_key_column_type.get()))
size_of_universum = RelativeSize(std::numeric_limits<UInt64>::max()) + RelativeSize(1); {
range_upper = filter.range_upper > 0 ? RelativeSize(filter.range_upper) + RelativeSize(1)
: RelativeSize(std::numeric_limits<UInt64>::max()) + RelativeSize(1);
if (range_upper > RelativeSize(std::numeric_limits<UInt64>::max()) + RelativeSize(1))
throw Exception(
ErrorCodes::INVALID_SETTING_VALUE,
"Invalid custom key range upper bound: {}. Value must be smaller than custom key column type (UInt64) max value",
range_upper);
}
else if (typeid_cast<const DataTypeUInt32 *>(custom_key_column_type.get())) else if (typeid_cast<const DataTypeUInt32 *>(custom_key_column_type.get()))
size_of_universum = RelativeSize(std::numeric_limits<UInt32>::max()) + RelativeSize(1); {
range_upper = filter.range_upper > 0 ? RelativeSize(filter.range_upper) + RelativeSize(1)
: RelativeSize(std::numeric_limits<UInt32>::max()) + RelativeSize(1);
if (range_upper > RelativeSize(std::numeric_limits<UInt32>::max()) + RelativeSize(1))
throw Exception(
ErrorCodes::INVALID_SETTING_VALUE,
"Invalid custom key range upper bound: {}. Value must be smaller than custom key column type (UInt32) max value",
range_upper);
}
else if (typeid_cast<const DataTypeUInt16 *>(custom_key_column_type.get())) else if (typeid_cast<const DataTypeUInt16 *>(custom_key_column_type.get()))
size_of_universum = RelativeSize(std::numeric_limits<UInt16>::max()) + RelativeSize(1); {
range_upper = filter.range_upper > 0 ? RelativeSize(filter.range_upper) + RelativeSize(1)
: RelativeSize(std::numeric_limits<UInt16>::max()) + RelativeSize(1);
if (range_upper > RelativeSize(std::numeric_limits<UInt16>::max()) + RelativeSize(1))
throw Exception(
ErrorCodes::INVALID_SETTING_VALUE,
"Invalid custom key range upper bound: {}. Value must be smaller than custom key column type (UInt16) max value",
range_upper);
}
else if (typeid_cast<const DataTypeUInt8 *>(custom_key_column_type.get())) else if (typeid_cast<const DataTypeUInt8 *>(custom_key_column_type.get()))
size_of_universum = RelativeSize(std::numeric_limits<UInt8>::max()) + RelativeSize(1); {
range_upper = filter.range_upper > 0 ? RelativeSize(filter.range_upper) + RelativeSize(1)
: RelativeSize(std::numeric_limits<UInt8>::max()) + RelativeSize(1);
if (range_upper > RelativeSize(std::numeric_limits<UInt8>::max()) + RelativeSize(1))
throw Exception(
ErrorCodes::INVALID_SETTING_VALUE,
"Invalid custom key range upper bound: {}. Value must be smaller than custom key column type (UInt8) max value",
range_upper);
}
} }
if (size_of_universum == RelativeSize(0)) if (range_upper == RelativeSize(0))
throw Exception( throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER, ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER,
"Invalid custom key column type: {}. Must be one unsigned integer type", "Invalid custom key column type: {}. Must be one unsigned integer type",
custom_key_column_type->getName()); custom_key_column_type->getName());
if (range_lower >= range_upper)
throw Exception(
ErrorCodes::INVALID_SETTING_VALUE,
"Invalid custom key filter range: Range lower bound {} must be smaller than range upper bound {}",
range_lower,
range_upper);
RelativeSize size_of_universum = range_upper - range_lower;
if (size_of_universum <= RelativeSize(replicas_count))
throw Exception(
ErrorCodes::INVALID_SETTING_VALUE, "Invalid custom key filter range: Range must be larger than the number of replicas");
RelativeSize relative_range_size = RelativeSize(1) / replicas_count; RelativeSize relative_range_size = RelativeSize(1) / replicas_count;
RelativeSize relative_range_offset = relative_range_size * RelativeSize(replica_num); RelativeSize relative_range_offset = relative_range_size * RelativeSize(replica_num);
@ -76,19 +121,19 @@ ASTPtr getCustomKeyFilterForParallelReplica(
bool has_lower_limit = false; bool has_lower_limit = false;
bool has_upper_limit = false; bool has_upper_limit = false;
RelativeSize lower_limit_rational = relative_range_offset * size_of_universum; RelativeSize lower_limit_rational = range_lower + relative_range_offset * size_of_universum;
RelativeSize upper_limit_rational = (relative_range_offset + relative_range_size) * size_of_universum; RelativeSize upper_limit_rational = range_lower + (relative_range_offset + relative_range_size) * size_of_universum;
UInt64 lower = boost::rational_cast<ASTSampleRatio::BigNum>(lower_limit_rational); UInt64 lower = boost::rational_cast<ASTSampleRatio::BigNum>(lower_limit_rational);
UInt64 upper = boost::rational_cast<ASTSampleRatio::BigNum>(upper_limit_rational); UInt64 upper = boost::rational_cast<ASTSampleRatio::BigNum>(upper_limit_rational);
if (lower > 0) if (lower_limit_rational > range_lower)
has_lower_limit = true; has_lower_limit = true;
if (upper_limit_rational < size_of_universum) if (upper_limit_rational < range_upper)
has_upper_limit = true; has_upper_limit = true;
assert(has_lower_limit || has_upper_limit); chassert(has_lower_limit || has_upper_limit);
/// Let's add the conditions to cut off something else when the index is scanned again and when the request is processed. /// Let's add the conditions to cut off something else when the index is scanned again and when the request is processed.
std::shared_ptr<ASTFunction> lower_function; std::shared_ptr<ASTFunction> lower_function;
@ -110,7 +155,7 @@ ASTPtr getCustomKeyFilterForParallelReplica(
return upper_function; return upper_function;
} }
assert(upper_function && lower_function); chassert(upper_function && lower_function);
return makeASTFunction("and", std::move(lower_function), std::move(upper_function)); return makeASTFunction("and", std::move(lower_function), std::move(upper_function));
} }
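The rational arithmetic above splits [range_lower, range_upper) into replicas_count equal slices and hands replica i the half-open slice [lower_i, upper_i). A plain-integer sketch of the same calculation (boost::rational replaced by a 128-bit intermediate, which is a GCC/Clang extension; purely illustrative):

#include <cstdint>
#include <iostream>

struct Range { uint64_t lower; uint64_t upper; };   // half-open [lower, upper)

// Slice [range_lower, range_upper) into `replicas` equal parts and return part `replica`.
Range replicaSlice(uint64_t range_lower, uint64_t range_upper, uint64_t replicas, uint64_t replica)
{
    // The real code uses boost::rational to stay exact; a 128-bit intermediate
    // product is enough to avoid overflow in this illustration.
    unsigned __int128 size = range_upper - range_lower;
    uint64_t lower = range_lower + static_cast<uint64_t>(size * replica / replicas);
    uint64_t upper = range_lower + static_cast<uint64_t>(size * (replica + 1) / replicas);
    return {lower, upper};
}

int main()
{
    // E.g. range_lower = 0, range_upper not set, key column is UInt16
    // -> universe is [0, 65536), split across 4 replicas.
    for (uint64_t replica = 0; replica < 4; ++replica)
    {
        Range r = replicaSlice(0, 65536, 4, replica);
        std::cout << "replica " << replica << ": key >= " << r.lower << " AND key < " << r.upper << '\n';
    }
}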

View File

@ -6,16 +6,24 @@
#include <Storages/IStorage.h> #include <Storages/IStorage.h>
#include <Core/SettingsEnums.h> #include <Core/SettingsEnums.h>
#include <Interpreters/DatabaseAndTableWithAlias.h> #include <Interpreters/DatabaseAndTableWithAlias.h>
#include <DataTypes/DataTypesNumber.h>
namespace DB namespace DB
{ {
struct ParallelReplicasCustomKeyFilter
{
ParallelReplicasCustomKeyFilterType filter_type;
UInt64 range_lower;
UInt64 range_upper;
};
/// Get AST for filter created from custom_key /// Get AST for filter created from custom_key
/// replica_num is the number of the replica for which we are generating filter starting from 0 /// replica_num is the number of the replica for which we are generating filter starting from 0
ASTPtr getCustomKeyFilterForParallelReplica( ASTPtr getCustomKeyFilterForParallelReplica(
size_t replicas_count, size_t replicas_count,
size_t replica_num, size_t replica_num,
ASTPtr custom_key_ast, ASTPtr custom_key_ast,
ParallelReplicasCustomKeyFilterType filter_type, ParallelReplicasCustomKeyFilter filter,
const ColumnsDescription & columns, const ColumnsDescription & columns,
const ContextPtr & context); const ContextPtr & context);

View File

@ -59,9 +59,6 @@ Token quotedStringWithUnicodeQuotes(const char *& pos, const char * const token_
pos = find_first_symbols<'\xE2'>(pos, end); pos = find_first_symbols<'\xE2'>(pos, end);
if (pos + 2 >= end) if (pos + 2 >= end)
return Token(error_token, token_begin, end); return Token(error_token, token_begin, end);
/// Empty identifiers are not allowed, while empty strings are.
if (success_token == TokenType::QuotedIdentifier && pos + 3 >= end)
return Token(error_token, token_begin, end);
if (pos[0] == '\xE2' && pos[1] == '\x80' && pos[2] == expected_end_byte) if (pos[0] == '\xE2' && pos[1] == '\x80' && pos[2] == expected_end_byte)
{ {

View File

@ -498,12 +498,14 @@ FilterDAGInfo buildCustomKeyFilterIfNeeded(const StoragePtr & storage,
LOG_TRACE(getLogger("Planner"), "Processing query on a replica using custom_key '{}'", settings.parallel_replicas_custom_key.value); LOG_TRACE(getLogger("Planner"), "Processing query on a replica using custom_key '{}'", settings.parallel_replicas_custom_key.value);
auto parallel_replicas_custom_filter_ast = getCustomKeyFilterForParallelReplica( auto parallel_replicas_custom_filter_ast = getCustomKeyFilterForParallelReplica(
settings.parallel_replicas_count, settings.parallel_replicas_count,
settings.parallel_replica_offset, settings.parallel_replica_offset,
std::move(custom_key_ast), std::move(custom_key_ast),
settings.parallel_replicas_custom_key_filter_type, {settings.parallel_replicas_custom_key_filter_type,
storage->getInMemoryMetadataPtr()->columns, settings.parallel_replicas_custom_key_range_lower,
query_context); settings.parallel_replicas_custom_key_range_upper},
storage->getInMemoryMetadataPtr()->columns,
query_context);
return buildFilterInfo(parallel_replicas_custom_filter_ast, table_expression_query_info.table_expression, planner_context); return buildFilterInfo(parallel_replicas_custom_filter_ast, table_expression_query_info.table_expression, planner_context);
} }

View File

@ -269,7 +269,12 @@ convertFieldToORCLiteral(const orc::Type & orc_type, const Field & field, DataTy
case orc::SHORT: case orc::SHORT:
case orc::INT: case orc::INT:
case orc::LONG: { case orc::LONG: {
/// May throw exception /// May throw exception.
///
/// In particular, it'll throw if we request the column as unsigned, like this:
/// SELECT * FROM file('t.orc', ORC, 'x UInt8') WHERE x > 10
/// We have to reject this, otherwise it would miss values > 127 (because
/// they're treated as negative by ORC).
auto val = field.get<Int64>(); auto val = field.get<Int64>();
return orc::Literal(val); return orc::Literal(val);
} }

View File

@ -315,18 +315,20 @@ void ORCBlockOutputFormat::writeColumn(
if (null_bytemap) if (null_bytemap)
orc_column.hasNulls = true; orc_column.hasNulls = true;
/// ORC doesn't have unsigned types, so cast everything to signed and sign-extend to Int64 to
/// make the ORC library calculate min and max correctly.
switch (type->getTypeId()) switch (type->getTypeId())
{ {
case TypeIndex::Enum8: [[fallthrough]]; case TypeIndex::Enum8: [[fallthrough]];
case TypeIndex::Int8: case TypeIndex::Int8:
{ {
/// Note: Explicit cast to avoid clang-tidy error: 'signed char' to 'long' conversion; consider casting to 'unsigned char' first. /// Note: Explicit cast to avoid clang-tidy error: 'signed char' to 'long' conversion; consider casting to 'unsigned char' first.
writeNumbers<Int8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const Int8 & value){ return static_cast<int64_t>(value); }); writeNumbers<Int8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const Int8 & value){ return Int64(Int8(value)); });
break; break;
} }
case TypeIndex::UInt8: case TypeIndex::UInt8:
{ {
writeNumbers<UInt8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt8 & value){ return value; }); writeNumbers<UInt8, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt8 & value){ return Int64(Int8(value)); });
break; break;
} }
case TypeIndex::Enum16: [[fallthrough]]; case TypeIndex::Enum16: [[fallthrough]];
@ -338,7 +340,7 @@ void ORCBlockOutputFormat::writeColumn(
case TypeIndex::Date: [[fallthrough]]; case TypeIndex::Date: [[fallthrough]];
case TypeIndex::UInt16: case TypeIndex::UInt16:
{ {
writeNumbers<UInt16, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt16 & value){ return value; }); writeNumbers<UInt16, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt16 & value){ return Int64(Int16(value)); });
break; break;
} }
case TypeIndex::Date32: [[fallthrough]]; case TypeIndex::Date32: [[fallthrough]];
@ -349,12 +351,12 @@ void ORCBlockOutputFormat::writeColumn(
} }
case TypeIndex::UInt32: case TypeIndex::UInt32:
{ {
writeNumbers<UInt32, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt32 & value){ return value; }); writeNumbers<UInt32, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const UInt32 & value){ return Int64(Int32(value)); });
break; break;
} }
case TypeIndex::IPv4: case TypeIndex::IPv4:
{ {
writeNumbers<IPv4, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const IPv4 & value){ return value.toUnderType(); }); writeNumbers<IPv4, orc::LongVectorBatch>(orc_column, column, null_bytemap, [](const IPv4 & value){ return Int64(Int32(value.toUnderType())); });
break; break;
} }
case TypeIndex::Int64: case TypeIndex::Int64:
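The changed casts decide whether an unsigned value is zero-extended or first reinterpreted as signed and then sign-extended before it reaches the ORC LongVectorBatch. The difference in isolation (no ORC involved, two's-complement assumed):

#include <cstdint>
#include <iostream>

int main()
{
    uint8_t value = 200;   // a UInt8 column value above 127

    // Old behaviour: zero-extend, the batch sees 200.
    int64_t zero_extended = value;
    // New behaviour: reinterpret as Int8 first, then sign-extend, the batch sees -56,
    // which keeps min/max statistics consistent with the signed ORC type.
    int64_t sign_extended = static_cast<int64_t>(static_cast<int8_t>(value));

    std::cout << zero_extended << ' ' << sign_extended << '\n';   // 200 -56
}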

View File

@ -50,6 +50,8 @@ FilterStep::FilterStep(
, filter_column_name(std::move(filter_column_name_)) , filter_column_name(std::move(filter_column_name_))
, remove_filter_column(remove_filter_column_) , remove_filter_column(remove_filter_column_)
{ {
actions_dag = actions_dag->clone();
actions_dag->removeAliasesForFilter(filter_column_name);
} }
void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings)

View File

@ -118,7 +118,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &)
outputs.resize(size); outputs.resize(size);
} }
auto split_result = filter_step->getExpression()->split(optimize_result.prewhere_nodes, true); auto split_result = filter_step->getExpression()->split(optimize_result.prewhere_nodes, true, true);
/// This is the leak of abstraction. /// This is the leak of abstraction.
/// Splited actions may have inputs which are needed only for PREWHERE. /// Splited actions may have inputs which are needed only for PREWHERE.

View File

@ -59,16 +59,6 @@ std::string DataPartStorageOnDiskBase::getRelativePath() const
return fs::path(root_path) / part_dir / ""; return fs::path(root_path) / part_dir / "";
} }
std::string DataPartStorageOnDiskBase::getParentDirectory() const
{
/// Cut last "/" if it exists (it shouldn't). Otherwise fs::path behave differently.
fs::path part_dir_without_slash = part_dir.ends_with("/") ? part_dir.substr(0, part_dir.size() - 1) : part_dir;
if (part_dir_without_slash.has_parent_path())
return part_dir_without_slash.parent_path();
return "";
}
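The removed helper stripped a trailing '/' before calling parent_path() because std::filesystem treats "a/b/" and "a/b" differently; the same normalisation now happens at the single call site in remove(). The behaviour it guards against:

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

int main()
{
    fs::path with_slash = "detached/all_1_5_1/";
    fs::path without_slash = "detached/all_1_5_1";

    // With a trailing slash the last element is an empty filename,
    // so parent_path() only strips that empty element.
    std::cout << with_slash.parent_path() << '\n';     // "detached/all_1_5_1"
    std::cout << without_slash.parent_path() << '\n';  // "detached"
}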
std::optional<String> DataPartStorageOnDiskBase::getRelativePathForPrefix(LoggerPtr log, const String & prefix, bool detached, bool broken) const std::optional<String> DataPartStorageOnDiskBase::getRelativePathForPrefix(LoggerPtr log, const String & prefix, bool detached, bool broken) const
{ {
assert(!broken || detached); assert(!broken || detached);
@ -684,9 +674,9 @@ void DataPartStorageOnDiskBase::remove(
if (!has_delete_prefix) if (!has_delete_prefix)
{ {
auto parent_path = getParentDirectory(); if (part_dir_without_slash.has_parent_path())
if (!parent_path.empty())
{ {
auto parent_path = part_dir_without_slash.parent_path();
if (parent_path == MergeTreeData::DETACHED_DIR_NAME) if (parent_path == MergeTreeData::DETACHED_DIR_NAME)
throw Exception( throw Exception(
ErrorCodes::LOGICAL_ERROR, ErrorCodes::LOGICAL_ERROR,
@ -694,7 +684,7 @@ void DataPartStorageOnDiskBase::remove(
part_dir, part_dir,
root_path); root_path);
part_dir_without_slash = fs::path(parent_path) / ("delete_tmp_" + std::string{part_dir_without_slash.filename()}); part_dir_without_slash = parent_path / ("delete_tmp_" + std::string{part_dir_without_slash.filename()});
} }
else else
{ {

View File

@ -20,7 +20,6 @@ public:
std::string getRelativePath() const override; std::string getRelativePath() const override;
std::string getPartDirectory() const override; std::string getPartDirectory() const override;
std::string getFullRootPath() const override; std::string getFullRootPath() const override;
std::string getParentDirectory() const override;
Poco::Timestamp getLastModified() const override; Poco::Timestamp getLastModified() const override;
UInt64 calculateTotalSizeOnDisk() const override; UInt64 calculateTotalSizeOnDisk() const override;

View File

@ -96,12 +96,11 @@ public:
virtual MergeTreeDataPartStorageType getType() const = 0; virtual MergeTreeDataPartStorageType getType() const = 0;
/// Methods to get path components of a data part. /// Methods to get path components of a data part.
virtual std::string getFullPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving/all_1_5_1' virtual std::string getFullPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving/all_1_5_1'
virtual std::string getRelativePath() const = 0; /// 'database/table/moving/all_1_5_1' virtual std::string getRelativePath() const = 0; /// 'database/table/moving/all_1_5_1'
virtual std::string getPartDirectory() const = 0; /// 'all_1_5_1' virtual std::string getPartDirectory() const = 0; /// 'all_1_5_1'
virtual std::string getFullRootPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving' virtual std::string getFullRootPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving'
virtual std::string getParentDirectory() const = 0; /// '' (or 'detached' for 'detached/all_1_5_1') /// Can add it if needed /// 'database/table/moving'
/// Can add it if needed /// 'database/table/moving'
/// virtual std::string getRelativeRootPath() const = 0; /// virtual std::string getRelativeRootPath() const = 0;
/// Get a storage for projection. /// Get a storage for projection.

View File

@ -737,11 +737,7 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks
{ {
/// Don't scare people with broken part error /// Don't scare people with broken part error
if (!isRetryableException(std::current_exception())) if (!isRetryableException(std::current_exception()))
{ LOG_ERROR(storage.log, "Part {} is broken and need manual correction", getDataPartStorage().getFullPath());
auto message = getCurrentExceptionMessage(true);
LOG_ERROR(storage.log, "Part {} is broken and need manual correction. Reason: {}",
getDataPartStorage().getFullPath(), message);
}
// There could be conditions that data part to be loaded is broken, but some of meta infos are already written // There could be conditions that data part to be loaded is broken, but some of meta infos are already written
// into meta data before exception, need to clean them all. // into meta data before exception, need to clean them all.

View File

@@ -3894,7 +3894,7 @@ void MergeTreeData::checkPartDynamicColumns(MutableDataPartPtr & part, DataParts
 }
 }
-void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename, bool rename_in_transaction)
+void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename)
 {
 part->is_temp = false;
 part->setState(DataPartState::PreActive);
@@ -3906,15 +3906,12 @@ void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction
 return !may_be_cleaned_up || temporary_parts.contains(dir_name);
 }());
-if (need_rename && !rename_in_transaction)
+if (need_rename)
 part->renameTo(part->name, true);
 LOG_TEST(log, "preparePartForCommit: inserting {} into data_parts_indexes", part->getNameWithState());
 data_parts_indexes.insert(part);
-if (rename_in_transaction)
-out_transaction.addPart(part, need_rename);
-else
-out_transaction.addPart(part, /* need_rename= */ false);
+out_transaction.addPart(part);
 }
 bool MergeTreeData::addTempPart(
@@ -3963,8 +3960,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl(
 MutableDataPartPtr & part,
 Transaction & out_transaction,
 DataPartsLock & lock,
-DataPartsVector * out_covered_parts,
-bool rename_in_transaction)
+DataPartsVector * out_covered_parts)
 {
 LOG_TRACE(log, "Renaming temporary part {} to {} with tid {}.", part->getDataPartStorage().getPartDirectory(), part->name, out_transaction.getTID());
@@ -4003,7 +3999,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl(
 /// All checks are passed. Now we can rename the part on disk.
 /// So, we maintain invariant: if a non-temporary part in filesystem then it is in data_parts
-preparePartForCommit(part, out_transaction, /* need_rename= */ true, rename_in_transaction);
+preparePartForCommit(part, out_transaction, /* need_rename */ true);
 if (out_covered_parts)
 {
@@ -4018,31 +4014,29 @@ bool MergeTreeData::renameTempPartAndReplaceUnlocked(
 MutableDataPartPtr & part,
 Transaction & out_transaction,
 DataPartsLock & lock,
-bool rename_in_transaction)
+DataPartsVector * out_covered_parts)
 {
-return renameTempPartAndReplaceImpl(part, out_transaction, lock, /*out_covered_parts=*/ nullptr, rename_in_transaction);
+return renameTempPartAndReplaceImpl(part, out_transaction, lock, out_covered_parts);
 }
 MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
 MutableDataPartPtr & part,
-Transaction & out_transaction,
-bool rename_in_transaction)
+Transaction & out_transaction)
 {
 auto part_lock = lockParts();
 DataPartsVector covered_parts;
-renameTempPartAndReplaceImpl(part, out_transaction, part_lock, &covered_parts, rename_in_transaction);
+renameTempPartAndReplaceImpl(part, out_transaction, part_lock, &covered_parts);
 return covered_parts;
 }
 bool MergeTreeData::renameTempPartAndAdd(
 MutableDataPartPtr & part,
 Transaction & out_transaction,
-DataPartsLock & lock,
-bool rename_in_transaction)
+DataPartsLock & lock)
 {
 DataPartsVector covered_parts;
-if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts, rename_in_transaction))
+if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts))
 return false;
 if (!covered_parts.empty())
@@ -4083,9 +4077,9 @@ void MergeTreeData::removePartsFromWorkingSet(MergeTreeTransaction * txn, const
 resetObjectColumnsFromActiveParts(acquired_lock);
 }
-void MergeTreeData::removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove, DataPartsLock * acquired_lock)
+void MergeTreeData::removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove)
 {
-auto lock = (acquired_lock) ? DataPartsLock() : lockParts();
+auto lock = lockParts();
 for (const auto & part : remove)
 {
@@ -4251,9 +4245,8 @@ MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromW
 auto [new_data_part, tmp_dir_holder] = createEmptyPart(empty_info, partition, empty_part_name, NO_TRANSACTION_PTR);
 MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW);
-renameTempPartAndAdd(new_data_part, transaction, lock, /*rename_in_transaction=*/ true); /// All covered parts must be already removed
-transaction.renameParts();
+renameTempPartAndAdd(new_data_part, transaction, lock); /// All covered parts must be already removed
 /// It will add the empty part to the set of Outdated parts without making it Active (exactly what we need)
 transaction.rollback(&lock);
 new_data_part->remove_time.store(0, std::memory_order_relaxed);
@@ -6624,54 +6617,25 @@ TransactionID MergeTreeData::Transaction::getTID() const
 return Tx::PrehistoricTID;
 }
-void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part, bool need_rename)
+void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part)
 {
 precommitted_parts.insert(part);
-if (need_rename)
-precommitted_parts_need_rename.insert(part);
 }
 void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
 {
 if (!isEmpty())
 {
-for (const auto & part : precommitted_parts)
-part->version.creation_csn.store(Tx::RolledBackCSN);
-auto non_detached_precommitted_parts = precommitted_parts;
-/// Remove detached parts from working set.
-///
-/// It is possible to have detached parts here, only when rename (in
-/// commit()) of detached parts had been broken (i.e. during ATTACH),
-/// i.e. the part itself is broken.
-DataPartsVector detached_precommitted_parts;
-for (auto it = non_detached_precommitted_parts.begin(); it != non_detached_precommitted_parts.end();)
-{
-const auto & part = *it;
-if (part->getDataPartStorage().getParentDirectory() == DETACHED_DIR_NAME)
-{
-detached_precommitted_parts.push_back(part);
-it = non_detached_precommitted_parts.erase(it);
-}
-else
-++it;
-}
 WriteBufferFromOwnString buf;
 buf << "Removing parts:";
-for (const auto & part : non_detached_precommitted_parts)
+for (const auto & part : precommitted_parts)
 buf << " " << part->getDataPartStorage().getPartDirectory();
 buf << ".";
-if (!detached_precommitted_parts.empty())
-{
-buf << " Rollbacking parts state to temporary and removing from working set:";
-for (const auto & part : detached_precommitted_parts)
-buf << " " << part->getDataPartStorage().getPartDirectory();
-buf << ".";
-}
 LOG_DEBUG(data.log, "Undoing transaction {}. {}", getTID(), buf.str());
+for (const auto & part : precommitted_parts)
+part->version.creation_csn.store(Tx::RolledBackCSN);
 /// It would be much better with TSA...
 auto our_lock = (lock) ? DataPartsLock() : data.lockParts();
@@ -6681,7 +6645,7 @@ void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
 if (!data.all_data_dropped)
 {
 Strings part_names;
-for (const auto & part : non_detached_precommitted_parts)
+for (const auto & part : precommitted_parts)
 part_names.emplace_back(part->name);
 throw Exception(ErrorCodes::LOGICAL_ERROR, "There are some PreActive parts ({}) to rollback, "
 "but data parts set is empty and table {} was not dropped. It's a bug",
@@ -6690,12 +6654,8 @@ void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
 }
 else
 {
-data.removePartsFromWorkingSetImmediatelyAndSetTemporaryState(
-detached_precommitted_parts,
-&our_lock);
 data.removePartsFromWorkingSet(txn,
-DataPartsVector(non_detached_precommitted_parts.begin(), non_detached_precommitted_parts.end()),
+DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()),
 /* clear_without_timeout = */ true, &our_lock);
 }
 }
@@ -6705,16 +6665,7 @@ void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
 void MergeTreeData::Transaction::clear()
 {
-chassert(precommitted_parts.size() >= precommitted_parts_need_rename.size());
 precommitted_parts.clear();
-precommitted_parts_need_rename.clear();
-}
-void MergeTreeData::Transaction::renameParts()
-{
-for (const auto & part_need_rename : precommitted_parts_need_rename)
-part_need_rename->renameTo(part_need_rename->name, true);
-precommitted_parts_need_rename.clear();
 }
 MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(DataPartsLock * acquired_parts_lock)
@@ -6723,9 +6674,6 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(DataPartsLock
 if (!isEmpty())
 {
-if (!precommitted_parts_need_rename.empty())
-throw Exception(ErrorCodes::LOGICAL_ERROR, "Parts not renamed");
 auto settings = data.getSettings();
 auto parts_lock = acquired_parts_lock ? DataPartsLock() : data.lockParts();
 auto * owing_parts_lock = acquired_parts_lock ? acquired_parts_lock : &parts_lock;
@@ -6734,8 +6682,6 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(DataPartsLock
 if (part->getDataPartStorage().hasActiveTransaction())
 part->getDataPartStorage().commitTransaction();
-renameParts();
 if (txn)
 {
 for (const auto & part : precommitted_parts)
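
For orientation, the hunks above restore the flow in which a pre-committed part is renamed immediately inside preparePartForCommit() when need_rename is set, and Transaction::addPart() no longer takes a need_rename flag or defers work to a separate renameParts() step. Below is a minimal, self-contained sketch of that control flow; Part and Transaction here are invented stand-ins for the real MergeTree types, so treat this as an illustration of the diff, not as ClickHouse code.

#include <iostream>
#include <memory>
#include <set>
#include <string>

// Invented stand-ins for the real data part / transaction types (illustration only).
struct Part
{
    std::string name;
    bool is_temp = true;
    void renameTo(const std::string & new_name) { name = new_name; is_temp = false; }
};

struct Transaction
{
    std::set<std::shared_ptr<Part>> precommitted_parts;
    // After the merge there is no need_rename flag and no deferred rename list.
    void addPart(const std::shared_ptr<Part> & part) { precommitted_parts.insert(part); }
    void commit() { precommitted_parts.clear(); }   // nothing left to rename at commit time
};

// Mirrors the restored preparePartForCommit(): rename right away, then register the part.
void preparePartForCommit(const std::shared_ptr<Part> & part, Transaction & out_transaction, bool need_rename)
{
    if (need_rename)
        part->renameTo(part->name);
    out_transaction.addPart(part);
}

int main()
{
    Transaction tx;
    auto part = std::make_shared<Part>();
    part->name = "all_1_1_0";
    preparePartForCommit(part, tx, /*need_rename=*/ true);
    tx.commit();
    std::cout << part->name << (part->is_temp ? " (temp)" : " (committed)") << '\n';
}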

View File

@@ -255,9 +255,7 @@ public:
 DataPartsVector commit(DataPartsLock * acquired_parts_lock = nullptr);
-void renameParts();
-void addPart(MutableDataPartPtr & part, bool need_rename);
+void addPart(MutableDataPartPtr & part);
 void rollback(DataPartsLock * lock = nullptr);
@@ -288,9 +286,9 @@ public:
 MergeTreeData & data;
 MergeTreeTransaction * txn;
 MutableDataParts precommitted_parts;
-MutableDataParts precommitted_parts_need_rename;
+MutableDataParts locked_parts;
 };
 using TransactionUniquePtr = std::unique_ptr<Transaction>;
@@ -590,27 +588,25 @@ public:
 bool renameTempPartAndAdd(
 MutableDataPartPtr & part,
 Transaction & transaction,
-DataPartsLock & lock,
-bool rename_in_transaction);
+DataPartsLock & lock);
 /// The same as renameTempPartAndAdd but the block range of the part can contain existing parts.
 /// Returns all parts covered by the added part (in ascending order).
 DataPartsVector renameTempPartAndReplace(
 MutableDataPartPtr & part,
-Transaction & out_transaction,
-bool rename_in_transaction);
+Transaction & out_transaction);
 /// Unlocked version of previous one. Useful when added multiple parts with a single lock.
 bool renameTempPartAndReplaceUnlocked(
 MutableDataPartPtr & part,
 Transaction & out_transaction,
 DataPartsLock & lock,
-bool rename_in_transaction);
+DataPartsVector * out_covered_parts = nullptr);
 /// Remove parts from working set immediately (without wait for background
 /// process). Transfer part state to temporary. Have very limited usage only
 /// for new parts which aren't already present in table.
-void removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove, DataPartsLock * acquired_lock = nullptr);
+void removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove);
 /// Removes parts from the working set parts.
 /// Parts in add must already be in data_parts with PreActive, Active, or Outdated states.
@@ -1606,10 +1602,7 @@ private:
 /// Preparing itself to be committed in memory: fill some fields inside part, add it to data_parts_indexes
 /// in precommitted state and to transaction
-///
-/// @param need_rename - rename the part
-/// @param rename_in_transaction - if set, the rename will be done as part of transaction (without holding DataPartsLock), otherwise inplace (when it does not make sense).
-void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename, bool rename_in_transaction = false);
+void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename);
 /// Low-level method for preparing parts for commit (in-memory).
 /// FIXME Merge MergeTreeTransaction and Transaction
@@ -1617,8 +1610,7 @@ private:
 MutableDataPartPtr & part,
 Transaction & out_transaction,
 DataPartsLock & lock,
-DataPartsVector * out_covered_parts,
-bool rename_in_transaction);
+DataPartsVector * out_covered_parts);
 /// RAII Wrapper for atomic work with currently moving parts
 /// Acquire them in constructor and remove them in destructor
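
One detail of the header hunks above is that renameTempPartAndReplaceUnlocked() regains its DataPartsVector * out_covered_parts = nullptr output parameter. A throwaway sketch of that calling convention follows; the function body and the part names are made up, only the shape of the signature comes from the diff.

#include <iostream>
#include <string>
#include <vector>

using DataPartsVector = std::vector<std::string>;   // stand-in for the real part list

// Sketch: pretend the newly added part covers two existing parts and report them
// through the optional output parameter, as the restored signature allows.
bool renameTempPartAndReplaceUnlocked(const std::string & new_part, DataPartsVector * out_covered_parts = nullptr)
{
    if (out_covered_parts)
    {
        out_covered_parts->push_back("all_1_1_0");
        out_covered_parts->push_back("all_2_2_0");
    }
    std::cout << "added " << new_part << '\n';
    return true;
}

int main()
{
    DataPartsVector covered;
    if (renameTempPartAndReplaceUnlocked("all_1_2_1", &covered))
        for (const auto & name : covered)
            std::cout << "covered: " << name << '\n';
}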

View File

@@ -748,10 +748,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart
 "but transactions were enabled for this table");
 /// Rename new part, add to the set and remove original parts.
-auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction, /*rename_in_transaction=*/ true);
-/// Explicitly rename part while still holding the lock for tmp folder to avoid cleanup
-out_transaction.renameParts();
+auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction);
 /// Let's check that all original parts have been deleted and only them.
 if (replaced_parts.size() != parts.size())

View File

@@ -566,7 +566,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
 out.function = RPNElement::FUNCTION_EQUALS;
 out.bloom_filter = std::make_unique<BloomFilter>(params);
 const auto & value = const_value.get<String>();
-token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
+token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, true, false);
 return true;
 }
 else if (function_name == "endsWith")
@@ -575,7 +575,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
 out.function = RPNElement::FUNCTION_EQUALS;
 out.bloom_filter = std::make_unique<BloomFilter>(params);
 const auto & value = const_value.get<String>();
-token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
+token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, false, true);
 return true;
 }
 else if (function_name == "multiSearchAny"
@@ -596,7 +596,15 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
 bloom_filters.back().emplace_back(params);
 const auto & value = element.get<String>();
-token_extractor->stringToBloomFilter(value.data(), value.size(), bloom_filters.back().back());
+if (function_name == "multiSearchAny")
+{
+token_extractor->substringToBloomFilter(value.data(), value.size(), bloom_filters.back().back(), false, false);
+}
+else
+{
+token_extractor->stringToBloomFilter(value.data(), value.size(), bloom_filters.back().back());
+}
 }
 out.set_bloom_filters = std::move(bloom_filters);
 return true;
@@ -625,12 +633,12 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
 for (const auto & alternative : alternatives)
 {
 bloom_filters.back().emplace_back(params);
-token_extractor->stringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back());
+token_extractor->substringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back(), false, false);
 }
 out.set_bloom_filters = std::move(bloom_filters);
 }
 else
-token_extractor->stringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter);
+token_extractor->substringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter, false, false);
 return true;
 }
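
The bloom-filter hunks above switch startsWith, endsWith and the substring paths from stringToBloomFilter() to substringToBloomFilter() with two extra booleans. Judging only from these call sites, the flags mark whether the value is anchored at the start or at the end of the column value (true, false for startsWith; false, true for endsWith; false, false for plain substrings such as multiSearchAny needles), while exact token matches keep stringToBloomFilter(). The sketch below shows that dispatch; the extractor, the filter type and the flag names are simplified assumptions, only the flag combinations are taken from the diff.

#include <iostream>
#include <string>
#include <vector>

struct BloomFilter { std::vector<std::string> entries; };   // stand-in, not the real class

// Stand-in extractor: records what it was asked to index and with which anchoring.
struct TokenExtractor
{
    void stringToBloomFilter(const std::string & value, BloomFilter & bf)
    {
        bf.entries.push_back("token:" + value);
    }
    void substringToBloomFilter(const std::string & value, BloomFilter & bf, bool is_prefix, bool is_suffix)
    {
        bf.entries.push_back("substr:" + value + (is_prefix ? ":prefix" : "") + (is_suffix ? ":suffix" : ""));
    }
};

// Dispatch mirroring the flag combinations visible in the hunks above.
void addValue(TokenExtractor & extractor, const std::string & function_name, const std::string & value, BloomFilter & bf)
{
    if (function_name == "startsWith")
        extractor.substringToBloomFilter(value, bf, /*is_prefix=*/ true, /*is_suffix=*/ false);
    else if (function_name == "endsWith")
        extractor.substringToBloomFilter(value, bf, /*is_prefix=*/ false, /*is_suffix=*/ true);
    else if (function_name == "multiSearchAny")
        extractor.substringToBloomFilter(value, bf, /*is_prefix=*/ false, /*is_suffix=*/ false);
    else
        extractor.stringToBloomFilter(value, bf);
}

int main()
{
    TokenExtractor extractor;
    BloomFilter bf;
    addValue(extractor, "startsWith", "click", bf);
    addValue(extractor, "multiSearchAny", "house", bf);
    for (const auto & e : bf.entries)
        std::cout << e << '\n';
}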

View File

@@ -595,7 +595,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
 out.function = RPNElement::FUNCTION_EQUALS;
 out.gin_filter = std::make_unique<GinFilter>(params);
 const auto & value = const_value.get<String>();
-token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter);
+token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, true, false);
 return true;
 }
 else if (function_name == "endsWith")
@@ -604,7 +604,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
 out.function = RPNElement::FUNCTION_EQUALS;
 out.gin_filter = std::make_unique<GinFilter>(params);
 const auto & value = const_value.get<String>();
-token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter);
+token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, false, true);
 return true;
 }
 else if (function_name == "multiSearchAny")
@@ -622,7 +622,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
 gin_filters.back().emplace_back(params);
 const auto & value = element.get<String>();
-token_extractor->stringToGinFilter(value.data(), value.size(), gin_filters.back().back());
+token_extractor->substringToGinFilter(value.data(), value.size(), gin_filters.back().back(), false, false);
 }
 out.set_gin_filters = std::move(gin_filters);
 return true;
@@ -650,14 +650,14 @@ bool MergeTreeConditionFullText::traverseASTEquals(
 for (const auto & alternative : alternatives)
 {
 gin_filters.back().emplace_back(params);
-token_extractor->stringToGinFilter(alternative.data(), alternative.size(), gin_filters.back().back());
+token_extractor->substringToGinFilter(alternative.data(), alternative.size(), gin_filters.back().back(), false, false);
 }
 out.set_gin_filters = std::move(gin_filters);
 }
 else
 {
 out.gin_filter = std::make_unique<GinFilter>(params);
-token_extractor->stringToGinFilter(required_substring.data(), required_substring.size(), *out.gin_filter);
+token_extractor->substringToGinFilter(required_substring.data(), required_substring.size(), *out.gin_filter, false, false);
 }
 return true;
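
The full-text (GIN) hunks mirror the bloom-filter change: anchored values go through substringToGinFilter() with the same prefix/suffix flags, and every multiSearchAny needle gets its own filter collected into set_gin_filters. The per-needle collection is sketched below with simplified stand-ins; GinFilter and the extractor call here are not the real classes, only the collection pattern is taken from the diff.

#include <iostream>
#include <string>
#include <vector>

struct GinFilter { std::vector<std::string> tokens; };   // stand-in for the real GIN filter

// Stand-in for token_extractor->substringToGinFilter(...); flags are kept for shape only.
void substringToGinFilter(const std::string & value, GinFilter & filter, bool /*is_prefix*/, bool /*is_suffix*/)
{
    filter.tokens.push_back(value);
}

int main()
{
    // One row of filters per argument set, one filter per needle, as with set_gin_filters above.
    std::vector<std::vector<GinFilter>> gin_filters;
    gin_filters.emplace_back();
    for (const std::string & needle : {std::string("error"), std::string("warning")})
    {
        gin_filters.back().emplace_back();
        substringToGinFilter(needle, gin_filters.back().back(), /*is_prefix=*/ false, /*is_suffix=*/ false);
    }
    std::cout << "filters in the set: " << gin_filters.back().size() << '\n';
}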

View File

@@ -186,8 +186,7 @@ void MergeTreeSink::finishDelayedChunk()
 }
 }
-/// FIXME
-added = storage.renameTempPartAndAdd(part, transaction, lock, /*rename_in_transaction=*/ false);
+added = storage.renameTempPartAndAdd(part, transaction, lock);
 transaction.commit(&lock);
 }

View File

@@ -236,11 +236,10 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit
 if (data_part_storage.hasActiveTransaction())
 data_part_storage.precommitTransaction();
-storage.renameTempPartAndReplace(new_part, *transaction_ptr, /*rename_in_transaction=*/ true);
+storage.renameTempPartAndReplace(new_part, *transaction_ptr);
 try
 {
-transaction_ptr->renameParts();
 storage.checkPartChecksumsAndCommit(*transaction_ptr, new_part, mutate_task->getHardlinkedFiles());
 }
 catch (const Exception & e)

View File

@@ -97,8 +97,7 @@ bool MutatePlainMergeTreeTask::executeStep()
 MergeTreeData::Transaction transaction(storage, merge_mutate_entry->txn.get());
 /// FIXME Transactions: it's too optimistic, better to lock parts before starting transaction
-storage.renameTempPartAndReplace(new_part, transaction, /*rename_in_transaction=*/ true);
-transaction.renameParts();
+storage.renameTempPartAndReplace(new_part, transaction);
 transaction.commit();
 storage.updateMutationEntriesErrors(future_part, true, "");

View File

@@ -888,7 +888,7 @@ std::pair<std::vector<String>, bool> ReplicatedMergeTreeSinkImpl<async_insert>::
 try
 {
 auto lock = storage.lockParts();
-storage.renameTempPartAndAdd(part, transaction, lock, /*rename_in_transaction=*/ false);
+storage.renameTempPartAndAdd(part, transaction, lock);
 }
 catch (const Exception & e)
 {
@@ -903,9 +903,6 @@ std::pair<std::vector<String>, bool> ReplicatedMergeTreeSinkImpl<async_insert>::
 throw;
 }
-/// Rename parts before committing to ZooKeeper without holding DataPartsLock.
-transaction.renameParts();
 ThreadFuzzer::maybeInjectSleep();
 fiu_do_on(FailPoints::replicated_merge_tree_commit_zk_fail_after_op, { zookeeper->forceFailureAfterOperation(); });
