Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 16:42:05 +00:00)

Merge branch 'ClickHouse:master' into master

Commit 4ca49cec6c

.gitmodules (3 changes, vendored)
@@ -360,3 +360,6 @@
 [submodule "contrib/sqids-cpp"]
 	path = contrib/sqids-cpp
 	url = https://github.com/sqids/sqids-cpp.git
+[submodule "contrib/idna"]
+	path = contrib/idna
+	url = https://github.com/ada-url/idna.git
@@ -22,7 +22,7 @@
 * The MergeTree setting `clean_deleted_rows` is deprecated and has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (it can be unlocked with the `allow_experimental_replacing_merge_with_cleanup` setting). [#58267](https://github.com/ClickHouse/ClickHouse/pull/58267) ([Alexander Tokmakov](https://github.com/tavplubix)). This fixes [#57930](https://github.com/ClickHouse/ClickHouse/issues/57930). This closes [#54988](https://github.com/ClickHouse/ClickHouse/issues/54988). This closes [#54570](https://github.com/ClickHouse/ClickHouse/issues/54570). This closes [#50346](https://github.com/ClickHouse/ClickHouse/issues/50346). This closes [#47579](https://github.com/ClickHouse/ClickHouse/issues/47579). The feature has to be removed because it is not good. We have to remove it as quickly as possible, because there is no other option. [#57932](https://github.com/ClickHouse/ClickHouse/pull/57932) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 
 #### New Feature
-* Implement Refreshable Materialized Views, requested in [#33919](https://github.com/ClickHouse/ClickHouse/issues/57995). [#56946](https://github.com/ClickHouse/ClickHouse/pull/56946) ([Michael Kolupaev](https://github.com/al13n321), [Michael Guzov](https://github.com/koloshmet)).
+* Implement Refreshable Materialized Views, requested in [#33919](https://github.com/ClickHouse/ClickHouse/issues/33919). [#56946](https://github.com/ClickHouse/ClickHouse/pull/56946) ([Michael Kolupaev](https://github.com/al13n321), [Michael Guzov](https://github.com/koloshmet)).
 * Introduce `PASTE JOIN`, which allows users to join tables without an `ON` clause, simply by row number. Example: `SELECT * FROM (SELECT number AS a FROM numbers(2)) AS t1 PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY a DESC) AS t2` (a sketch with the expected output follows this hunk). [#57995](https://github.com/ClickHouse/ClickHouse/pull/57995) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
 * The `ORDER BY` clause now supports specifying `ALL`, meaning that ClickHouse sorts by all columns in the `SELECT` clause. Example: `SELECT col1, col2 FROM tab WHERE [...] ORDER BY ALL`. [#57875](https://github.com/ClickHouse/ClickHouse/pull/57875) ([zhongyuankai](https://github.com/zhongyuankai)).
 * Added a new mutation command `ALTER TABLE <table> APPLY DELETED MASK`, which enforces applying the mask written by lightweight delete and removes rows marked as deleted from disk. [#57433](https://github.com/ClickHouse/ClickHouse/pull/57433) ([Anton Popov](https://github.com/CurtizJ)).
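To make the `PASTE JOIN` entry concrete, here is a minimal sketch (an editor's illustration, not part of the patch); the expected output is inferred from the description, so treat it as an assumption:

```sql
-- Pair up rows of two subqueries by row number, with no ON clause.
SELECT *
FROM (SELECT number AS a FROM numbers(2)) AS t1
PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY a DESC) AS t2;
-- Expected shape: two columns (t1.a, t2.a) and two rows,
-- e.g. (0, 1) and (1, 0), since rows are matched positionally.
```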
@@ -375,6 +375,7 @@
 * Do not interpret the `send_timeout` set on the client side as the `receive_timeout` on the server side, and vice versa. [#56035](https://github.com/ClickHouse/ClickHouse/pull/56035) ([Azat Khuzhin](https://github.com/azat)).
 * Comparison of time intervals with different units now throws an exception. This closes [#55942](https://github.com/ClickHouse/ClickHouse/issues/55942). You might have occasionally relied on the previous behavior, when the underlying numeric values were compared regardless of the units. [#56090](https://github.com/ClickHouse/ClickHouse/pull/56090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Rewrote the experimental `S3Queue` table engine completely: changed the way we keep information in ZooKeeper, which allows making fewer ZooKeeper requests; added caching of the ZooKeeper state in cases when we know the state will not change; made the polling from S3 less aggressive; and changed the way the TTL and the max set of tracked files are maintained, which is now a background process. Added the `system.s3queue` and `system.s3queue_log` tables. Closes [#54998](https://github.com/ClickHouse/ClickHouse/issues/54998). [#54422](https://github.com/ClickHouse/ClickHouse/pull/54422) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * Arbitrary paths on the HTTP endpoint are no longer interpreted as a request to the `/query` endpoint. [#55521](https://github.com/ClickHouse/ClickHouse/pull/55521) ([Konstantin Bogdanov](https://github.com/thevar1able)).
 
 #### New Feature
+* Add function `arrayFold(accumulator, x1, ..., xn -> expression, initial, array1, ..., arrayn)`, which applies a lambda function to multiple arrays of the same cardinality and collects the result in an accumulator. [#49794](https://github.com/ClickHouse/ClickHouse/pull/49794) ([Lirikl](https://github.com/Lirikl)).
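A minimal sketch of `arrayFold` (an editor's illustration). Note that the changelog entry lists the initial value before the arrays, while the released function, as I understand it, takes the initial accumulator as the last argument; adjust the argument order for your version:

```sql
-- Fold an addition lambda over one array, starting the accumulator at 0.
SELECT arrayFold((acc, x) -> acc + x, [1, 2, 3, 4], toInt64(0)) AS total;
-- Expected: 10
```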
contrib/CMakeLists.txt (1 change, vendored)

@@ -154,6 +154,7 @@ add_contrib (libpqxx-cmake libpqxx)
 add_contrib (libpq-cmake libpq)
 add_contrib (nuraft-cmake NuRaft)
 add_contrib (fast_float-cmake fast_float)
+add_contrib (idna-cmake idna)
 add_contrib (datasketches-cpp-cmake datasketches-cpp)
 add_contrib (incbin-cmake incbin)
 add_contrib (sqids-cpp-cmake sqids-cpp)
contrib/NuRaft (2 changes, vendored)

@@ -1 +1 @@
-Subproject commit b7ea89b817a18dc0eafc1f909d568869f02d2d04
+Subproject commit 2f5f52c4d8c87c2a3a3d101ca3a0194c9b77526f
contrib/idna (new submodule, vendored)

@@ -0,0 +1 @@
+Subproject commit 3c8be01d42b75649f1ac9b697d0ef757eebfe667
contrib/idna-cmake/CMakeLists.txt (new file, 24 lines)

@@ -0,0 +1,24 @@
+option(ENABLE_IDNA "Enable idna support" ${ENABLE_LIBRARIES})
+if ((NOT ENABLE_IDNA))
+    message (STATUS "Not using idna")
+    return()
+endif()
+
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/idna")
+
+set (SRCS
+    "${LIBRARY_DIR}/src/idna.cpp"
+    "${LIBRARY_DIR}/src/mapping.cpp"
+    "${LIBRARY_DIR}/src/mapping_tables.cpp"
+    "${LIBRARY_DIR}/src/normalization.cpp"
+    "${LIBRARY_DIR}/src/normalization_tables.cpp"
+    "${LIBRARY_DIR}/src/punycode.cpp"
+    "${LIBRARY_DIR}/src/to_ascii.cpp"
+    "${LIBRARY_DIR}/src/to_unicode.cpp"
+    "${LIBRARY_DIR}/src/unicode_transcoding.cpp"
+    "${LIBRARY_DIR}/src/validity.cpp"
+)
+
+add_library (_idna ${SRCS})
+target_include_directories(_idna PUBLIC "${LIBRARY_DIR}/include")
+
+add_library (ch_contrib::idna ALIAS _idna)
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.12.1.1368"
+ARG VERSION="23.12.2.59"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.12.1.1368"
+ARG VERSION="23.12.2.59"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
@@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.12.1.1368"
+ARG VERSION="23.12.2.59"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
@@ -236,6 +236,10 @@ function check_logs_for_critical_errors()
         && echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
         || echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv
 
+    rg -Fa "it is lost forever" /var/log/clickhouse-server/clickhouse-server*.log | grep 'SharedMergeTreePartCheckThread' > /dev/null \
+        && echo -e "Lost forever for SharedMergeTree$FAIL" >> /test_output/test_results.tsv \
+        || echo -e "No SharedMergeTree lost forever in clickhouse-server.log$OK" >> /test_output/test_results.tsv
+
     # Remove file no_such_key_errors.txt if it's empty
     [ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt
docs/changelogs/v23.10.6.60-stable.md (new file, 51 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.10.6.60-stable (68907bbe643) FIXME as compared to v23.10.5.20-stable (e84001e5c61)

#### Improvement
* Backported in [#58493](https://github.com/ClickHouse/ClickHouse/issues/58493): Fix transforming queries to MySQL-compatible syntax. Fixes [#57253](https://github.com/ClickHouse/ClickHouse/issues/57253). Fixes [#52654](https://github.com/ClickHouse/ClickHouse/issues/52654). Fixes [#56729](https://github.com/ClickHouse/ClickHouse/issues/56729). [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Backported in [#57659](https://github.com/ClickHouse/ClickHouse/issues/57659): Handle the SIGABRT case when getting the PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).

#### Build/Testing/Packaging Improvement
* Backported in [#57586](https://github.com/ClickHouse/ClickHouse/issues/57586): Fix the issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only true Nested type if flatten_nested=1, not all Array(Tuple) [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix ALTER COLUMN with ALIAS [#56493](https://github.com/ClickHouse/ClickHouse/pull/56493) ([Nikolay Degterinsky](https://github.com/evillique)).
* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* Fix `ReadonlyReplica` metric for all cases [#57267](https://github.com/ClickHouse/ClickHouse/pull/57267) ([Antonio Andelic](https://github.com/antonio2368)).
* Background merges correctly use temporary data storage in the cache [#57275](https://github.com/ClickHouse/ClickHouse/pull/57275) ([vdimir](https://github.com/vdimir)).
* MergeTree mutations reuse source part index granularity [#57352](https://github.com/ClickHouse/ClickHouse/pull/57352) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix function jsonMergePatch for partially const columns [#57379](https://github.com/ClickHouse/ClickHouse/pull/57379) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* bugfix: correctly parse SYSTEM STOP LISTEN TCP SECURE [#57483](https://github.com/ClickHouse/ClickHouse/pull/57483) ([joelynch](https://github.com/joelynch)).
* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Disable system.kafka_consumers by default (due to possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix Integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
* MergeTreePrefetchedReadPool disable for LIMIT only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).

#### NO CL CATEGORY

* Backported in [#57916](https://github.com/ClickHouse/ClickHouse/issues/57916). [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).
docs/changelogs/v23.11.4.24-stable.md (new file, 26 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.11.4.24-stable (e79d840d7fe) FIXME as compared to v23.11.3.23-stable (a14ab450b0e)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only true Nested type if flatten_nested=1, not all Array(Tuple) [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable system.kafka_consumers by default (due to possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix Integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
* MergeTreePrefetchedReadPool disable for LIMIT only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Handle another case for preprocessing in Keeper [#58308](https://github.com/ClickHouse/ClickHouse/pull/58308) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).
docs/changelogs/v23.12.2.59-stable.md (new file, 32 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.2.59-stable (17ab210e761) FIXME as compared to v23.12.1.1368-stable (a2faa65b080)

#### Backward Incompatible Change
* Backported in [#58389](https://github.com/ClickHouse/ClickHouse/issues/58389): The MergeTree setting `clean_deleted_rows` is deprecated and has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (unless `allow_experimental_replacing_merge_with_cleanup` is enabled). [#58316](https://github.com/ClickHouse/ClickHouse/pull/58316) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only true Nested type if flatten_nested=1, not all Array(Tuple) [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix segfault when graphite table does not have agg function [#58453](https://github.com/ClickHouse/ClickHouse/pull/58453) ([Duc Canh Le](https://github.com/canhld94)).
* MergeTreePrefetchedReadPool disable for LIMIT only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Refreshable materialized views (takeover)"'. [#58296](https://github.com/ClickHouse/ClickHouse/pull/58296) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix an error in the release script: it didn't allow making 23.12. [#58288](https://github.com/ClickHouse/ClickHouse/pull/58288) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.12.1.1368-stable [#58290](https://github.com/ClickHouse/ClickHouse/pull/58290) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix test_storage_s3_queue/test.py::test_drop_table [#58293](https://github.com/ClickHouse/ClickHouse/pull/58293) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Handle another case for preprocessing in Keeper [#58308](https://github.com/ClickHouse/ClickHouse/pull/58308) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).
docs/changelogs/v23.3.19.32-lts.md (new file, 36 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.3.19.32-lts (c4d4ca8ec02) FIXME as compared to v23.3.18.15-lts (7228475d77a)

#### Backward Incompatible Change
* Backported in [#57840](https://github.com/ClickHouse/ClickHouse/issues/57840): Remove function `arrayFold` because it has a bug. This closes [#57816](https://github.com/ClickHouse/ClickHouse/issues/57816). This closes [#57458](https://github.com/ClickHouse/ClickHouse/issues/57458). [#57836](https://github.com/ClickHouse/ClickHouse/pull/57836) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Improvement
* Backported in [#58489](https://github.com/ClickHouse/ClickHouse/issues/58489): Fix transforming queries to MySQL-compatible syntax. Fixes [#57253](https://github.com/ClickHouse/ClickHouse/issues/57253). Fixes [#52654](https://github.com/ClickHouse/ClickHouse/issues/52654). Fixes [#56729](https://github.com/ClickHouse/ClickHouse/issues/56729). [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Backported in [#57653](https://github.com/ClickHouse/ClickHouse/issues/57653): Handle the SIGABRT case when getting the PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).

#### Build/Testing/Packaging Improvement
* Backported in [#57580](https://github.com/ClickHouse/ClickHouse/issues/57580): Fix the issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* MergeTree mutations reuse source part index granularity [#57352](https://github.com/ClickHouse/ClickHouse/pull/57352) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix Integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
docs/changelogs/v23.8.9.54-lts.md (new file, 47 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.8.9.54-lts (192a1d231fa) FIXME as compared to v23.8.8.20-lts (5e012a03bf2)

#### Improvement
* Backported in [#57668](https://github.com/ClickHouse/ClickHouse/issues/57668): Output valid JSON/XML on exception during HTTP query execution. Add setting `http_write_exception_in_output_format` to enable/disable this behaviour (enabled by default). [#52853](https://github.com/ClickHouse/ClickHouse/pull/52853) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#58491](https://github.com/ClickHouse/ClickHouse/issues/58491): Fix transforming queries to MySQL-compatible syntax. Fixes [#57253](https://github.com/ClickHouse/ClickHouse/issues/57253). Fixes [#52654](https://github.com/ClickHouse/ClickHouse/issues/52654). Fixes [#56729](https://github.com/ClickHouse/ClickHouse/issues/56729). [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Backported in [#57238](https://github.com/ClickHouse/ClickHouse/issues/57238): Fetching a part now waits until that part is fully committed on the remote replica. It is better not to send a part in the PreActive state; in the case of zero-copy replication this is a mandatory restriction. [#56808](https://github.com/ClickHouse/ClickHouse/pull/56808) ([Sema Checherinda](https://github.com/CheSema)).
* Backported in [#57655](https://github.com/ClickHouse/ClickHouse/issues/57655): Handle the SIGABRT case when getting the PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).

#### Build/Testing/Packaging Improvement
* Backported in [#57582](https://github.com/ClickHouse/ClickHouse/issues/57582): Fix the issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only true Nested type if flatten_nested=1, not all Array(Tuple) [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix ALTER COLUMN with ALIAS [#56493](https://github.com/ClickHouse/ClickHouse/pull/56493) ([Nikolay Degterinsky](https://github.com/evillique)).
* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* Fix `ReadonlyReplica` metric for all cases [#57267](https://github.com/ClickHouse/ClickHouse/pull/57267) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* bugfix: correctly parse SYSTEM STOP LISTEN TCP SECURE [#57483](https://github.com/ClickHouse/ClickHouse/pull/57483) ([joelynch](https://github.com/joelynch)).
* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Disable system.kafka_consumers by default (due to possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix Integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).

#### NO CL ENTRY

* NO CL ENTRY: 'Update PeekableWriteBuffer.cpp'. [#57701](https://github.com/ClickHouse/ClickHouse/pull/57701) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
@@ -1262,6 +1262,7 @@ SELECT * FROM json_each_row_nested
 
 - [input_format_import_nested_json](/docs/en/operations/settings/settings-formats.md/#input_format_import_nested_json) - map nested JSON data to nested tables (works for the JSONEachRow format). Default value - `false`.
 - [input_format_json_read_bools_as_numbers](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_bools_as_numbers) - allow parsing bools as numbers in JSON input formats. Default value - `true`.
+- [input_format_json_read_bools_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_bools_as_strings) - allow parsing bools as strings in JSON input formats. Default value - `true`.
 - [input_format_json_read_numbers_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_numbers_as_strings) - allow parsing numbers as strings in JSON input formats. Default value - `true`.
 - [input_format_json_read_arrays_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_arrays_as_strings) - allow parsing JSON arrays as strings in JSON input formats. Default value - `true`.
 - [input_format_json_read_objects_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_objects_as_strings) - allow parsing JSON objects as strings in JSON input formats. Default value - `true`.
|
@ -614,6 +614,26 @@ DESC format(JSONEachRow, $$
|
||||
└───────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
##### input_format_json_read_bools_as_strings
|
||||
|
||||
Enabling this setting allows reading Bool values as strings.
|
||||
|
||||
This setting is enabled by default.
|
||||
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
SET input_format_json_read_bools_as_strings = 1;
|
||||
DESC format(JSONEachRow, $$
|
||||
{"value" : true}
|
||||
{"value" : "Hello, World"}
|
||||
$$)
|
||||
```
|
||||
```response
|
||||
┌─name──┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
|
||||
│ value │ Nullable(String) │ │ │ │ │ │
|
||||
└───────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
##### input_format_json_read_arrays_as_strings
|
||||
|
||||
Enabling this setting allows reading JSON array values as strings.
|
||||
|
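A sketch in the same style as the example above (an editor's illustration; the inferred type is an assumption based on the setting's description):

```sql
SET input_format_json_read_arrays_as_strings = 1;
DESC format(JSONEachRow, $$
{"value" : [1, 2, 3]}
{"value" : "Hello"}
$$)
-- With the setting enabled, both rows should be readable
-- and `value` should be inferred as Nullable(String).
```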
@@ -377,6 +377,12 @@ Allow parsing bools as numbers in JSON input formats.

Enabled by default.

## input_format_json_read_bools_as_strings {#input_format_json_read_bools_as_strings}

Allow parsing bools as strings in JSON input formats.

Enabled by default.

## input_format_json_read_numbers_as_strings {#input_format_json_read_numbers_as_strings}

Allow parsing numbers as strings in JSON input formats.
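As with the sibling settings, a hedged sketch of what enabling `input_format_json_read_numbers_as_strings` permits (an editor's illustration; the inferred type is an assumption):

```sql
SET input_format_json_read_numbers_as_strings = 1;
DESC format(JSONEachRow, $$
{"value" : 42}
{"value" : "abc"}
$$)
-- Mixing numbers and strings in one column should now be accepted,
-- with `value` inferred as Nullable(String).
```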
@@ -3847,6 +3847,8 @@ Possible values:
 - `none` — Similar to `throw`, but the distributed DDL query returns no result set.
 - `null_status_on_timeout` — Returns `NULL` as the execution status in some rows of the result set instead of throwing `TIMEOUT_EXCEEDED` if the query is not finished on the corresponding hosts.
 - `never_throw` — Do not throw `TIMEOUT_EXCEEDED` and do not rethrow exceptions if the query has failed on some hosts.
+- `null_status_on_timeout_only_active` — Similar to `null_status_on_timeout`, but does not wait for inactive replicas of the `Replicated` database.
+- `throw_only_active` — Similar to `throw`, but does not wait for inactive replicas of the `Replicated` database.
 
 Default value: `throw`.
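A sketch of how the two new modes would be selected (an editor's illustration; `my_cluster` is a placeholder cluster name, not something defined in this patch):

```sql
-- Do not wait for inactive replicas of a Replicated database;
-- rows that time out get a NULL status instead of an exception.
SET distributed_ddl_output_mode = 'null_status_on_timeout_only_active';
CREATE TABLE default.t ON CLUSTER my_cluster (x UInt8)
ENGINE = MergeTree ORDER BY x;
```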
|
@ -14,6 +14,11 @@ Columns:
|
||||
- `changed` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Shows whether a setting was specified in `config.xml`
|
||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Short server setting description.
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — Server setting value type.
|
||||
- `changeable_without_restart` ([Enum8](../../sql-reference/data-types/enum.md)) — Whether the setting can be changed at server runtime. Values:
|
||||
- `'No' `
|
||||
- `'IncreaseOnly'`
|
||||
- `'DecreaseOnly'`
|
||||
- `'Yes'`
|
||||
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) - Shows whether a setting is obsolete.
|
||||
|
||||
**Example**
|
||||
@@ -27,22 +32,21 @@ WHERE name LIKE '%thread_pool%'
 ```
 
 ``` text
-┌─name─┬─value─┬─default─┬─changed─┬─description─┬─type─┬─is_obsolete─┐
-│ max_thread_pool_size │ 10000 │ 10000 │ 1 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │ 0 │
-│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │ 0 │
-│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │ 0 │
-│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │ 0 │
-│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │ 0 │
-│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │ 0 │
-│ max_active_parts_loading_thread_pool_size │ 64 │ 64 │ 0 │ The number of threads to load active set of data parts (Active ones) at startup. │ UInt64 │ 0 │
-│ max_outdated_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup. │ UInt64 │ 0 │
-│ max_parts_cleaning_thread_pool_size │ 128 │ 128 │ 0 │ The number of threads for concurrent removal of inactive data parts. │ UInt64 │ 0 │
-│ max_backups_io_thread_pool_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that would be used for IO operations for BACKUP queries │ UInt64 │ 0 │
-│ max_backups_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for backups IO thread pool. │ UInt64 │ 0 │
-│ backups_io_thread_pool_queue_size │ 0 │ 0 │ 0 │ Queue size for backups IO thread pool. │ UInt64 │ 0 │
-└─name─┴─value─┴─default─┴─changed─┴─description─┴─type─┴─is_obsolete─┘
+┌─name─┬─value─┬─default─┬─changed─┬─description─┬─type─┬─changeable_without_restart─┬─is_obsolete─┐
+│ max_thread_pool_size │ 10000 │ 10000 │ 0 │ The maximum number of threads that could be allocated from the OS and used for query execution and background operations. │ UInt64 │ No │ 0 │
+│ max_thread_pool_free_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that will always stay in a global thread pool once allocated and remain idle in case of insufficient number of tasks. │ UInt64 │ No │ 0 │
+│ thread_pool_queue_size │ 10000 │ 10000 │ 0 │ The maximum number of tasks that will be placed in a queue and wait for execution. │ UInt64 │ No │ 0 │
+│ max_io_thread_pool_size │ 100 │ 100 │ 0 │ The maximum number of threads that would be used for IO operations │ UInt64 │ No │ 0 │
+│ max_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for IO thread pool. │ UInt64 │ No │ 0 │
+│ io_thread_pool_queue_size │ 10000 │ 10000 │ 0 │ Queue size for IO thread pool. │ UInt64 │ No │ 0 │
+│ max_active_parts_loading_thread_pool_size │ 64 │ 64 │ 0 │ The number of threads to load active set of data parts (Active ones) at startup. │ UInt64 │ No │ 0 │
+│ max_outdated_parts_loading_thread_pool_size │ 32 │ 32 │ 0 │ The number of threads to load inactive set of data parts (Outdated ones) at startup. │ UInt64 │ No │ 0 │
+│ max_parts_cleaning_thread_pool_size │ 128 │ 128 │ 0 │ The number of threads for concurrent removal of inactive data parts. │ UInt64 │ No │ 0 │
+│ max_backups_io_thread_pool_size │ 1000 │ 1000 │ 0 │ The maximum number of threads that would be used for IO operations for BACKUP queries │ UInt64 │ No │ 0 │
+│ max_backups_io_thread_pool_free_size │ 0 │ 0 │ 0 │ Max free size for backups IO thread pool. │ UInt64 │ No │ 0 │
+│ backups_io_thread_pool_queue_size │ 0 │ 0 │ 0 │ Queue size for backups IO thread pool. │ UInt64 │ No │ 0 │
+└─name─┴─value─┴─default─┴─changed─┴─description─┴─type─┴─changeable_without_restart─┴─is_obsolete─┘
 
 ```
 
 Using `WHERE changed` can be useful, for example, when you want to check
@@ -27,7 +27,7 @@ $ clickhouse-format --query "select number from numbers(10) where number%2 order
 
 Result:
 
-```sql
+```bash
 SELECT number
 FROM numbers(10)
 WHERE number % 2
@@ -49,22 +49,20 @@ SELECT sum(number) FROM numbers(5)
 3. Multiqueries:
 
 ```bash
-$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+$ clickhouse-format -n <<< "SELECT min(number) FROM numbers(5); SELECT max(number) FROM numbers(5);"
 ```
 
 Result:
 
 ```sql
-SELECT *
-FROM
-(
-    SELECT 1 AS x
-    UNION ALL
-    SELECT 1
-    UNION DISTINCT
-    SELECT 3
-)
-```
+SELECT min(number)
+FROM numbers(5)
+;
+
+SELECT max(number)
+FROM numbers(5)
+;
+
+```
 
 4. Obfuscating:
@@ -75,7 +73,7 @@ $ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWE
 
 Result:
 
-```sql
+```
 SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
 ```
@@ -87,7 +85,7 @@ $ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWE
 
 Result:
 
-```sql
+```
 SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
 ```
@@ -99,7 +97,7 @@ $ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELE
 
 Result:
 
-```sql
+```
 SELECT * \
 FROM \
 ( \
@@ -1483,7 +1483,9 @@ For mode values with a meaning of “with 4 or more days this year,” weeks are
 
 - Otherwise, it is the last week of the previous year, and the next week is week 1.
 
-For mode values with a meaning of “contains January 1”, the week contains January 1 is week 1. It does not matter how many days in the new year the week contained, even if it contained only one day.
+For mode values with a meaning of “contains January 1”, the week containing January 1 is week 1.
+It does not matter how many days of the new year the week contains, even if it contains only one day.
+I.e. if the last week of December contains January 1 of the next year, it will be week 1 of the next year.
 
 **Syntax**
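A small probe one could run to see the "contains January 1" behavior (an editor's illustration; expected values are left as comments rather than asserted, since they depend on the chosen mode table):

```sql
-- January 1, 2017 was a Sunday; with a "contains January 1" mode,
-- the week spanning the end of December 2016 should report week 1 of 2017,
-- while the preceding days still belong to a late week of 2016.
SELECT
    toWeek(toDate('2016-12-31'), 0) AS last_dec_week,
    toWeek(toDate('2017-01-01'), 0) AS jan1_week;
```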
@@ -1779,7 +1779,9 @@ Result:
 
 ## sqid
 
-Transforms numbers into a YouTube-like short URL hash called a [Sqid](https://sqids.org/).
+Transforms numbers into a [Sqid](https://sqids.org/), which is a YouTube-like ID string.
+The output alphabet is `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`.
+Do not use this function for hashing: the generated IDs can be decoded back into numbers.
 
 **Syntax**
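A minimal call, in line with the rewritten description (an editor's illustration; the concrete ID below is what the upstream docs show for this input, but treat it as version-dependent):

```sql
SELECT sqid(1, 2, 3, 4);
-- Upstream docs show 'gXHfJ1C6dV' for this input.
-- Sqids are reversible encodings, not hashes.
```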
|
@ -53,7 +53,7 @@ The rounded number of the same type as the input number.
|
||||
**Example of use with Float**
|
||||
|
||||
``` sql
|
||||
SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
|
||||
SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
|
||||
```
|
||||
|
||||
``` text
|
||||
@ -67,7 +67,22 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
|
||||
**Example of use with Decimal**
|
||||
|
||||
``` sql
|
||||
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3
|
||||
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌───x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
|
||||
│ 0 │ 0 │
|
||||
│ 0.5 │ 1 │
|
||||
│ 1 │ 1 │
|
||||
└─────┴──────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
If you want to keep the trailing zeros, you need to enable `output_format_decimal_trailing_zeros`
|
||||
|
||||
``` sql
|
||||
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3 settings output_format_decimal_trailing_zeros=1;
|
||||
|
||||
```
|
||||
|
||||
``` text
|
||||
|
@@ -1383,6 +1383,148 @@ Result:
└──────────────────┘
```

## punycodeEncode

Returns the [Punycode](https://en.wikipedia.org/wiki/Punycode) representation of a string.
The string must be UTF8-encoded, otherwise the behavior is undefined.

**Syntax**

``` sql
punycodeEncode(val)
```

**Arguments**

- `val` - Input value. [String](../data-types/string.md)

**Returned value**

- A Punycode representation of the input value. [String](../data-types/string.md)

**Example**

``` sql
select punycodeEncode('München');
```

Result:

```result
┌─punycodeEncode('München')─┐
│ Mnchen-3ya                │
└───────────────────────────┘
```
## punycodeDecode

Returns the UTF8-encoded plaintext of a [Punycode](https://en.wikipedia.org/wiki/Punycode)-encoded string.
If no valid Punycode-encoded string is given, an exception is thrown.

**Syntax**

``` sql
punycodeDecode(val)
```

**Arguments**

- `val` - Punycode-encoded string. [String](../data-types/string.md)

**Returned value**

- The plaintext of the input value. [String](../data-types/string.md)

**Example**

``` sql
select punycodeDecode('Mnchen-3ya');
```

Result:

```result
┌─punycodeDecode('Mnchen-3ya')─┐
│ München                      │
└──────────────────────────────┘
```
## tryPunycodeDecode

Like `punycodeDecode` but returns an empty string if no valid Punycode-encoded string is given.
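A sketch contrasting the non-throwing variant with its throwing counterpart (an editor's illustration; the empty-string fallback follows from the description above):

```sql
SELECT
    tryPunycodeDecode('Mnchen-3ya') AS decoded,  -- 'München'
    tryPunycodeDecode('München') AS fallback;    -- expected '' (not valid Punycode input)
```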
## idnaEncode

Returns the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
The input string must be UTF8-encoded and translatable to an ASCII string, otherwise an exception is thrown.
Note: No percent decoding or trimming of tabs, spaces or control characters is performed.

**Syntax**

```sql
idnaEncode(val)
```

**Arguments**

- `val` - Input value. [String](../data-types/string.md)

**Returned value**

- An ASCII representation of the input value according to the IDNA mechanism. [String](../data-types/string.md)

**Example**

``` sql
select idnaEncode('straße.münchen.de');
```

Result:

```result
┌─idnaEncode('straße.münchen.de')─────┐
│ xn--strae-oqa.xn--mnchen-3ya.de     │
└─────────────────────────────────────┘
```

## tryIdnaEncode
Like `idnaEncode` but returns an empty string in case of an error instead of throwing an exception.
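A sketch of the non-throwing variant (an editor's illustration; the fallback value is an assumption based on the description):

```sql
SELECT
    tryIdnaEncode('straße.münchen.de') AS encoded, -- 'xn--strae-oqa.xn--mnchen-3ya.de'
    tryIdnaEncode('xn--') AS fallback;             -- expected '' instead of an exception
```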
## idnaDecode

Returns the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
In case of an error (e.g. because the input is invalid), the input string is returned.
Note that repeated application of `idnaEncode()` and `idnaDecode()` does not necessarily return the original string due to case normalization.

**Syntax**

```sql
idnaDecode(val)
```

**Arguments**

- `val` - Input value. [String](../data-types/string.md)

**Returned value**

- A Unicode (UTF-8) representation of the input value according to the IDNA mechanism. [String](../data-types/string.md)

**Example**

``` sql
select idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de');
```

Result:

```result
┌─idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de')─┐
│ straße.münchen.de                             │
└───────────────────────────────────────────────┘
```
## byteHammingDistance

Calculates the [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.
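A minimal call (an editor's illustration; the classic example pair from the Hamming distance literature):

```sql
SELECT byteHammingDistance('karolin', 'kathrin') AS d;
-- Expected: 3 (the byte pairs r/t, o/h, l/r differ)
```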
@@ -578,7 +578,9 @@ SELECT
 
 - Otherwise, it is the last week of the previous year, and the next week is week 1.
 
-For mode values with a meaning of "contains January 1", week 1 is the week containing January 1. It does not matter how many days of the new year the week contained, even if it contained only one day.
+For mode values with a meaning of "contains January 1", week 1 is the week containing January 1.
+It does not matter how many days of the new year this week contains, even if it contains only one day.
+So, if the last week of December contains January 1 of the following year, it is counted as week 1 of the following year.
 
 **Example**
@@ -335,7 +335,7 @@ try
     else if (std::filesystem::is_directory(std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"))
     {
         throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
-            "By default 'keeper.storage_path' could be assigned to {}, but the directory {} already exists. Please specify 'keeper.storage_path' in the keeper configuration explicitly",
+            "By default 'keeper_server.storage_path' could be assigned to {}, but the directory {} already exists. Please specify 'keeper_server.storage_path' in the keeper configuration explicitly",
             KEEPER_DEFAULT_PATH, String{std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"});
     }
     else
@ -1260,11 +1260,11 @@ try
|
||||
{
|
||||
Settings::checkNoSettingNamesAtTopLevel(*config, config_path);
|
||||
|
||||
ServerSettings server_settings_;
|
||||
server_settings_.loadSettingsFromConfig(*config);
|
||||
ServerSettings new_server_settings;
|
||||
new_server_settings.loadSettingsFromConfig(*config);
|
||||
|
||||
size_t max_server_memory_usage = server_settings_.max_server_memory_usage;
|
||||
double max_server_memory_usage_to_ram_ratio = server_settings_.max_server_memory_usage_to_ram_ratio;
|
||||
size_t max_server_memory_usage = new_server_settings.max_server_memory_usage;
|
||||
double max_server_memory_usage_to_ram_ratio = new_server_settings.max_server_memory_usage_to_ram_ratio;
|
||||
|
||||
size_t current_physical_server_memory = getMemoryAmount(); /// With cgroups, the amount of memory available to the server can be changed dynamically.
|
||||
size_t default_max_server_memory_usage = static_cast<size_t>(current_physical_server_memory * max_server_memory_usage_to_ram_ratio);
|
||||
@ -1294,9 +1294,9 @@ try
|
||||
total_memory_tracker.setDescription("(total)");
|
||||
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
|
||||
|
||||
size_t merges_mutations_memory_usage_soft_limit = server_settings_.merges_mutations_memory_usage_soft_limit;
|
||||
size_t merges_mutations_memory_usage_soft_limit = new_server_settings.merges_mutations_memory_usage_soft_limit;
|
||||
|
||||
size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * server_settings_.merges_mutations_memory_usage_to_ram_ratio);
|
||||
size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
|
||||
if (merges_mutations_memory_usage_soft_limit == 0)
|
||||
{
|
||||
merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
|
||||
@ -1304,7 +1304,7 @@ try
|
||||
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
|
||||
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
|
||||
formatReadableSizeWithBinarySuffix(current_physical_server_memory),
|
||||
server_settings_.merges_mutations_memory_usage_to_ram_ratio);
|
||||
new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
|
||||
}
|
||||
else if (merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
|
||||
{
|
||||
@ -1313,7 +1313,7 @@ try
|
||||
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
|
||||
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
|
||||
formatReadableSizeWithBinarySuffix(current_physical_server_memory),
|
||||
server_settings_.merges_mutations_memory_usage_to_ram_ratio);
|
||||
new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
|
||||
}
|
||||
|
||||
LOG_INFO(log, "Merges and mutations memory limit is set to {}",
|
||||
@ -1322,7 +1322,7 @@ try
|
||||
background_memory_tracker.setDescription("(background)");
|
||||
background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);
|
||||
|
||||
total_memory_tracker.setAllowUseJemallocMemory(server_settings_.allow_use_jemalloc_memory);
|
||||
total_memory_tracker.setAllowUseJemallocMemory(new_server_settings.allow_use_jemalloc_memory);
|
||||
|
||||
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
|
||||
total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
|
||||
@ -1346,26 +1346,26 @@ try
|
||||
global_context->setRemoteHostFilter(*config);
|
||||
global_context->setHTTPHeaderFilter(*config);
|
||||
|
||||
global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop);
|
||||
global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop);
|
||||
global_context->setMaxTableNumToWarn(server_settings_.max_table_num_to_warn);
|
||||
global_context->setMaxDatabaseNumToWarn(server_settings_.max_database_num_to_warn);
|
||||
global_context->setMaxPartNumToWarn(server_settings_.max_part_num_to_warn);
|
||||
global_context->setMaxTableSizeToDrop(new_server_settings.max_table_size_to_drop);
|
||||
global_context->setMaxPartitionSizeToDrop(new_server_settings.max_partition_size_to_drop);
|
||||
global_context->setMaxTableNumToWarn(new_server_settings.max_table_num_to_warn);
|
||||
global_context->setMaxDatabaseNumToWarn(new_server_settings.max_database_num_to_warn);
|
||||
global_context->setMaxPartNumToWarn(new_server_settings.max_part_num_to_warn);
|
||||
|
||||
ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited;
|
||||
if (server_settings_.concurrent_threads_soft_limit_num > 0 && server_settings_.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
|
||||
concurrent_threads_soft_limit = server_settings_.concurrent_threads_soft_limit_num;
|
||||
if (server_settings_.concurrent_threads_soft_limit_ratio_to_cores > 0)
|
||||
if (new_server_settings.concurrent_threads_soft_limit_num > 0 && new_server_settings.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
|
||||
concurrent_threads_soft_limit = new_server_settings.concurrent_threads_soft_limit_num;
|
||||
if (new_server_settings.concurrent_threads_soft_limit_ratio_to_cores > 0)
|
||||
{
|
||||
auto value = server_settings_.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
|
||||
auto value = new_server_settings.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
|
||||
if (value > 0 && value < concurrent_threads_soft_limit)
|
||||
concurrent_threads_soft_limit = value;
|
||||
}
|
||||
ConcurrencyControl::instance().setMaxConcurrency(concurrent_threads_soft_limit);

-   global_context->getProcessList().setMaxSize(server_settings_.max_concurrent_queries);
-   global_context->getProcessList().setMaxInsertQueriesAmount(server_settings_.max_concurrent_insert_queries);
-   global_context->getProcessList().setMaxSelectQueriesAmount(server_settings_.max_concurrent_select_queries);
+   global_context->getProcessList().setMaxSize(new_server_settings.max_concurrent_queries);
+   global_context->getProcessList().setMaxInsertQueriesAmount(new_server_settings.max_concurrent_insert_queries);
+   global_context->getProcessList().setMaxSelectQueriesAmount(new_server_settings.max_concurrent_select_queries);

    if (config->has("keeper_server"))
        global_context->updateKeeperConfiguration(*config);

@@ -1376,68 +1376,68 @@ try
    /// This is done for backward compatibility.
    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_pool_size;
-       auto new_ratio = server_settings_.background_merges_mutations_concurrency_ratio;
+       auto new_pool_size = new_server_settings.background_pool_size;
+       auto new_ratio = new_server_settings.background_merges_mutations_concurrency_ratio;
        global_context->getMergeMutateExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, static_cast<size_t>(new_pool_size * new_ratio));
-       global_context->getMergeMutateExecutor()->updateSchedulingPolicy(server_settings_.background_merges_mutations_scheduling_policy.toString());
+       global_context->getMergeMutateExecutor()->updateSchedulingPolicy(new_server_settings.background_merges_mutations_scheduling_policy.toString());
    }

    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_move_pool_size;
+       auto new_pool_size = new_server_settings.background_move_pool_size;
        global_context->getMovesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
    }

    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_fetches_pool_size;
+       auto new_pool_size = new_server_settings.background_fetches_pool_size;
        global_context->getFetchesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
    }

    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_common_pool_size;
+       auto new_pool_size = new_server_settings.background_common_pool_size;
        global_context->getCommonExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
    }

-   global_context->getBufferFlushSchedulePool().increaseThreadsCount(server_settings_.background_buffer_flush_schedule_pool_size);
-   global_context->getSchedulePool().increaseThreadsCount(server_settings_.background_schedule_pool_size);
-   global_context->getMessageBrokerSchedulePool().increaseThreadsCount(server_settings_.background_message_broker_schedule_pool_size);
-   global_context->getDistributedSchedulePool().increaseThreadsCount(server_settings_.background_distributed_schedule_pool_size);
+   global_context->getBufferFlushSchedulePool().increaseThreadsCount(new_server_settings.background_buffer_flush_schedule_pool_size);
+   global_context->getSchedulePool().increaseThreadsCount(new_server_settings.background_schedule_pool_size);
+   global_context->getMessageBrokerSchedulePool().increaseThreadsCount(new_server_settings.background_message_broker_schedule_pool_size);
+   global_context->getDistributedSchedulePool().increaseThreadsCount(new_server_settings.background_distributed_schedule_pool_size);

-   global_context->getAsyncLoader().setMaxThreads(TablesLoaderForegroundPoolId, server_settings_.tables_loader_foreground_pool_size);
-   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundLoadPoolId, server_settings_.tables_loader_background_pool_size);
-   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundStartupPoolId, server_settings_.tables_loader_background_pool_size);
+   global_context->getAsyncLoader().setMaxThreads(TablesLoaderForegroundPoolId, new_server_settings.tables_loader_foreground_pool_size);
+   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundLoadPoolId, new_server_settings.tables_loader_background_pool_size);
+   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundStartupPoolId, new_server_settings.tables_loader_background_pool_size);

    getIOThreadPool().reloadConfiguration(
-       server_settings.max_io_thread_pool_size,
-       server_settings.max_io_thread_pool_free_size,
-       server_settings.io_thread_pool_queue_size);
+       new_server_settings.max_io_thread_pool_size,
+       new_server_settings.max_io_thread_pool_free_size,
+       new_server_settings.io_thread_pool_queue_size);

    getBackupsIOThreadPool().reloadConfiguration(
-       server_settings.max_backups_io_thread_pool_size,
-       server_settings.max_backups_io_thread_pool_free_size,
-       server_settings.backups_io_thread_pool_queue_size);
+       new_server_settings.max_backups_io_thread_pool_size,
+       new_server_settings.max_backups_io_thread_pool_free_size,
+       new_server_settings.backups_io_thread_pool_queue_size);

    getActivePartsLoadingThreadPool().reloadConfiguration(
-       server_settings.max_active_parts_loading_thread_pool_size,
+       new_server_settings.max_active_parts_loading_thread_pool_size,
        0, // We don't need any threads once all the parts will be loaded
-       server_settings.max_active_parts_loading_thread_pool_size);
+       new_server_settings.max_active_parts_loading_thread_pool_size);

    getOutdatedPartsLoadingThreadPool().reloadConfiguration(
-       server_settings.max_outdated_parts_loading_thread_pool_size,
+       new_server_settings.max_outdated_parts_loading_thread_pool_size,
        0, // We don't need any threads once all the parts will be loaded
-       server_settings.max_outdated_parts_loading_thread_pool_size);
+       new_server_settings.max_outdated_parts_loading_thread_pool_size);

    /// It could grow if we need to synchronously wait until all the data parts will be loaded.
    getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(
-       server_settings.max_active_parts_loading_thread_pool_size
+       new_server_settings.max_active_parts_loading_thread_pool_size
    );

    getPartsCleaningThreadPool().reloadConfiguration(
-       server_settings.max_parts_cleaning_thread_pool_size,
+       new_server_settings.max_parts_cleaning_thread_pool_size,
        0, // We don't need any threads one all the parts will be deleted
-       server_settings.max_parts_cleaning_thread_pool_size);
+       new_server_settings.max_parts_cleaning_thread_pool_size);

    if (config->has("resources"))
    {

@@ -1379,6 +1379,9 @@
        <!-- Controls how many tasks could be in the queue -->
        <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->

+       <!-- Host name of the current node. If specified, will only compare and not resolve hostnames inside the DDL tasks -->
+       <!-- <host_name>replica</host_name> -->
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->

@@ -140,8 +140,7 @@ void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & ena
    auto info = std::make_shared<SettingsProfilesInfo>(access_control);

-   info->profiles = merged_settings.toProfileIDs();
-   substituteProfiles(merged_settings, info->profiles_with_implicit, info->names_of_profiles);
+   substituteProfiles(merged_settings, info->profiles, info->profiles_with_implicit, info->names_of_profiles);

    info->settings = merged_settings.toSettingsChanges();
    info->constraints = merged_settings.toSettingsConstraints(access_control);

@@ -152,9 +151,12 @@ void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & ena

void SettingsProfilesCache::substituteProfiles(
    SettingsProfileElements & elements,
+   std::vector<UUID> & profiles,
    std::vector<UUID> & substituted_profiles,
    std::unordered_map<UUID, String> & names_of_substituted_profiles) const
{
+   profiles = elements.toProfileIDs();
+
    /// We should substitute profiles in reversive order because the same profile can occur
    /// in `elements` multiple times (with some other settings in between) and in this case
    /// the last occurrence should override all the previous ones.

@@ -184,6 +186,11 @@ void SettingsProfilesCache::substituteProfiles(
        names_of_substituted_profiles.emplace(profile_id, profile->getName());
    }
    std::reverse(substituted_profiles.begin(), substituted_profiles.end());
+
+   std::erase_if(profiles, [&substituted_profiles_set](const UUID & profile_id)
+   {
+       return !substituted_profiles_set.contains(profile_id);
+   });
}
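The added erase_if above keeps only profile IDs that actually got substituted. A minimal self-contained sketch of that filtering on plain ints (names hypothetical; in the real code substituted_profiles_set is built during the substitution loop):

    #include <unordered_set>
    #include <vector>

    // Drop every id that did not make it into the substituted set,
    // mirroring the std::erase_if call in the hunk above.
    void keepOnlySubstituted(std::vector<int> & profiles, const std::unordered_set<int> & substituted)
    {
        std::erase_if(profiles, [&](int id) { return !substituted.contains(id); });
    }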

std::shared_ptr<const EnabledSettings> SettingsProfilesCache::getEnabledSettings(

@@ -225,13 +232,13 @@ std::shared_ptr<const SettingsProfilesInfo> SettingsProfilesCache::getSettingsPr
    if (auto pos = this->profile_infos_cache.get(profile_id))
        return *pos;

-   SettingsProfileElements elements = all_profiles[profile_id]->elements;
+   SettingsProfileElements elements;
+   auto & element = elements.emplace_back();
+   element.parent_profile = profile_id;

    auto info = std::make_shared<SettingsProfilesInfo>(access_control);

-   info->profiles.push_back(profile_id);
-   info->profiles_with_implicit.push_back(profile_id);
-   substituteProfiles(elements, info->profiles_with_implicit, info->names_of_profiles);
+   substituteProfiles(elements, info->profiles, info->profiles_with_implicit, info->names_of_profiles);
    info->settings = elements.toSettingsChanges();
    info->constraints.merge(elements.toSettingsConstraints(access_control));

@@ -37,7 +37,11 @@ private:
    void profileRemoved(const UUID & profile_id);
    void mergeSettingsAndConstraints();
    void mergeSettingsAndConstraintsFor(EnabledSettings & enabled) const;
-   void substituteProfiles(SettingsProfileElements & elements, std::vector<UUID> & substituted_profiles, std::unordered_map<UUID, String> & names_of_substituted_profiles) const;
+   void substituteProfiles(SettingsProfileElements & elements,
+       std::vector<UUID> & profiles,
+       std::vector<UUID> & substituted_profiles,
+       std::unordered_map<UUID, String> & names_of_substituted_profiles) const;

    const AccessControl & access_control;
    std::unordered_map<UUID, SettingsProfilePtr> all_profiles;

@@ -14,8 +14,9 @@
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>
-#include <Common/PODArray.h>
#include <Common/assert_cast.h>
+#include <Common/PODArray.h>
+#include <Common/iota.h>
#include <base/types.h>

#include <boost/math/distributions/normal.hpp>

@@ -48,7 +49,7 @@ struct LargestTriangleThreeBucketsData : public StatisticalSample<Float64, Float
    // sort the this->x and this->y in ascending order of this->x using index
    std::vector<size_t> index(this->x.size());

-   std::iota(index.begin(), index.end(), 0);
+   iota(index.data(), index.size(), size_t(0));
    ::sort(index.begin(), index.end(), [&](size_t i1, size_t i2) { return this->x[i1] < this->x[i2]; });

    SampleX temp_x{};

@@ -1,7 +1,8 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/FactoryHelpers.h>
#include <AggregateFunctions/HelpersMinMaxAny.h>
-#include <AggregateFunctions/findNumeric.h>
+#include <Common/Concepts.h>
+#include <Common/findExtreme.h>

namespace DB
{

@@ -19,7 +20,7 @@ public:
    explicit AggregateFunctionsSingleValueMax(const DataTypePtr & type) : Parent(type) { }

    /// Specializations for native numeric types
-   ALWAYS_INLINE inline void addBatchSinglePlace(
+   void addBatchSinglePlace(
        size_t row_begin,
        size_t row_end,
        AggregateDataPtr __restrict place,

@@ -27,7 +28,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override;

-   ALWAYS_INLINE inline void addBatchSinglePlaceNotNull(
+   void addBatchSinglePlaceNotNull(
        size_t row_begin,
        size_t row_end,
        AggregateDataPtr __restrict place,

@@ -53,10 +54,10 @@ void AggregateFunctionsSingleValueMax<typename DB::AggregateFunctionMaxData<Sing
        if (if_argument_pos >= 0) \
        { \
            const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData(); \
-           opt = findNumericMaxIf(column.getData().data(), flags.data(), row_begin, row_end); \
+           opt = findExtremeMaxIf(column.getData().data(), flags.data(), row_begin, row_end); \
        } \
        else \
-           opt = findNumericMax(column.getData().data(), row_begin, row_end); \
+           opt = findExtremeMax(column.getData().data(), row_begin, row_end); \
        if (opt.has_value()) \
            this->data(place).changeIfGreater(opt.value()); \
    }

@@ -74,7 +75,57 @@ void AggregateFunctionsSingleValueMax<Data>::addBatchSinglePlace(
    Arena * arena,
    ssize_t if_argument_pos) const
{
-   return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
+   if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
+   {
+       /// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
+       /// faster than doing a permutation
+       return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
+   }
+
+   constexpr int nan_direction_hint = 1;
+   auto const & column = *columns[0];
+   if (if_argument_pos >= 0)
+   {
+       size_t index = row_begin;
+       const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
+       while (if_flags[index] == 0 && index < row_end)
+           index++;
+       if (index >= row_end)
+           return;
+
+       for (size_t i = index + 1; i < row_end; i++)
+       {
+           if ((if_flags[i] != 0) && (column.compareAt(i, index, column, nan_direction_hint) > 0))
+               index = i;
+       }
+       this->data(place).changeIfGreater(column, index, arena);
+   }
+   else
+   {
+       if (row_begin >= row_end)
+           return;
+
+       /// TODO: Introduce row_begin and row_end to getPermutation
+       if (row_begin != 0 || row_end != column.size())
+       {
+           size_t index = row_begin;
+           for (size_t i = index + 1; i < row_end; i++)
+           {
+               if (column.compareAt(i, index, column, nan_direction_hint) > 0)
+                   index = i;
+           }
+           this->data(place).changeIfGreater(column, index, arena);
+       }
+       else
+       {
+           constexpr IColumn::PermutationSortDirection direction = IColumn::PermutationSortDirection::Descending;
+           constexpr IColumn::PermutationSortStability stability = IColumn::PermutationSortStability::Unstable;
+           IColumn::Permutation permutation;
+           constexpr UInt64 limit = 1;
+           column.getPermutation(direction, stability, limit, nan_direction_hint, permutation);
+           this->data(place).changeIfGreater(column, permutation[0], arena);
+       }
+   }
}
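The filtered branch above is essentially a conditional argmax by linear scan: find the first row that passes the filter, then compare every later passing row against the current best. A standalone sketch of the same scan on plain vectors (hypothetical helper, not ClickHouse API; note the sketch checks the bound before indexing, whereas the loop above tests if_flags[index] first):

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Index of the largest element whose flag is non-zero in [row_begin, row_end),
    // or std::nullopt when no row passes the filter.
    std::optional<size_t> argMaxIf(const std::vector<double> & data,
                                   const std::vector<unsigned char> & flags,
                                   size_t row_begin, size_t row_end)
    {
        size_t index = row_begin;
        while (index < row_end && flags[index] == 0)
            ++index;
        if (index >= row_end)
            return std::nullopt;
        for (size_t i = index + 1; i < row_end; ++i)
            if (flags[i] != 0 && data[i] > data[index])
                index = i;
        return index;
    }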

// NOLINTBEGIN(bugprone-macro-parentheses)

@@ -97,10 +148,10 @@ void AggregateFunctionsSingleValueMax<typename DB::AggregateFunctionMaxData<Sing
        auto final_flags = std::make_unique<UInt8[]>(row_end); \
        for (size_t i = row_begin; i < row_end; ++i) \
            final_flags[i] = (!null_map[i]) & !!if_flags[i]; \
-       opt = findNumericMaxIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
+       opt = findExtremeMaxIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
    } \
    else \
-       opt = findNumericMaxNotNull(column.getData().data(), null_map, row_begin, row_end); \
+       opt = findExtremeMaxNotNull(column.getData().data(), null_map, row_begin, row_end); \
    if (opt.has_value()) \
        this->data(place).changeIfGreater(opt.value()); \
}

@@ -119,7 +170,46 @@ void AggregateFunctionsSingleValueMax<Data>::addBatchSinglePlaceNotNull(
    Arena * arena,
    ssize_t if_argument_pos) const
{
-   return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
+   if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
+   {
+       /// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
+       /// faster than doing a permutation
+       return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
+   }
+
+   constexpr int nan_direction_hint = 1;
+   auto const & column = *columns[0];
+   if (if_argument_pos >= 0)
+   {
+       size_t index = row_begin;
+       const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
+       while ((if_flags[index] == 0 || null_map[index] != 0) && (index < row_end))
+           index++;
+       if (index >= row_end)
+           return;
+
+       for (size_t i = index + 1; i < row_end; i++)
+       {
+           if ((if_flags[i] != 0) && (null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) > 0))
+               index = i;
+       }
+       this->data(place).changeIfGreater(column, index, arena);
+   }
+   else
+   {
+       size_t index = row_begin;
+       while ((null_map[index] != 0) && (index < row_end))
+           index++;
+       if (index >= row_end)
+           return;
+
+       for (size_t i = index + 1; i < row_end; i++)
+       {
+           if ((null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) > 0))
+               index = i;
+       }
+       this->data(place).changeIfGreater(column, index, arena);
+   }
}

AggregateFunctionPtr createAggregateFunctionMax(

@@ -1,7 +1,8 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/FactoryHelpers.h>
#include <AggregateFunctions/HelpersMinMaxAny.h>
-#include <AggregateFunctions/findNumeric.h>
+#include <Common/Concepts.h>
+#include <Common/findExtreme.h>


namespace DB

@@ -20,7 +21,7 @@ public:
    explicit AggregateFunctionsSingleValueMin(const DataTypePtr & type) : Parent(type) { }

    /// Specializations for native numeric types
-   ALWAYS_INLINE inline void addBatchSinglePlace(
+   void addBatchSinglePlace(
        size_t row_begin,
        size_t row_end,
        AggregateDataPtr __restrict place,

@@ -28,7 +29,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override;

-   ALWAYS_INLINE inline void addBatchSinglePlaceNotNull(
+   void addBatchSinglePlaceNotNull(
        size_t row_begin,
        size_t row_end,
        AggregateDataPtr __restrict place,

@@ -54,10 +55,10 @@ public:
        if (if_argument_pos >= 0) \
        { \
            const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData(); \
-           opt = findNumericMinIf(column.getData().data(), flags.data(), row_begin, row_end); \
+           opt = findExtremeMinIf(column.getData().data(), flags.data(), row_begin, row_end); \
        } \
        else \
-           opt = findNumericMin(column.getData().data(), row_begin, row_end); \
+           opt = findExtremeMin(column.getData().data(), row_begin, row_end); \
        if (opt.has_value()) \
            this->data(place).changeIfLess(opt.value()); \
    }

@@ -75,7 +76,57 @@ void AggregateFunctionsSingleValueMin<Data>::addBatchSinglePlace(
    Arena * arena,
    ssize_t if_argument_pos) const
{
-   return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
+   if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
+   {
+       /// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
+       /// faster than doing a permutation
+       return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
+   }
+
+   constexpr int nan_direction_hint = 1;
+   auto const & column = *columns[0];
+   if (if_argument_pos >= 0)
+   {
+       size_t index = row_begin;
+       const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
+       while (if_flags[index] == 0 && index < row_end)
+           index++;
+       if (index >= row_end)
+           return;
+
+       for (size_t i = index + 1; i < row_end; i++)
+       {
+           if ((if_flags[i] != 0) && (column.compareAt(i, index, column, nan_direction_hint) < 0))
+               index = i;
+       }
+       this->data(place).changeIfLess(column, index, arena);
+   }
+   else
+   {
+       if (row_begin >= row_end)
+           return;
+
+       /// TODO: Introduce row_begin and row_end to getPermutation
+       if (row_begin != 0 || row_end != column.size())
+       {
+           size_t index = row_begin;
+           for (size_t i = index + 1; i < row_end; i++)
+           {
+               if (column.compareAt(i, index, column, nan_direction_hint) < 0)
+                   index = i;
+           }
+           this->data(place).changeIfLess(column, index, arena);
+       }
+       else
+       {
+           constexpr IColumn::PermutationSortDirection direction = IColumn::PermutationSortDirection::Ascending;
+           constexpr IColumn::PermutationSortStability stability = IColumn::PermutationSortStability::Unstable;
+           IColumn::Permutation permutation;
+           constexpr UInt64 limit = 1;
+           column.getPermutation(direction, stability, limit, nan_direction_hint, permutation);
+           this->data(place).changeIfLess(column, permutation[0], arena);
+       }
+   }
}

// NOLINTBEGIN(bugprone-macro-parentheses)

@@ -98,10 +149,10 @@ void AggregateFunctionsSingleValueMin<Data>::addBatchSinglePlace(
        auto final_flags = std::make_unique<UInt8[]>(row_end); \
        for (size_t i = row_begin; i < row_end; ++i) \
            final_flags[i] = (!null_map[i]) & !!if_flags[i]; \
-       opt = findNumericMinIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
+       opt = findExtremeMinIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
    } \
    else \
-       opt = findNumericMinNotNull(column.getData().data(), null_map, row_begin, row_end); \
+       opt = findExtremeMinNotNull(column.getData().data(), null_map, row_begin, row_end); \
    if (opt.has_value()) \
        this->data(place).changeIfLess(opt.value()); \
    }

@@ -120,7 +171,46 @@ void AggregateFunctionsSingleValueMin<Data>::addBatchSinglePlaceNotNull(
    Arena * arena,
    ssize_t if_argument_pos) const
{
-   return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
+   if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
+   {
+       /// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
+       /// faster than doing a permutation
+       return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
+   }
+
+   constexpr int nan_direction_hint = 1;
+   auto const & column = *columns[0];
+   if (if_argument_pos >= 0)
+   {
+       size_t index = row_begin;
+       const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
+       while ((if_flags[index] == 0 || null_map[index] != 0) && (index < row_end))
+           index++;
+       if (index >= row_end)
+           return;
+
+       for (size_t i = index + 1; i < row_end; i++)
+       {
+           if ((if_flags[i] != 0) && (null_map[index] == 0) && (column.compareAt(i, index, column, nan_direction_hint) < 0))
+               index = i;
+       }
+       this->data(place).changeIfLess(column, index, arena);
+   }
+   else
+   {
+       size_t index = row_begin;
+       while ((null_map[index] != 0) && (index < row_end))
+           index++;
+       if (index >= row_end)
+           return;
+
+       for (size_t i = index + 1; i < row_end; i++)
+       {
+           if ((null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) < 0))
+               index = i;
+       }
+       this->data(place).changeIfLess(column, index, arena);
+   }
}

AggregateFunctionPtr createAggregateFunctionMin(

@@ -965,6 +965,7 @@ template <typename Data>
struct AggregateFunctionMinData : Data
{
    using Self = AggregateFunctionMinData;
+   using Impl = Data;

    bool changeIfBetter(const IColumn & column, size_t row_num, Arena * arena) { return this->changeIfLess(column, row_num, arena); }
    bool changeIfBetter(const Self & to, Arena * arena) { return this->changeIfLess(to, arena); }

@@ -993,6 +994,7 @@ template <typename Data>
struct AggregateFunctionMaxData : Data
{
    using Self = AggregateFunctionMaxData;
+   using Impl = Data;

    bool changeIfBetter(const IColumn & column, size_t row_num, Arena * arena) { return this->changeIfGreater(column, row_num, arena); }
    bool changeIfBetter(const Self & to, Arena * arena) { return this->changeIfGreater(to, arena); }

@@ -6,6 +6,7 @@

#include <Common/FieldVisitorConvertToNumber.h>
#include <Common/NaNUtils.h>
+#include <Common/iota.h>


namespace DB

@@ -63,10 +64,9 @@ struct QuantileLevels

        if (isNaN(levels[i]) || levels[i] < 0 || levels[i] > 1)
            throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Quantile level is out of range [0..1]");
-
-       permutation[i] = i;
    }

+   iota(permutation.data(), size, Permutation::value_type(0));
    ::sort(permutation.begin(), permutation.end(), [this] (size_t a, size_t b) { return levels[a] < levels[b]; });
}
};

@@ -7,6 +7,7 @@
#include <base/sort.h>

#include <Common/ArenaAllocator.h>
+#include <Common/iota.h>

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>

@@ -30,7 +31,7 @@ std::pair<RanksArray, Float64> computeRanksAndTieCorrection(const Values & value
    const size_t size = values.size();
    /// Save initial positions, than sort indices according to the values.
    std::vector<size_t> indexes(size);
-   std::iota(indexes.begin(), indexes.end(), 0);
+   iota(indexes.data(), indexes.size(), size_t(0));
    std::sort(indexes.begin(), indexes.end(),
        [&] (size_t lhs, size_t rhs) { return values[lhs] < values[rhs]; });
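The iota-then-sort pair above is the usual argsort idiom; DB::iota fills the array with 0..n-1 much like std::iota. A self-contained version of the same idiom:

    #include <algorithm>
    #include <numeric>
    #include <vector>

    // Return the permutation that sorts `values` ascending: first fill
    // 0..n-1, then order the indices by the values they point at.
    std::vector<size_t> argsort(const std::vector<double> & values)
    {
        std::vector<size_t> indexes(values.size());
        std::iota(indexes.begin(), indexes.end(), size_t(0));
        std::sort(indexes.begin(), indexes.end(),
                  [&](size_t lhs, size_t rhs) { return values[lhs] < values[rhs]; });
        return indexes;
    }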

@@ -1,15 +0,0 @@
-#include <AggregateFunctions/findNumeric.h>
-
-namespace DB
-{
-#define INSTANTIATION(T) \
-    template std::optional<T> findNumericMin(const T * __restrict ptr, size_t start, size_t end); \
-    template std::optional<T> findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
-    template std::optional<T> findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
-    template std::optional<T> findNumericMax(const T * __restrict ptr, size_t start, size_t end); \
-    template std::optional<T> findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
-    template std::optional<T> findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);
-
-FOR_BASIC_NUMERIC_TYPES(INSTANTIATION)
-#undef INSTANTIATION
-}
@@ -143,9 +143,17 @@ public:
        return alias;
    }

+   const String & getOriginalAlias() const
+   {
+       return original_alias.empty() ? alias : original_alias;
+   }
+
    /// Set node alias
    void setAlias(String alias_value)
    {
+       if (original_alias.empty())
+           original_alias = std::move(alias);
+
        alias = std::move(alias_value);
    }

@@ -276,6 +284,9 @@ protected:

private:
    String alias;
+   /// An alias from query. Alias can be replaced by query passes,
+   /// but we need to keep the original one to support additional_table_filters.
+   String original_alias;
    ASTPtr original_ast;
};
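The new bookkeeping above makes the first setAlias call stash the user-written alias so later passes can still recover it via getOriginalAlias. A self-contained sketch of just that behavior:

    #include <string>
    #include <utility>

    struct NodeAlias
    {
        std::string alias;
        std::string original_alias;

        // Stash the initial alias once; subsequent rewrites only touch `alias`.
        void setAlias(std::string alias_value)
        {
            if (original_alias.empty())
                original_alias = std::move(alias);
            alias = std::move(alias_value);
        }

        const std::string & getOriginalAlias() const
        {
            return original_alias.empty() ? alias : original_alias;
        }
    };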

@@ -1,5 +1,6 @@
#include <Analyzer/Passes/FuseFunctionsPass.h>

+#include <Common/iota.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeTuple.h>

@@ -184,7 +185,7 @@ FunctionNodePtr createFusedQuantilesNode(std::vector<QueryTreeNodePtr *> & nodes
{
    /// Sort nodes and parameters in ascending order of quantile level
    std::vector<size_t> permutation(nodes.size());
-   std::iota(permutation.begin(), permutation.end(), 0);
+   iota(permutation.data(), permutation.size(), size_t(0));
    std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].get<Float64>() < parameters[j].get<Float64>(); });

    std::vector<QueryTreeNodePtr *> new_nodes;
@@ -52,6 +52,7 @@

#include <Processors/Executors/PullingAsyncPipelineExecutor.h>

+#include <Analyzer/createUniqueTableAliases.h>
#include <Analyzer/Utils.h>
#include <Analyzer/SetUtils.h>
#include <Analyzer/AggregationUtils.h>

@@ -1198,7 +1199,7 @@ private:

    static void mergeWindowWithParentWindow(const QueryTreeNodePtr & window_node, const QueryTreeNodePtr & parent_window_node, IdentifierResolveScope & scope);

-   static void replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_list, const QueryTreeNodes & projection_nodes, IdentifierResolveScope & scope);
+   void replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_list, const QueryTreeNodes & projection_nodes, IdentifierResolveScope & scope);

    static void convertLimitOffsetExpression(QueryTreeNodePtr & expression_node, const String & expression_description, IdentifierResolveScope & scope);

@@ -2168,7 +2169,12 @@ void QueryAnalyzer::replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_
            scope.scope_node->formatASTForErrorMessage());

        --positional_argument_number;
-       *node_to_replace = projection_nodes[positional_argument_number];
+       *node_to_replace = projection_nodes[positional_argument_number]->clone();
+       if (auto it = resolved_expressions.find(projection_nodes[positional_argument_number]);
+           it != resolved_expressions.end())
+       {
+           resolved_expressions[*node_to_replace] = it->second;
+       }
    }
}

@@ -7366,6 +7372,7 @@ void QueryAnalysisPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context
{
    QueryAnalyzer analyzer;
    analyzer.resolve(query_tree_node, table_expression, context);
+   createUniqueTableAliases(query_tree_node, table_expression, context);
}

}
@@ -326,7 +326,7 @@ void addTableExpressionOrJoinIntoTablesInSelectQuery(ASTPtr & tables_in_select_q
    }
}

-QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node)
+QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node, bool add_array_join)
{
    QueryTreeNodes result;

@@ -357,6 +357,8 @@ QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node)
        {
            auto & array_join_node = node_to_process->as<ArrayJoinNode &>();
            nodes_to_process.push_front(array_join_node.getTableExpression());
+           if (add_array_join)
+               result.push_back(std::move(node_to_process));
            break;
        }
        case QueryTreeNodeType::JOIN:

@@ -51,7 +51,7 @@ std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr &
void addTableExpressionOrJoinIntoTablesInSelectQuery(ASTPtr & tables_in_select_query_ast, const QueryTreeNodePtr & table_expression, const IQueryTreeNode::ConvertToASTOptions & convert_to_ast_options);

/// Extract table, table function, query, union from join tree
-QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node);
+QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node, bool add_array_join = false);

/// Extract left table expression from join tree
QueryTreeNodePtr extractLeftTableExpression(const QueryTreeNodePtr & join_tree_node);

src/Analyzer/createUniqueTableAliases.cpp (new file, 141 lines)
@@ -0,0 +1,141 @@
+#include <memory>
+#include <unordered_map>
+#include <Analyzer/createUniqueTableAliases.h>
+#include <Analyzer/FunctionNode.h>
+#include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/IQueryTreeNode.h>
+#include <Analyzer/LambdaNode.h>
+#include <Analyzer/Utils.h>
+
+namespace DB
+{
+
+namespace
+{
+
+class CreateUniqueTableAliasesVisitor : public InDepthQueryTreeVisitorWithContext<CreateUniqueTableAliasesVisitor>
+{
+public:
+    using Base = InDepthQueryTreeVisitorWithContext<CreateUniqueTableAliasesVisitor>;
+
+    explicit CreateUniqueTableAliasesVisitor(const ContextPtr & context)
+        : Base(context)
+    {
+        // Insert a fake node on top of the stack.
+        scope_nodes_stack.push_back(std::make_shared<LambdaNode>(Names{}, nullptr));
+    }
+
+    void enterImpl(QueryTreeNodePtr & node)
+    {
+        auto node_type = node->getNodeType();
+
+        switch (node_type)
+        {
+            case QueryTreeNodeType::QUERY:
+                [[fallthrough]];
+            case QueryTreeNodeType::UNION:
+            {
+                /// Queries like `(SELECT 1) as t` have invalid syntax. To avoid creating such queries (e.g. in StorageDistributed)
+                /// we need to remove aliases for top level queries.
+                /// N.B. Subquery depth starts count from 1, so the following condition checks if it's a top level.
+                if (getSubqueryDepth() == 1)
+                {
+                    node->removeAlias();
+                    break;
+                }
+                [[fallthrough]];
+            }
+            case QueryTreeNodeType::TABLE:
+                [[fallthrough]];
+            case QueryTreeNodeType::TABLE_FUNCTION:
+                [[fallthrough]];
+            case QueryTreeNodeType::ARRAY_JOIN:
+            {
+                auto & alias = table_expression_to_alias[node];
+                if (alias.empty())
+                {
+                    scope_to_nodes_with_aliases[scope_nodes_stack.back()].push_back(node);
+                    alias = fmt::format("__table{}", ++next_id);
+                    node->setAlias(alias);
+                }
+                break;
+            }
+            default:
+                break;
+        }
+
+        switch (node_type)
+        {
+            case QueryTreeNodeType::QUERY:
+                [[fallthrough]];
+            case QueryTreeNodeType::UNION:
+                [[fallthrough]];
+            case QueryTreeNodeType::LAMBDA:
+                scope_nodes_stack.push_back(node);
+                break;
+            default:
+                break;
+        }
+    }
+
+    void leaveImpl(QueryTreeNodePtr & node)
+    {
+        if (scope_nodes_stack.back() == node)
+        {
+            if (auto it = scope_to_nodes_with_aliases.find(scope_nodes_stack.back());
+                it != scope_to_nodes_with_aliases.end())
+            {
+                for (const auto & node_with_alias : it->second)
+                {
+                    table_expression_to_alias.erase(node_with_alias);
+                }
+                scope_to_nodes_with_aliases.erase(it);
+            }
+            scope_nodes_stack.pop_back();
+        }
+
+        /// Here we revisit subquery for IN function. Reasons:
+        /// * For remote query execution, query tree may be traversed a few times.
+        ///   In such a case, it is possible to get AST like
+        ///   `IN ((SELECT ... FROM table AS __table4) AS __table1)` which result in
+        ///   `Multiple expressions for the alias` exception
+        /// * Tables in subqueries could have different aliases => different three hashes,
+        ///   which is important to be able to find a set in PreparedSets
+        ///   See 01253_subquery_in_aggregate_function_JustStranger.
+        ///
+        /// So, we revisit this subquery to make aliases stable.
+        /// This should be safe cause columns from IN subquery can't be used in main query anyway.
+        if (node->getNodeType() == QueryTreeNodeType::FUNCTION)
+        {
+            auto * function_node = node->as<FunctionNode>();
+            if (isNameOfInFunction(function_node->getFunctionName()))
+            {
+                auto arg = function_node->getArguments().getNodes().back();
+                /// Avoid aliasing IN `table`
+                if (arg->getNodeType() != QueryTreeNodeType::TABLE)
+                    CreateUniqueTableAliasesVisitor(getContext()).visit(function_node->getArguments().getNodes().back());
+            }
+        }
+    }
+
+private:
+    size_t next_id = 0;
+
+    // Stack of nodes which create scopes: QUERY, UNION and LAMBDA.
+    QueryTreeNodes scope_nodes_stack;
+
+    std::unordered_map<QueryTreeNodePtr, QueryTreeNodes> scope_to_nodes_with_aliases;
+
+    // We need to use raw pointer as a key, not a QueryTreeNodePtrWithHash.
+    std::unordered_map<QueryTreeNodePtr, String> table_expression_to_alias;
+};
+
+}
+
+
+void createUniqueTableAliases(QueryTreeNodePtr & node, const QueryTreeNodePtr & /*table_expression*/, const ContextPtr & context)
+{
+    CreateUniqueTableAliasesVisitor(context).visit(node);
+}
+
+}
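The alias scheme above hands out __table1, __table2, ... once per distinct table expression and reuses the stored alias on repeat visits. A reduced sketch of just that bookkeeping (Key is a hypothetical stand-in for QueryTreeNodePtr; the fmt usage mirrors the visitor above):

    #include <cstddef>
    #include <string>
    #include <unordered_map>

    #include <fmt/format.h>

    template <typename Key>
    class AliasRegistry
    {
    public:
        // First request for a node mints a fresh "__tableN"; repeats reuse it.
        const std::string & aliasFor(const Key & node)
        {
            auto & alias = table_expression_to_alias[node];
            if (alias.empty())
                alias = fmt::format("__table{}", ++next_id);
            return alias;
        }

    private:
        std::size_t next_id = 0;
        std::unordered_map<Key, std::string> table_expression_to_alias;
    };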

src/Analyzer/createUniqueTableAliases.h (new file, 18 lines)
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <memory>
+#include <Interpreters/Context_fwd.h>
+
+class IQueryTreeNode;
+using QueryTreeNodePtr = std::shared_ptr<IQueryTreeNode>;
+
+namespace DB
+{
+
+/*
+ * For each table expression in the Query Tree generate and add a unique alias.
+ * If table expression had an alias in initial query tree, override it.
+ */
+void createUniqueTableAliases(QueryTreeNodePtr & node, const QueryTreeNodePtr & table_expression, const ContextPtr & context);
+
+}
@@ -573,11 +573,12 @@ void RestorerFromBackup::createDatabase(const String & database_name) const
    create_database_query->if_not_exists = (restore_settings.create_table == RestoreTableCreationMode::kCreateIfNotExists);

    LOG_TRACE(log, "Creating database {}: {}", backQuoteIfNeed(database_name), serializeAST(*create_database_query));

+   auto query_context = Context::createCopy(context);
+   query_context->setSetting("allow_deprecated_database_ordinary", 1);
    try
    {
        /// Execute CREATE DATABASE query.
-       InterpreterCreateQuery interpreter{create_database_query, context};
+       InterpreterCreateQuery interpreter{create_database_query, query_context};
        interpreter.setInternal(true);
        interpreter.execute();
    }
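The two added lines above follow a common copy-and-override pattern: duplicate the ambient context, flip one setting for a single internal query, and leave the caller's context untouched. A toy sketch of the idea with a plain map standing in for Context (names hypothetical; only the setting name comes from the hunk above):

    #include <map>
    #include <memory>
    #include <string>

    using SettingsMap = std::map<std::string, int>;

    // Copy-and-override: the original settings are never mutated.
    std::shared_ptr<SettingsMap> createCopyWithOverride(const SettingsMap & base)
    {
        auto copy = std::make_shared<SettingsMap>(base);
        (*copy)["allow_deprecated_database_ordinary"] = 1; // setting name from the hunk above
        return copy;
    }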

@@ -77,7 +77,6 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti
    };

    add_column("name", "functions", false, {});
-   add_column("name", "database_engines", false, {});
    add_column("name", "table_engines", false, {});
    add_column("name", "formats", false, {});
    add_column("name", "table_functions", false, {});
@@ -1,18 +1,19 @@
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h>
-#include <Common/assert_cast.h>
-#include <Processors/Transforms/ColumnGathererTransform.h>
-#include <IO/Operators.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
+#include <IO/Operators.h>
-#include <Common/FieldVisitorToString.h>
-#include <Common/SipHash.h>
+#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Common/AlignedBuffer.h>
-#include <Common/typeid_cast.h>
#include <Common/Arena.h>
-#include <Common/WeakHash.h>
+#include <Common/FieldVisitorToString.h>
#include <Common/HashTable/Hash.h>
+#include <Common/SipHash.h>
+#include <Common/WeakHash.h>
+#include <Common/assert_cast.h>
+#include <Common/iota.h>
+#include <Common/typeid_cast.h>


namespace DB

@@ -626,8 +627,7 @@ void ColumnAggregateFunction::getPermutation(PermutationSortDirection /*directio
{
    size_t s = data.size();
    res.resize(s);
-   for (size_t i = 0; i < s; ++i)
-       res[i] = i;
+   iota(res.data(), s, IColumn::Permutation::value_type(0));
}
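The rewrite above, repeated across the Columns/ files below, replaces hand-written identity loops with DB::iota: for a column with no meaningful order, getPermutation just returns 0..s-1. The equivalent with the standard library:

    #include <cstddef>
    #include <numeric>
    #include <vector>

    // Identity permutation: res[i] = i for all i in [0, s).
    std::vector<size_t> identityPermutation(size_t s)
    {
        std::vector<size_t> res(s);
        std::iota(res.begin(), res.end(), size_t(0));
        return res;
    }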

void ColumnAggregateFunction::updatePermutation(PermutationSortDirection, PermutationSortStability,
@@ -2,9 +2,10 @@

#include <Columns/ColumnConst.h>
#include <Columns/ColumnsCommon.h>
-#include <Common/typeid_cast.h>
-#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
+#include <Common/WeakHash.h>
+#include <Common/iota.h>
+#include <Common/typeid_cast.h>

#include <base/defines.h>

@@ -128,8 +129,7 @@ void ColumnConst::getPermutation(PermutationSortDirection /*direction*/, Permuta
    size_t /*limit*/, int /*nan_direction_hint*/, Permutation & res) const
{
    res.resize(s);
-   for (size_t i = 0; i < s; ++i)
-       res[i] = i;
+   iota(res.data(), s, IColumn::Permutation::value_type(0));
}

void ColumnConst::updatePermutation(PermutationSortDirection /*direction*/, PermutationSortStability /*stability*/,
@@ -1,10 +1,11 @@
-#include <Common/Exception.h>
#include <Common/Arena.h>
-#include <Common/SipHash.h>
-#include <Common/assert_cast.h>
-#include <Common/WeakHash.h>
+#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/RadixSort.h>
+#include <Common/SipHash.h>
+#include <Common/WeakHash.h>
+#include <Common/assert_cast.h>
+#include <Common/iota.h>

#include <base/sort.h>

@@ -163,8 +164,7 @@ void ColumnDecimal<T>::getPermutation(IColumn::PermutationSortDirection directio
    if (limit >= data_size)
        limit = 0;

-   for (size_t i = 0; i < data_size; ++i)
-       res[i] = i;
+   iota(res.data(), data_size, IColumn::Permutation::value_type(0));

    if constexpr (is_arithmetic_v<NativeT> && !is_big_int_v<NativeT>)
    {

@@ -183,8 +183,7 @@ void ColumnDecimal<T>::getPermutation(IColumn::PermutationSortDirection directio
    /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters.
    if (data_size >= 256 && data_size <= std::numeric_limits<UInt32>::max() && use_radix_sort)
    {
-       for (size_t i = 0; i < data_size; ++i)
-           res[i] = i;
+       iota(res.data(), data_size, IColumn::Permutation::value_type(0));

        bool try_sort = false;
@@ -2,6 +2,7 @@
#include <Columns/ColumnObject.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnArray.h>
+#include <Common/iota.h>
#include <DataTypes/ObjectUtils.h>
#include <DataTypes/getLeastSupertype.h>
#include <DataTypes/DataTypeNothing.h>

@@ -838,7 +839,7 @@ MutableColumnPtr ColumnObject::cloneResized(size_t new_size) const
void ColumnObject::getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const
{
    res.resize(num_rows);
-   std::iota(res.begin(), res.end(), 0);
+   iota(res.data(), res.size(), size_t(0));
}

void ColumnObject::compareColumn(const IColumn & rhs, size_t rhs_row_num,
@@ -1,11 +1,12 @@
-#include <Columns/ColumnSparse.h>
-#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnCompressed.h>
+#include <Columns/ColumnSparse.h>
#include <Columns/ColumnTuple.h>
-#include <Common/WeakHash.h>
-#include <Common/SipHash.h>
-#include <Common/HashTable/Hash.h>
+#include <Columns/ColumnsCommon.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
+#include <Common/HashTable/Hash.h>
+#include <Common/SipHash.h>
+#include <Common/WeakHash.h>
+#include <Common/iota.h>

#include <algorithm>
#include <bit>

@@ -499,8 +500,7 @@ void ColumnSparse::getPermutationImpl(IColumn::PermutationSortDirection directio
    res.resize(_size);
    if (offsets->empty())
    {
-       for (size_t i = 0; i < _size; ++i)
-           res[i] = i;
+       iota(res.data(), _size, IColumn::Permutation::value_type(0));
        return;
    }
@@ -1,16 +1,17 @@
#include <Columns/ColumnTuple.h>

-#include <base/sort.h>
-#include <Columns/IColumnImpl.h>
#include <Columns/ColumnCompressed.h>
+#include <Columns/IColumnImpl.h>
#include <Core/Field.h>
-#include <Processors/Transforms/ColumnGathererTransform.h>
-#include <DataTypes/Serializations/SerializationInfoTuple.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
+#include <Processors/Transforms/ColumnGathererTransform.h>
+#include <base/sort.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
+#include <Common/iota.h>
#include <Common/typeid_cast.h>
+#include <DataTypes/Serializations/SerializationInfoTuple.h>


namespace DB

@@ -378,8 +379,7 @@ void ColumnTuple::getPermutationImpl(IColumn::PermutationSortDirection direction
{
    size_t rows = size();
    res.resize(rows);
-   for (size_t i = 0; i < rows; ++i)
-       res[i] = i;
+   iota(res.data(), rows, IColumn::Permutation::value_type(0));

    if (limit >= rows)
        limit = 0;
@@ -1,24 +1,25 @@
#include "ColumnVector.h"

-#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnCompressed.h>
+#include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h>
#include <Columns/RadixSortHelper.h>
-#include <Processors/Transforms/ColumnGathererTransform.h>
#include <IO/WriteHelpers.h>
+#include <Processors/Transforms/ColumnGathererTransform.h>
+#include <base/bit_cast.h>
+#include <base/scope_guard.h>
+#include <base/sort.h>
+#include <base/unaligned.h>
#include <Common/Arena.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/NaNUtils.h>
#include <Common/RadixSort.h>
#include <Common/SipHash.h>
-#include <Common/WeakHash.h>
#include <Common/TargetSpecific.h>
+#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
-#include <base/sort.h>
-#include <base/unaligned.h>
-#include <base/bit_cast.h>
-#include <base/scope_guard.h>
+#include <Common/iota.h>

#include <bit>
#include <cmath>

@@ -244,8 +245,7 @@ void ColumnVector<T>::getPermutation(IColumn::PermutationSortDirection direction
    if (limit >= data_size)
        limit = 0;

-   for (size_t i = 0; i < data_size; ++i)
-       res[i] = i;
+   iota(res.data(), data_size, IColumn::Permutation::value_type(0));

    if constexpr (is_arithmetic_v<T> && !is_big_int_v<T>)
    {
@@ -1,7 +1,8 @@
-#include <Common/Arena.h>
-#include <Core/Field.h>
-#include <Columns/IColumnDummy.h>
#include <Columns/ColumnsCommon.h>
+#include <Columns/IColumnDummy.h>
+#include <Core/Field.h>
+#include <Common/Arena.h>
+#include <Common/iota.h>


namespace DB

@@ -87,8 +88,7 @@ void IColumnDummy::getPermutation(IColumn::PermutationSortDirection /*direction*
    size_t /*limit*/, int /*nan_direction_hint*/, Permutation & res) const
{
    res.resize(s);
-   for (size_t i = 0; i < s; ++i)
-       res[i] = i;
+   iota(res.data(), s, IColumn::Permutation::value_type(0));
}

ColumnPtr IColumnDummy::replicate(const Offsets & offsets) const
@@ -6,10 +6,11 @@
 * implementation.
 */

-#include <Columns/IColumn.h>
-#include <Common/PODArray.h>
-#include <base/sort.h>
#include <algorithm>
+#include <Columns/IColumn.h>
+#include <base/sort.h>
+#include <Common/PODArray.h>
+#include <Common/iota.h>


namespace DB

@@ -299,8 +300,7 @@ void IColumn::getPermutationImpl(
    if (limit >= data_size)
        limit = 0;

-   for (size_t i = 0; i < data_size; ++i)
-       res[i] = i;
+   iota(res.data(), data_size, Permutation::value_type(0));

    if (limit)
    {
@@ -1,6 +1,7 @@
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnsNumber.h>

+#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <pcg_random.hpp>
#include <gtest/gtest.h>

@@ -191,7 +192,7 @@ TEST(ColumnSparse, Permute)
    auto [sparse_src, full_src] = createColumns(n, k);

    IColumn::Permutation perm(n);
-   std::iota(perm.begin(), perm.end(), 0);
+   iota(perm.data(), perm.size(), size_t(0));
    std::shuffle(perm.begin(), perm.end(), rng);

    auto sparse_dst = sparse_src->permute(perm, limit);
@@ -9,7 +9,6 @@
#include <Columns/ColumnUnique.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>
-
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeMap.h>

@@ -17,6 +16,7 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypesNumber.h>
+#include <Common/iota.h>


using namespace DB;

@@ -32,8 +32,7 @@ void stableGetColumnPermutation(

    size_t size = column.size();
    out_permutation.resize(size);
-   for (size_t i = 0; i < size; ++i)
-       out_permutation[i] = i;
+   iota(out_permutation.data(), size, IColumn::Permutation::value_type(0));

    std::stable_sort(
        out_permutation.begin(),

@@ -146,10 +145,7 @@ void assertColumnPermutations(ColumnCreateFunc column_create_func, ValueTransfor

    std::vector<std::vector<Field>> ranges(ranges_size);
    std::vector<size_t> ranges_permutations(ranges_size);
-   for (size_t i = 0; i < ranges_size; ++i)
-   {
-       ranges_permutations[i] = i;
-   }
+   iota(ranges_permutations.data(), ranges_size, IColumn::Permutation::value_type(0));

    IColumn::Permutation actual_permutation;
    IColumn::Permutation expected_permutation;
@@ -589,6 +589,7 @@
    M(707, GCP_ERROR) \
    M(708, ILLEGAL_STATISTIC) \
    M(709, CANNOT_GET_REPLICATED_DATABASE_SNAPSHOT) \
+   M(710, FAULT_INJECTED) \
    \
    M(999, KEEPER_EXCEPTION) \
    M(1000, POCO_EXCEPTION) \
@@ -34,6 +34,8 @@ static struct InitFiu

#define APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE) \
    ONCE(replicated_merge_tree_commit_zk_fail_after_op) \
+   ONCE(replicated_queue_fail_next_entry) \
+   REGULAR(replicated_queue_unfail_entries) \
    ONCE(replicated_merge_tree_insert_quorum_fail_0) \
    REGULAR(replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault) \
    REGULAR(use_delayed_remote_source) \
@@ -288,6 +288,18 @@ The server successfully detected this situation and will download merged part fr
    M(OSReadChars, "Number of bytes read from filesystem, including page cache.") \
    M(OSWriteChars, "Number of bytes written to filesystem, including page cache.") \
    \
+   M(ParallelReplicasHandleRequestMicroseconds, "Time spent processing requests for marks from replicas") \
+   M(ParallelReplicasHandleAnnouncementMicroseconds, "Time spent processing replicas announcements") \
+   \
+   M(ParallelReplicasReadAssignedMarks, "Sum across all replicas of how many of scheduled marks were assigned by consistent hash") \
+   M(ParallelReplicasReadUnassignedMarks, "Sum across all replicas of how many unassigned marks were scheduled") \
+   M(ParallelReplicasReadAssignedForStealingMarks, "Sum across all replicas of how many of scheduled marks were assigned for stealing by consistent hash") \
+   \
+   M(ParallelReplicasStealingByHashMicroseconds, "Time spent collecting segments meant for stealing by hash") \
+   M(ParallelReplicasProcessingPartsMicroseconds, "Time spent processing data parts") \
+   M(ParallelReplicasStealingLeftoversMicroseconds, "Time spent collecting orphaned segments") \
+   M(ParallelReplicasCollectingOwnedSegmentsMicroseconds, "Time spent collecting segments meant by hash") \
+   \
    M(PerfCpuCycles, "Total cycles. Be wary of what happens during CPU frequency scaling.") \
    M(PerfInstructions, "Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts.") \
    M(PerfCacheReferences, "Cache accesses. Usually, this indicates Last Level Cache accesses, but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.") \
@@ -365,7 +365,7 @@ DECLARE_AVX512VBMI2_SPECIFIC_CODE(
    FUNCTION_HEADER \
    \
    name \
-   FUNCTION_BODY \
+   FUNCTION_BODY

/// NOLINTNEXTLINE
#define MULTITARGET_FUNCTION_AVX512BW_AVX512F_AVX2_SSE42(FUNCTION_HEADER, name, FUNCTION_BODY) \
@@ -28,6 +28,7 @@
#cmakedefine01 USE_S2_GEOMETRY
#cmakedefine01 USE_FASTOPS
#cmakedefine01 USE_SQIDS
+#cmakedefine01 USE_IDNA
#cmakedefine01 USE_NLP
#cmakedefine01 USE_VECTORSCAN
#cmakedefine01 USE_LIBURING
@@ -1,18 +1,9 @@
#pragma once

-#include <DataTypes/IDataType.h>
-#include <base/defines.h>
#include <base/types.h>
#include <Common/Concepts.h>
#include <Common/TargetSpecific.h>

-#include <algorithm>
#include <optional>
#include <Common/findExtreme.h>

namespace DB
{
template <typename T>
concept is_any_native_number = (is_any_of<T, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64>);

template <is_any_native_number T>
struct MinComparator

@@ -28,8 +19,8 @@ struct MaxComparator

MULTITARGET_FUNCTION_AVX2_SSE42(
    MULTITARGET_FUNCTION_HEADER(template <is_any_native_number T, typename ComparatorClass, bool add_all_elements, bool add_if_cond_zero> static std::optional<T> NO_INLINE),
-   findNumericExtremeImpl,
-   MULTITARGET_FUNCTION_BODY((const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t row_begin, size_t row_end)
+   findExtremeImpl,
+   MULTITARGET_FUNCTION_BODY((const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t row_begin, size_t row_end) /// NOLINT
{
    size_t count = row_end - row_begin;
    ptr += row_begin;

@@ -86,69 +77,67 @@ MULTITARGET_FUNCTION_AVX2_SSE42(
    }
))


/// Given a vector of T finds the extreme (MIN or MAX) value
template <is_any_native_number T, class ComparatorClass, bool add_all_elements, bool add_if_cond_zero>
static std::optional<T>
-findNumericExtreme(const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t start, size_t end)
+findExtreme(const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t start, size_t end)
{
#if USE_MULTITARGET_CODE
    /// We see no benefit from using AVX512BW or AVX512F (over AVX2), so we only declare SSE and AVX2
    if (isArchSupported(TargetArch::AVX2))
-       return findNumericExtremeImplAVX2<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
+       return findExtremeImplAVX2<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);

    if (isArchSupported(TargetArch::SSE42))
-       return findNumericExtremeImplSSE42<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
+       return findExtremeImplSSE42<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
#endif
-   return findNumericExtremeImpl<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
+   return findExtremeImpl<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
-std::optional<T> findNumericMin(const T * __restrict ptr, size_t start, size_t end)
+std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end)
{
-   return findNumericExtreme<T, MinComparator<T>, true, false>(ptr, nullptr, start, end);
+   return findExtreme<T, MinComparator<T>, true, false>(ptr, nullptr, start, end);
}

template <is_any_native_number T>
-std::optional<T> findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
+std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
-   return findNumericExtreme<T, MinComparator<T>, false, true>(ptr, condition_map, start, end);
+   return findExtreme<T, MinComparator<T>, false, true>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
-std::optional<T> findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
+std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
-   return findNumericExtreme<T, MinComparator<T>, false, false>(ptr, condition_map, start, end);
+   return findExtreme<T, MinComparator<T>, false, false>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
-std::optional<T> findNumericMax(const T * __restrict ptr, size_t start, size_t end)
+std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end)
{
-   return findNumericExtreme<T, MaxComparator<T>, true, false>(ptr, nullptr, start, end);
+   return findExtreme<T, MaxComparator<T>, true, false>(ptr, nullptr, start, end);
}

template <is_any_native_number T>
-std::optional<T> findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
+std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
-   return findNumericExtreme<T, MaxComparator<T>, false, true>(ptr, condition_map, start, end);
+   return findExtreme<T, MaxComparator<T>, false, true>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
-std::optional<T> findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
+std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
-   return findNumericExtreme<T, MaxComparator<T>, false, false>(ptr, condition_map, start, end);
+   return findExtreme<T, MaxComparator<T>, false, false>(ptr, condition_map, start, end);
}
||||
|
||||
|
||||
#define EXTERN_INSTANTIATION(T) \
|
||||
extern template std::optional<T> findNumericMin(const T * __restrict ptr, size_t start, size_t end); \
|
||||
extern template std::optional<T> findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
|
||||
extern template std::optional<T> findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
|
||||
extern template std::optional<T> findNumericMax(const T * __restrict ptr, size_t start, size_t end); \
|
||||
extern template std::optional<T> findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
|
||||
extern template std::optional<T> findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);
|
||||
|
||||
FOR_BASIC_NUMERIC_TYPES(EXTERN_INSTANTIATION)
|
||||
#undef EXTERN_INSTANTIATION
|
||||
#define INSTANTIATION(T) \
|
||||
template std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end); \
|
||||
template std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
|
||||
template std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
|
||||
template std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end); \
|
||||
template std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
|
||||
template std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);
|
||||
|
||||
FOR_BASIC_NUMERIC_TYPES(INSTANTIATION)
|
||||
#undef INSTANTIATION
|
||||
}
|
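The hunk above renames the `findNumeric*` helpers to `findExtreme*` without changing the dispatch scheme: one generic loop is compiled per instruction set and selected at runtime, and an empty range yields no result, hence `std::optional`. A minimal self-contained sketch of that calling pattern (plain C++; `find_min` and its empty-range handling are illustrative stand-ins, not the ClickHouse API):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

/// Scalar fallback; ClickHouse additionally compiles the same body with
/// SSE4.2/AVX2 target attributes and picks one via isArchSupported().
template <typename T>
std::optional<T> find_min(const T * ptr, size_t start, size_t end)
{
    if (start >= end)
        return std::nullopt; /// empty range -> no extreme
    T best = ptr[start];
    for (size_t i = start + 1; i < end; ++i)
        best = std::min(best, ptr[i]);
    return best;
}

int main()
{
    std::vector<int64_t> values{3, -1, 7, 0};
    if (auto m = find_min(values.data(), 0, values.size()))
        std::cout << *m << '\n'; /// prints -1
}
```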
45 src/Common/findExtreme.h Normal file
@ -0,0 +1,45 @@
#pragma once

#include <DataTypes/IDataType.h>
#include <base/defines.h>
#include <base/types.h>
#include <Common/Concepts.h>

#include <algorithm>
#include <optional>

namespace DB
{
template <typename T>
concept is_any_native_number = (is_any_of<T, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64>);

template <is_any_native_number T>
std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

#define EXTERN_INSTANTIATION(T) \
    extern template std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

FOR_BASIC_NUMERIC_TYPES(EXTERN_INSTANTIATION)
#undef EXTERN_INSTANTIATION

}
36 src/Common/iota.cpp Normal file
@ -0,0 +1,36 @@
#include <base/defines.h>
#include <Common/iota.h>
#include <Common/TargetSpecific.h>

namespace DB
{

MULTITARGET_FUNCTION_AVX2_SSE42(
    MULTITARGET_FUNCTION_HEADER(template <iota_supported_types T> void NO_INLINE),
    iotaImpl, MULTITARGET_FUNCTION_BODY((T * begin, size_t count, T first_value) /// NOLINT
    {
        for (size_t i = 0; i < count; i++)
            *(begin + i) = static_cast<T>(first_value + i);
    })
)

template <iota_supported_types T>
void iota(T * begin, size_t count, T first_value)
{
#if USE_MULTITARGET_CODE
    if (isArchSupported(TargetArch::AVX2))
        return iotaImplAVX2(begin, count, first_value);

    if (isArchSupported(TargetArch::SSE42))
        return iotaImplSSE42(begin, count, first_value);
#endif
    return iotaImpl(begin, count, first_value);
}

template void iota(UInt8 * begin, size_t count, UInt8 first_value);
template void iota(UInt32 * begin, size_t count, UInt32 first_value);
template void iota(UInt64 * begin, size_t count, UInt64 first_value);
#if defined(OS_DARWIN)
template void iota(size_t * begin, size_t count, size_t first_value);
#endif
}
34 src/Common/iota.h Normal file
@ -0,0 +1,34 @@
#pragma once

#include <base/types.h>
#include <Common/Concepts.h>

/// This is a replacement for std::iota to use dynamic dispatch
/// Note that it is only defined for containers with contiguous memory

namespace DB
{

/// Make sure to add any new type to the extern declaration at the end of the file and instantiate it in iota.cpp

template <typename T>
concept iota_supported_types = (is_any_of<
    T,
    UInt8,
    UInt32,
    UInt64
#if defined(OS_DARWIN)
    ,
    size_t
#endif
    >);

template <iota_supported_types T> void iota(T * begin, size_t count, T first_value);

extern template void iota(UInt8 * begin, size_t count, UInt8 first_value);
extern template void iota(UInt32 * begin, size_t count, UInt32 first_value);
extern template void iota(UInt64 * begin, size_t count, UInt64 first_value);
#if defined(OS_DARWIN)
extern template void iota(size_t * begin, size_t count, size_t first_value);
#endif
}
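The point of `DB::iota` over `std::iota` is to get a vectorized loop without compiling the whole binary for AVX2. A rough sketch of what the `MULTITARGET_*` macros boil down to, written with plain GCC/Clang target attributes (the function names here are illustrative, not the macros' actual expansion):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Same loop body compiled twice, once per instruction set.
__attribute__((target("avx2")))
void iota_avx2(uint64_t * begin, size_t count, uint64_t first)
{
    for (size_t i = 0; i < count; ++i)
        begin[i] = first + i; // eligible for AVX2 auto-vectorization
}

void iota_generic(uint64_t * begin, size_t count, uint64_t first)
{
    for (size_t i = 0; i < count; ++i)
        begin[i] = first + i;
}

void iota(uint64_t * begin, size_t count, uint64_t first)
{
    if (__builtin_cpu_supports("avx2")) // stand-in for isArchSupported()
        return iota_avx2(begin, count, first);
    return iota_generic(begin, count, first);
}

int main()
{
    uint64_t buf[4];
    iota(buf, 4, 10);
    for (uint64_t v : buf)
        std::cout << v << ' '; // 10 11 12 13
}
```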
@ -1,5 +1,6 @@
#include <Common/levenshteinDistance.h>
#include <Common/PODArray.h>
#include <Common/iota.h>
#include <Common/levenshteinDistance.h>

namespace DB
{
@ -11,8 +12,7 @@ size_t levenshteinDistance(const String & lhs, const String & rhs)

    PODArrayWithStackMemory<size_t, 64> row(n + 1);

    for (size_t i = 1; i <= n; ++i)
        row[i] = i;
    iota(row.data() + 1, n, size_t(1));

    for (size_t j = 1; j <= m; ++j)
    {

@ -6,6 +6,7 @@
#include <Common/HashTable/HashMap.h>
#include <Common/HashTable/HashSet.h>
#include <Common/HashTable/Hash.h>
#include <Common/iota.h>

#include <IO/ReadBufferFromString.h>
#include <IO/WriteHelpers.h>
@ -20,7 +21,7 @@ namespace
std::vector<UInt64> getVectorWithNumbersUpToN(size_t n)
{
    std::vector<UInt64> res(n);
    std::iota(res.begin(), res.end(), 0);
    iota(res.data(), res.size(), UInt64(0));
    return res;
}
@ -4,6 +4,7 @@
#include "config.h"

#include <chrono>
#include <mutex>
#include <string>
#include <Coordination/KeeperStateMachine.h>
#include <Coordination/KeeperStateManager.h>
@ -14,6 +15,7 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <boost/algorithm/string.hpp>
#include <libnuraft/callback.hxx>
#include <libnuraft/cluster_config.hxx>
#include <libnuraft/log_val_type.hxx>
#include <libnuraft/msg_type.hxx>
@ -196,13 +198,9 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server
        nuraft::raft_server::commit_in_bg();
    }

    void commitLogs(uint64_t index_to_commit, bool initial_commit_exec)
    std::unique_lock<std::recursive_mutex> lockRaft()
    {
        leader_commit_index_.store(index_to_commit);
        quick_commit_index_ = index_to_commit;
        lagging_sm_target_index_ = index_to_commit;

        commit_in_bg_exec(0, initial_commit_exec);
        return std::unique_lock(lock_);
    }

    using nuraft::raft_server::raft_server;
@ -518,6 +516,7 @@ void KeeperServer::putLocalReadRequest(const KeeperStorage::RequestForSession &
RaftAppendResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions)
{
    std::vector<nuraft::ptr<nuraft::buffer>> entries;
    entries.reserve(requests_for_sessions.size());
    for (const auto & request_for_session : requests_for_sessions)
        entries.push_back(getZooKeeperLogEntry(request_for_session));

@ -630,32 +629,32 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ
{
    const auto preprocess_logs = [&]
    {
        auto lock = raft_instance->lockRaft();
        keeper_context->local_logs_preprocessed = true;
        auto log_store = state_manager->load_log_store();
        if (last_log_idx_on_disk > 0 && last_log_idx_on_disk > state_machine->last_commit_index())
        {
            auto log_entries = log_store->log_entries(state_machine->last_commit_index() + 1, last_log_idx_on_disk + 1);
        auto log_entries = log_store->log_entries(state_machine->last_commit_index() + 1, log_store->next_slot());

            size_t preprocessed = 0;
            LOG_INFO(log, "Preprocessing {} log entries", log_entries->size());
            auto idx = state_machine->last_commit_index() + 1;
            for (const auto & entry : *log_entries)
            {
                if (entry && entry->get_val_type() == nuraft::log_val_type::app_log)
                    state_machine->pre_commit(idx, entry->get_buf());

                ++idx;
                ++preprocessed;

                if (preprocessed % 50000 == 0)
                    LOG_TRACE(log, "Preprocessed {}/{} entries", preprocessed, log_entries->size());
            }
            LOG_INFO(log, "Preprocessing done");
        }
        else
        if (log_entries->empty())
        {
            LOG_INFO(log, "All local log entries preprocessed");
            return;
        }

        size_t preprocessed = 0;
        LOG_INFO(log, "Preprocessing {} log entries", log_entries->size());
        auto idx = state_machine->last_commit_index() + 1;
        for (const auto & entry : *log_entries)
        {
            if (entry && entry->get_val_type() == nuraft::log_val_type::app_log)
                state_machine->pre_commit(idx, entry->get_buf());

            ++idx;
            ++preprocessed;

            if (preprocessed % 50000 == 0)
                LOG_TRACE(log, "Preprocessed {}/{} entries", preprocessed, log_entries->size());
        }
        LOG_INFO(log, "Preprocessing done");
    };

    switch (type)
@ -666,43 +665,34 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ
            /// until we preprocess all stored logs
            return nuraft::cb_func::ReturnCode::ReturnNull;
        }
        case nuraft::cb_func::InitialBatchCommited:
        {
            preprocess_logs();
            break;
        }
        case nuraft::cb_func::GotAppendEntryReqFromLeader:
        {
            auto & req = *static_cast<nuraft::req_msg *>(param->ctx);

            if (req.get_commit_idx() == 0 || req.log_entries().empty())
                break;

            auto last_committed_index = state_machine->last_commit_index();
            // Actual log number.
            auto index_to_commit = std::min({last_log_idx_on_disk, req.get_last_log_idx(), req.get_commit_idx()});

            if (index_to_commit > last_committed_index)
            {
                LOG_TRACE(log, "Trying to commit local log entries, committing upto {}", index_to_commit);
                raft_instance->commitLogs(index_to_commit, true);
                /// after we manually committed all the local logs we can, we assert that all of the local logs are either
                /// committed or preprocessed
                if (!keeper_context->local_logs_preprocessed)
                    throw Exception(ErrorCodes::LOGICAL_ERROR, "Local logs are not preprocessed");
            }
            else if (last_log_idx_on_disk <= last_committed_index)
            {
                keeper_context->local_logs_preprocessed = true;
            }
            else if
            (
                index_to_commit == 0 ||
                (index_to_commit == last_committed_index && last_log_idx_on_disk > index_to_commit) /// we need to rollback all the logs so we preprocess all of them
            )
            /// maybe we got snapshot installed
            if (state_machine->last_commit_index() >= last_log_idx_on_disk)
            {
                preprocess_logs();
                break;
            }

            auto & req = *static_cast<nuraft::req_msg *>(param->ctx);

            if (req.log_entries().empty())
                break;

            if (req.get_last_log_idx() < last_log_idx_on_disk)
                last_log_idx_on_disk = req.get_last_log_idx();
            /// we don't want to accept too many new logs before we preprocess all the local logs
            /// because the next log index is decreased on each failure we need to also accept requests when it's near last_log_idx_on_disk
            /// so the counter is reset on the leader side
            else if (raft_instance->get_target_committed_log_idx() >= last_log_idx_on_disk && req.get_last_log_idx() > last_log_idx_on_disk)
                return nuraft::cb_func::ReturnNull;

            break;
        }
        case nuraft::cb_func::StateMachineExecution:
        {
            if (state_machine->last_commit_index() >= last_log_idx_on_disk)
                preprocess_logs();
            break;
        }
        default:
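The `lockRaft()` change above replaces manual log committing with taking NuRaft's internal lock, so `preprocess_logs` can run without racing the background commit thread. The enabling C++ idiom is returning a `std::unique_lock` by value, which transfers lock ownership to the caller; a minimal sketch (the class and method names mirror the diff, the rest is illustrative):

```cpp
#include <mutex>

// Returning std::unique_lock hands the held mutex to the caller; it is
// released automatically when the caller's lock object goes out of scope.
class Guarded
{
    std::recursive_mutex lock_;
public:
    std::unique_lock<std::recursive_mutex> lockRaft() { return std::unique_lock(lock_); }
};

void example(Guarded & g)
{
    auto lock = g.lockRaft(); // held for the rest of this scope
    // ... mutate state protected by lock_ ...
}
```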
@ -13,6 +13,7 @@ private:

    static inline const std::unordered_map<LogsLevel, Poco::Message::Priority> LEVELS =
    {
        {LogsLevel::test, Poco::Message::Priority::PRIO_TEST},
        {LogsLevel::trace, Poco::Message::Priority::PRIO_TRACE},
        {LogsLevel::debug, Poco::Message::Priority::PRIO_DEBUG},
        {LogsLevel::information, Poco::Message::PRIO_INFORMATION},

@ -26,6 +26,8 @@ namespace DB
    M(UInt64, max_active_parts_loading_thread_pool_size, 64, "The number of threads to load active set of data parts (Active ones) at startup.", 0) \
    M(UInt64, max_outdated_parts_loading_thread_pool_size, 32, "The number of threads to load inactive set of data parts (Outdated ones) at startup.", 0) \
    M(UInt64, max_parts_cleaning_thread_pool_size, 128, "The number of threads for concurrent removal of inactive data parts.", 0) \
    M(UInt64, max_mutations_bandwidth_for_server, 0, "The maximum read speed of all mutations on server in bytes per second. Zero means unlimited.", 0) \
    M(UInt64, max_merges_bandwidth_for_server, 0, "The maximum read speed of all merges on server in bytes per second. Zero means unlimited.", 0) \
    M(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
    M(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
    M(UInt64, max_remote_read_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for read. Zero means unlimited.", 0) \
@ -111,6 +113,8 @@ namespace DB
    M(Bool, validate_tcp_client_information, false, "Validate client_information in the query packet over the native TCP protocol.", 0) \
    M(Bool, storage_metadata_write_full_object_key, false, "Write disk metadata files with VERSION_FULL_OBJECT_KEY format", 0) \

/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp

DECLARE_SETTINGS_TRAITS(ServerSettingsTraits, SERVER_SETTINGS)

struct ServerSettings : public BaseSettings<ServerSettingsTraits>

@ -157,7 +157,7 @@ class IColumn;
    M(Bool, allow_suspicious_fixed_string_types, false, "In CREATE TABLE statement allows creating columns of type FixedString(n) with n > 256. FixedString with length >= 256 is suspicious and most likely indicates misusage", 0) \
    M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \
    M(Bool, allow_suspicious_ttl_expressions, false, "Reject TTL expressions that don't depend on any of table's columns. It indicates a user error most of the time.", 0) \
    M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \
    M(Bool, compile_expressions, false, "Compile some scalar functions and operators to native code.", 0) \
    M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
    M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \
    M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \
@ -185,6 +185,7 @@ class IColumn;
    M(Float, parallel_replicas_single_task_marks_count_multiplier, 2, "A multiplier which will be added during calculation for minimal number of marks to retrieve from coordinator. This will be applied only for remote replicas.", 0) \
    M(Bool, parallel_replicas_for_non_replicated_merge_tree, false, "If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables", 0) \
    M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \
    M(UInt64, parallel_replicas_mark_segment_size, 128, "Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing", 0) \
    \
    M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards. Shard is marked as unavailable when: 1) The shard cannot be reached due to a connection failure. 2) Shard is unresolvable through DNS. 3) Table does not exist on the shard.", 0) \
    \
@ -584,6 +585,7 @@ class IColumn;
    M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
    M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \
    M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \
    M(Bool, ignore_materialized_views_with_dropped_target_table, false, "Ignore MVs with dropped target table during pushing to views", 0) \
    M(Bool, allow_experimental_refreshable_materialized_view, false, "Allow refreshable materialized views (CREATE MATERIALIZED VIEW <name> REFRESH ...).", 0) \
    M(Bool, stop_refreshable_materialized_views_on_startup, false, "On server startup, prevent scheduling of refreshable materialized views, as if with SYSTEM STOP VIEWS. You can manually start them with SYSTEM START VIEWS or SYSTEM START VIEW <name> afterwards. Also applies to newly created views. Has no effect on non-refreshable materialized views.", 0) \
    M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \
@ -707,7 +709,6 @@ class IColumn;
    M(Bool, query_plan_execute_functions_after_sorting, true, "Allow to re-order functions after sorting", 0) \
    M(Bool, query_plan_reuse_storage_ordering_for_window_functions, true, "Allow to use the storage sorting for window functions", 0) \
    M(Bool, query_plan_lift_up_union, true, "Allow to move UNIONs up so that more parts of the query plan can be optimized", 0) \
    M(Bool, query_plan_optimize_primary_key, true, "Analyze primary key using query plan (instead of AST)", 0) \
    M(Bool, query_plan_read_in_order, true, "Use query plan for read-in-order optimization", 0) \
    M(Bool, query_plan_aggregation_in_order, true, "Use query plan for aggregation-in-order optimization", 0) \
    M(Bool, query_plan_remove_redundant_sorting, true, "Remove redundant sorting in query plan. For example, sorting steps related to ORDER BY clauses in subqueries", 0) \
@ -843,7 +844,7 @@ class IColumn;
    M(Timezone, session_timezone, "", "This setting can be removed in the future due to potential caveats. It is experimental and is not suitable for production usage. The default timezone for current session or query. The server default timezone if empty.", 0) \
    M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0) \
    M(Bool, create_index_ignore_unique, false, "Ignore UNIQUE keyword in CREATE UNIQUE INDEX. Made for SQL compatibility tests.", 0) \
    M(Bool, print_pretty_type_names, false, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \
    M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \
    M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \
    M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0)\
    M(Bool, allow_experimental_shared_merge_tree, false, "Only available in ClickHouse Cloud", 0) \
@ -916,6 +917,7 @@ class IColumn;
    MAKE_OBSOLETE(M, Bool, optimize_move_functions_out_of_any, false) \
    MAKE_OBSOLETE(M, Bool, allow_experimental_undrop_table_query, true) \
    MAKE_OBSOLETE(M, Bool, allow_experimental_s3queue, true) \
    MAKE_OBSOLETE(M, Bool, query_plan_optimize_primary_key, true) \

    /** The section above is for obsolete settings. Do not add anything there. */

@ -981,6 +983,7 @@ class IColumn;
    M(SchemaInferenceMode, schema_inference_mode, "default", "Mode of schema inference. 'default' - assume that all files have the same schema and schema can be inferred from any file, 'union' - files can have different schemas and the resulting schema should be the union of schemas of all files", 0) \
    M(Bool, schema_inference_make_columns_nullable, true, "If set to true, all inferred types will be Nullable in schema inference for formats without information about nullability.", 0) \
    M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
    M(Bool, input_format_json_read_bools_as_strings, true, "Allow to parse bools as strings in JSON input formats", 0) \
    M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \
    M(Bool, input_format_json_validate_types_from_metadata, true, "For JSON/JSONCompact/JSONColumnsWithMetadata input formats this controls whether format parser should check if data types from input metadata match data types of the corresponding columns from the table", 0) \
    M(Bool, input_format_json_read_numbers_as_strings, true, "Allow to parse numbers as strings in JSON input formats", 0) \
|
@ -81,6 +81,8 @@ namespace SettingsChangesHistory
|
||||
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
|
||||
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
|
||||
{
|
||||
{"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
|
||||
{"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}}},
|
||||
{"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."},
|
||||
{"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"},
|
||||
{"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"},
|
||||
|
@ -115,6 +115,8 @@ IMPLEMENT_SETTING_ENUM(DistributedDDLOutputMode, ErrorCodes::BAD_ARGUMENTS,
|
||||
{{"none", DistributedDDLOutputMode::NONE},
|
||||
{"throw", DistributedDDLOutputMode::THROW},
|
||||
{"null_status_on_timeout", DistributedDDLOutputMode::NULL_STATUS_ON_TIMEOUT},
|
||||
{"throw_only_active", DistributedDDLOutputMode::THROW_ONLY_ACTIVE},
|
||||
{"null_status_on_timeout_only_active", DistributedDDLOutputMode::NULL_STATUS_ON_TIMEOUT_ONLY_ACTIVE},
|
||||
{"never_throw", DistributedDDLOutputMode::NEVER_THROW}})
|
||||
|
||||
IMPLEMENT_SETTING_ENUM(StreamingHandleErrorMode, ErrorCodes::BAD_ARGUMENTS,
|
||||
|
@ -173,6 +173,8 @@ enum class DistributedDDLOutputMode
|
||||
THROW,
|
||||
NULL_STATUS_ON_TIMEOUT,
|
||||
NEVER_THROW,
|
||||
THROW_ONLY_ACTIVE,
|
||||
NULL_STATUS_ON_TIMEOUT_ONLY_ACTIVE,
|
||||
};
|
||||
|
||||
DECLARE_SETTING_ENUM(DistributedDDLOutputMode)
|
||||
|
@ -85,10 +85,7 @@ std::string DataTypeMap::doGetName() const
std::string DataTypeMap::doGetPrettyName(size_t indent) const
{
    WriteBufferFromOwnString s;
    s << "Map(\n"
      << fourSpaceIndent(indent + 1) << key_type->getPrettyName(indent + 1) << ",\n"
      << fourSpaceIndent(indent + 1) << value_type->getPrettyName(indent + 1) << '\n'
      << fourSpaceIndent(indent) << ')';
    s << "Map(" << key_type->getPrettyName(indent) << ", " << value_type->getPrettyName(indent) << ')';
    return s.str();
}
@ -98,21 +98,38 @@ std::string DataTypeTuple::doGetPrettyName(size_t indent) const
{
    size_t size = elems.size();
    WriteBufferFromOwnString s;
    s << "Tuple(\n";

    for (size_t i = 0; i != size; ++i)
    /// If the Tuple is named, we will output it in multiple lines with indentation.
    if (have_explicit_names)
    {
        if (i != 0)
            s << ",\n";
        s << "Tuple(\n";

        s << fourSpaceIndent(indent + 1);
        if (have_explicit_names)
            s << backQuoteIfNeed(names[i]) << ' ';
        for (size_t i = 0; i != size; ++i)
        {
            if (i != 0)
                s << ",\n";

        s << elems[i]->getPrettyName(indent + 1);
            s << fourSpaceIndent(indent + 1)
              << backQuoteIfNeed(names[i]) << ' '
              << elems[i]->getPrettyName(indent + 1);
        }

        s << ')';
    }
    else
    {
        s << "Tuple(";

        for (size_t i = 0; i != size; ++i)
        {
            if (i != 0)
                s << ", ";
            s << elems[i]->getPrettyName(indent);
        }

        s << ')';
    }

    s << '\n' << fourSpaceIndent(indent) << ')';
    return s.str();
}
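The net effect of the two pretty-name hunks: `Map` and unnamed `Tuple` types now print on a single line, while named tuples keep the indented multi-line form. A standalone sketch of the same branching (element names and types are assumed inputs; `pretty` is an illustrative stand-in for `doGetPrettyName`):

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// One line for unnamed tuples, multi-line with 4-space indent for named ones.
std::string pretty(const std::vector<std::pair<std::string, std::string>> & elems, bool named, size_t indent = 0)
{
    std::string pad(4 * (indent + 1), ' ');
    std::string s;
    if (named)
    {
        s = "Tuple(\n";
        for (size_t i = 0; i < elems.size(); ++i)
            s += (i ? ",\n" : "") + pad + elems[i].first + " " + elems[i].second;
        s += "\n" + std::string(4 * indent, ' ') + ")";
    }
    else
    {
        s = "Tuple(";
        for (size_t i = 0; i < elems.size(); ++i)
            s += (i ? ", " : "") + elems[i].second;
        s += ")";
    }
    return s;
}

int main()
{
    std::cout << pretty({{"a", "String"}, {"b", "UInt64"}}, true) << '\n';  // multi-line
    std::cout << pretty({{"", "String"}, {"", "UInt64"}}, false) << '\n';   // Tuple(String, UInt64)
}
```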
@ -335,6 +335,22 @@ void SerializationString::deserializeTextJSON(IColumn & column, ReadBuffer & ist
    {
        read(column, [&](ColumnString::Chars & data) { readJSONArrayInto(data, istr); });
    }
    else if (settings.json.read_bools_as_strings && !istr.eof() && (*istr.position() == 't' || *istr.position() == 'f'))
    {
        String str_value;
        if (*istr.position() == 't')
        {
            assertString("true", istr);
            str_value = "true";
        }
        else if (*istr.position() == 'f')
        {
            assertString("false", istr);
            str_value = "false";
        }

        read(column, [&](ColumnString::Chars & data) { data.insert(str_value.begin(), str_value.end()); });
    }
    else if (settings.json.read_numbers_as_strings && !istr.eof() && *istr.position() != '"')
    {
        String field;
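The new branch above lets String columns accept JSON `true`/`false` literals when `input_format_json_read_bools_as_strings` is on: it peeks at the first byte, asserts the full literal, and stores its text. A minimal sketch of that peek-and-assert step (a hypothetical helper, not the ClickHouse reader API):

```cpp
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>

// If the buffer starts with a JSON bool literal, consume it and return its text.
std::string readBoolAsString(const char *& pos, const char * end)
{
    for (const char * lit : {"true", "false"})
    {
        size_t n = std::strlen(lit);
        if (static_cast<size_t>(end - pos) >= n && std::memcmp(pos, lit, n) == 0)
        {
            pos += n; // consume the literal, like assertString() does
            return lit;
        }
    }
    throw std::runtime_error("not a JSON bool");
}

int main()
{
    std::string json = "false,";
    const char * pos = json.data();
    std::cout << readBoolAsString(pos, json.data() + json.size()) << '\n'; // false
}
```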
@ -92,9 +92,16 @@ void validate(const ASTCreateQuery & create_query)

DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context)
{
    const auto engine_name = create.storage->engine->name;
    /// check if the database engine is a valid one before proceeding
    if (!database_engines.contains(create.storage->engine->name))
        throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", create.storage->engine->name);
    if (!database_engines.contains(engine_name))
    {
        auto hints = getHints(engine_name);
        if (!hints.empty())
            throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine {}. Maybe you meant: {}", engine_name, toString(hints));
        else
            throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", create.storage->engine->name);
    }

    /// if the engine is found (i.e. registered with the factory instance), then validate if the
    /// supplied engine arguments, settings and table overrides are valid for the engine.

@ -1,5 +1,6 @@
#pragma once

#include <Common/NamePrompter.h>
#include <Interpreters/Context_fwd.h>
#include <Databases/IDatabase.h>
#include <Parsers/ASTCreateQuery.h>
@ -24,7 +25,7 @@ static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &eng
    return ast->as<ASTLiteral>()->value.safeGet<ValueType>();
}

class DatabaseFactory : private boost::noncopyable
class DatabaseFactory : private boost::noncopyable, public IHints<>
{
public:

@ -52,6 +53,14 @@ public:

    const DatabaseEngines & getDatabaseEngines() const { return database_engines; }

    std::vector<String> getAllRegisteredNames() const override
    {
        std::vector<String> result;
        auto getter = [](const auto & pair) { return pair.first; };
        std::transform(database_engines.begin(), database_engines.end(), std::back_inserter(result), getter);
        return result;
    }

private:
    DatabaseEngines database_engines;
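Inheriting from `IHints<>` gives the factory "maybe you meant" suggestions almost for free: `getHints()` compares the unknown name against `getAllRegisteredNames()` by edit distance. A self-contained sketch of that idea (the distance threshold and engine names here are illustrative, not ClickHouse's actual tuning):

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Classic single-row Levenshtein distance.
size_t editDistance(const std::string & a, const std::string & b)
{
    std::vector<size_t> row(b.size() + 1);
    for (size_t j = 0; j <= b.size(); ++j)
        row[j] = j;
    for (size_t i = 1; i <= a.size(); ++i)
    {
        size_t prev = row[0]++; // row[0] becomes i; prev holds the diagonal
        for (size_t j = 1; j <= b.size(); ++j)
        {
            size_t cur = std::min({row[j] + 1, row[j - 1] + 1, prev + (a[i - 1] != b[j - 1])});
            prev = row[j];
            row[j] = cur;
        }
    }
    return row[b.size()];
}

int main()
{
    std::vector<std::string> engines{"Atomic", "Lazy", "Replicated", "MySQL"};
    std::string unknown = "Atomik";
    for (const auto & e : engines)
        if (editDistance(unknown, e) <= 2) // threshold is illustrative
            std::cout << "Maybe you meant: " << e << '\n';
}
```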
@ -2,6 +2,7 @@

#include <Dictionaries/IDictionary.h>
#include <Common/CurrentThread.h>
#include <Common/iota.h>
#include <Common/scope_guard_safe.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ThreadPool.h>
@ -53,7 +54,7 @@ public:
        LOG_TRACE(dictionary.log, "Will load the dictionary using {} threads (with {} backlog)", shards, backlog);

        shards_slots.resize(shards);
        std::iota(shards_slots.begin(), shards_slots.end(), 0);
        iota(shards_slots.data(), shards_slots.size(), UInt64(0));

        for (size_t shard = 0; shard < shards; ++shard)
        {

@ -5,6 +5,7 @@

#include <base/sort.h>

#include <Common/iota.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnTuple.h>
#include <DataTypes/DataTypeArray.h>
@ -507,7 +508,7 @@ const IColumn * unrollSimplePolygons(const ColumnPtr & column, Offset & offset)
    if (!ptr_polygons)
        throw Exception(ErrorCodes::TYPE_MISMATCH, "Expected a column containing arrays of points");
    offset.ring_offsets.assign(ptr_polygons->getOffsets());
    std::iota(offset.polygon_offsets.begin(), offset.polygon_offsets.end(), 1);
    iota<IColumn::Offsets::value_type>(offset.polygon_offsets.data(), offset.polygon_offsets.size(), IColumn::Offsets::value_type(1));
    offset.multi_polygon_offsets.assign(offset.polygon_offsets);

    return ptr_polygons->getDataPtr().get();

@ -1,6 +1,7 @@
#pragma once

#include <base/types.h>
#include <Common/iota.h>
#include <Common/ThreadPool.h>
#include <Poco/Logger.h>

@ -184,7 +185,7 @@ public:
    {
        setBoundingBox();
        std::vector<size_t> order(polygons.size());
        std::iota(order.begin(), order.end(), 0);
        iota(order.data(), order.size(), size_t(0));
        root = makeCell(min_x, min_y, max_x, max_y, order);
    }

@ -450,10 +450,11 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo
            break;
        case FormatSettings::EscapingRule::JSON:
            result += fmt::format(
                ", try_infer_numbers_from_strings={}, read_bools_as_numbers={}, read_objects_as_strings={}, read_numbers_as_strings={}, "
                ", try_infer_numbers_from_strings={}, read_bools_as_numbers={}, read_bools_as_strings={}, read_objects_as_strings={}, read_numbers_as_strings={}, "
                "read_arrays_as_strings={}, try_infer_objects_as_tuples={}, infer_incomplete_types_as_strings={}, try_infer_objects={}",
                settings.json.try_infer_numbers_from_strings,
                settings.json.read_bools_as_numbers,
                settings.json.read_bools_as_strings,
                settings.json.read_objects_as_strings,
                settings.json.read_numbers_as_strings,
                settings.json.read_arrays_as_strings,

@ -111,6 +111,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
    format_settings.json.quote_denormals = settings.output_format_json_quote_denormals;
    format_settings.json.quote_decimals = settings.output_format_json_quote_decimals;
    format_settings.json.read_bools_as_numbers = settings.input_format_json_read_bools_as_numbers;
    format_settings.json.read_bools_as_strings = settings.input_format_json_read_bools_as_strings;
    format_settings.json.read_numbers_as_strings = settings.input_format_json_read_numbers_as_strings;
    format_settings.json.read_objects_as_strings = settings.input_format_json_read_objects_as_strings;
    format_settings.json.read_arrays_as_strings = settings.input_format_json_read_arrays_as_strings;

@ -204,6 +204,7 @@ struct FormatSettings
        bool ignore_unknown_keys_in_named_tuple = false;
        bool serialize_as_strings = false;
        bool read_bools_as_numbers = true;
        bool read_bools_as_strings = true;
        bool read_numbers_as_strings = true;
        bool read_objects_as_strings = true;
        bool read_arrays_as_strings = true;
@ -377,6 +377,22 @@ namespace
        type_indexes.erase(TypeIndex::UInt8);
    }

    /// If we have Bool and String types, convert all Bools to String.
    /// It's applied only when setting input_format_json_read_bools_as_strings is enabled.
    void transformJSONBoolsAndStringsToString(DataTypes & data_types, TypeIndexesSet & type_indexes)
    {
        if (!type_indexes.contains(TypeIndex::String) || !type_indexes.contains(TypeIndex::UInt8))
            return;

        for (auto & type : data_types)
        {
            if (isBool(type))
                type = std::make_shared<DataTypeString>();
        }

        type_indexes.erase(TypeIndex::UInt8);
    }

    /// If we have type Nothing/Nullable(Nothing) and some other non Nothing types,
    /// convert all Nothing/Nullable(Nothing) types to the first non Nothing.
    /// For example, when we have [Nothing, Array(Int64)] it will convert it to [Array(Int64), Array(Int64)]
@ -628,6 +644,10 @@ namespace
        if (settings.json.read_bools_as_numbers)
            transformBoolsAndNumbersToNumbers(data_types, type_indexes);

        /// Convert Bool to String if needed.
        if (settings.json.read_bools_as_strings)
            transformJSONBoolsAndStringsToString(data_types, type_indexes);

        if (settings.json.try_infer_objects_as_tuples)
            mergeJSONPaths(data_types, type_indexes, settings, json_info);
    };
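The inference hook above resolves mixed observations per JSON path: if one row yielded Bool and another String, everything widens to String, mirroring how `read_bools_as_numbers` widens Bool to a number type. A toy model of the rule, with types as plain strings:

```cpp
#include <iostream>
#include <set>
#include <string>
#include <vector>

// If both Bool and String were inferred across rows, unify everything to String.
void unifyBoolsAndStrings(std::vector<std::string> & types)
{
    std::set<std::string> seen(types.begin(), types.end());
    if (!seen.count("String") || !seen.count("Bool"))
        return;
    for (auto & t : types)
        if (t == "Bool")
            t = "String";
}

int main()
{
    std::vector<std::string> types{"Bool", "String", "Bool"};
    unifyBoolsAndStrings(types);
    for (const auto & t : types)
        std::cout << t << ' '; // String String String
}
```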
@ -83,6 +83,10 @@ if (TARGET ch_contrib::sqids)
    list (APPEND PRIVATE_LIBS ch_contrib::sqids)
endif()

if (TARGET ch_contrib::idna)
    list (APPEND PRIVATE_LIBS ch_contrib::idna)
endif()

if (TARGET ch_contrib::h3)
    list (APPEND PRIVATE_LIBS ch_contrib::h3)
endif()

@ -6,6 +6,7 @@
#include <Functions/FunctionsStringSimilarity.h>
#include <Common/PODArray.h>
#include <Common/UTF8Helpers.h>
#include <Common/iota.h>

#ifdef __SSE4_2__
#    include <nmmintrin.h>
@ -246,8 +247,7 @@ struct ByteEditDistanceImpl
        ResultType insertion = 0;
        ResultType deletion = 0;

        for (size_t i = 0; i <= haystack_size; ++i)
            distances0[i] = i;
        iota(distances0.data(), haystack_size + 1, ResultType(0));

        for (size_t pos_needle = 0; pos_needle < needle_size; ++pos_needle)
        {

@ -1,5 +1,6 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <DataTypes/DataTypeArray.h>
#include <Functions/FunctionFactory.h>
@ -80,7 +81,7 @@ public:
        const size_t cur_samples = std::min(num_elements, samples);

        indices.resize(num_elements);
        std::iota(indices.begin(), indices.end(), prev_array_offset);
        iota(indices.data(), indices.size(), prev_array_offset);
        std::shuffle(indices.begin(), indices.end(), rng);

        for (UInt64 i = 0; i < cur_samples; i++)

@ -7,6 +7,7 @@
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <Common/shuffle.h>
#include <Common/typeid_cast.h>
@ -150,7 +151,7 @@ ColumnPtr FunctionArrayShuffleImpl<Traits>::executeGeneric(const ColumnArray & a
    size_t size = offsets.size();
    size_t nested_size = array.getData().size();
    IColumn::Permutation permutation(nested_size);
    std::iota(std::begin(permutation), std::end(permutation), 0);
    iota(permutation.data(), permutation.size(), IColumn::Permutation::value_type(0));

    ColumnArray::Offset current_offset = 0;
    for (size_t i = 0; i < size; ++i)

@ -1,5 +1,6 @@
#include <Functions/array/arraySort.h>
#include <Functions/FunctionFactory.h>
#include <Functions/array/arraySort.h>
#include <Common/iota.h>

namespace DB
{
@ -55,9 +56,7 @@ ColumnPtr ArraySortImpl<positive, is_partial>::execute(
    size_t size = offsets.size();
    size_t nested_size = array.getData().size();
    IColumn::Permutation permutation(nested_size);

    for (size_t i = 0; i < nested_size; ++i)
        permutation[i] = i;
    iota(permutation.data(), nested_size, IColumn::Permutation::value_type(0));

    ColumnArray::Offset current_offset = 0;
    for (size_t i = 0; i < size; ++i)
202 src/Functions/idna.cpp Normal file
@ -0,0 +1,202 @@
#include "config.h"

#if USE_IDNA

#include <Columns/ColumnString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>

#ifdef __clang__
#    pragma clang diagnostic push
#    pragma clang diagnostic ignored "-Wnewline-eof"
#endif
#    include <ada/idna/to_ascii.h>
#    include <ada/idna/to_unicode.h>
#    include <ada/idna/unicode_transcoding.h>
#ifdef __clang__
#    pragma clang diagnostic pop
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int NOT_IMPLEMENTED;
}

/// Implementation of
/// - idnaEncode(), tryIdnaEncode() and idnaDecode(), see https://en.wikipedia.org/wiki/Internationalized_domain_name#ToASCII_and_ToUnicode
///   and [3] https://www.unicode.org/reports/tr46/#ToUnicode

enum class ErrorHandling
{
    Throw,  /// Throw exception
    Empty   /// Return empty string
};


/// Translates a UTF-8 string (typically an Internationalized Domain Name for Applications, IDNA) to an ASCII-encoded equivalent. The
/// encoding is performed per domain component and based on Punycode with ASCII Compatible Encoding (ACE) prefix "xn--".
/// Example: "straße.münchen.de" --> "xn--strae-oqa.xn--mnchen-3ya.de"
/// Note: doesn't do percent decoding. Doesn't trim tabs, spaces or control characters. Expects non-empty inputs.
template <ErrorHandling error_handling>
struct IdnaEncode
{
    static void vector(
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data,
        ColumnString::Offsets & res_offsets)
    {
        const size_t rows = offsets.size();
        res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII
        res_offsets.reserve(rows);

        size_t prev_offset = 0;
        std::string ascii;
        for (size_t row = 0; row < rows; ++row)
        {
            const char * value = reinterpret_cast<const char *>(&data[prev_offset]);
            const size_t value_length = offsets[row] - prev_offset - 1;

            std::string_view value_view(value, value_length);
            if (!value_view.empty()) /// to_ascii() expects non-empty input
            {
                ascii = ada::idna::to_ascii(value_view);
                const bool ok = !ascii.empty();
                if (!ok)
                {
                    if constexpr (error_handling == ErrorHandling::Throw)
                    {
                        throw Exception(ErrorCodes::BAD_ARGUMENTS, "'{}' cannot be converted to ASCII", value_view);
                    }
                    else
                    {
                        static_assert(error_handling == ErrorHandling::Empty);
                        ascii.clear();
                    }
                }
            }

            res_data.insert(ascii.c_str(), ascii.c_str() + ascii.size() + 1);
            res_offsets.push_back(res_data.size());

            prev_offset = offsets[row];

            ascii.clear();
        }
    }

    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed");
    }
};
/// Translates an ASCII-encoded IDNA string back to its UTF-8 representation.
struct IdnaDecode
{
    /// As per the specification, invalid inputs are returned as is, i.e. there is no special error handling.
    static void vector(
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data,
        ColumnString::Offsets & res_offsets)
    {
        const size_t rows = offsets.size();
        res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII
        res_offsets.reserve(rows);

        size_t prev_offset = 0;
        std::string unicode;
        for (size_t row = 0; row < rows; ++row)
        {
            const char * ascii = reinterpret_cast<const char *>(&data[prev_offset]);
            const size_t ascii_length = offsets[row] - prev_offset - 1;
            std::string_view ascii_view(ascii, ascii_length);

            unicode = ada::idna::to_unicode(ascii_view);

            res_data.insert(unicode.c_str(), unicode.c_str() + unicode.size() + 1);
            res_offsets.push_back(res_data.size());

            prev_offset = offsets[row];

            unicode.clear();
        }
    }

    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed");
    }
};

struct NameIdnaEncode { static constexpr auto name = "idnaEncode"; };
struct NameTryIdnaEncode { static constexpr auto name = "tryIdnaEncode"; };
struct NameIdnaDecode { static constexpr auto name = "idnaDecode"; };

using FunctionIdnaEncode = FunctionStringToString<IdnaEncode<ErrorHandling::Throw>, NameIdnaEncode>;
using FunctionTryIdnaEncode = FunctionStringToString<IdnaEncode<ErrorHandling::Empty>, NameTryIdnaEncode>;
using FunctionIdnaDecode = FunctionStringToString<IdnaDecode, NameIdnaDecode>;

REGISTER_FUNCTION(Idna)
{
    factory.registerFunction<FunctionIdnaEncode>(FunctionDocumentation{
        .description=R"(
Computes an ASCII representation of an Internationalized Domain Name. Throws an exception in case of error.)",
        .syntax="idnaEncode(str)",
        .arguments={{"str", "Input string"}},
        .returned_value="An ASCII-encoded domain name [String](/docs/en/sql-reference/data-types/string.md).",
        .examples={
            {"simple",
            "SELECT idnaEncode('straße.münchen.de') AS ascii;",
            R"(
┌─ascii───────────────────────────┐
│ xn--strae-oqa.xn--mnchen-3ya.de │
└─────────────────────────────────┘
)"
            }}
    });
    factory.registerFunction<FunctionTryIdnaEncode>(FunctionDocumentation{
        .description=R"(
Computes an ASCII representation of an Internationalized Domain Name. Returns an empty string in case of error.)",
        .syntax="tryIdnaEncode(str)",
        .arguments={{"str", "Input string"}},
        .returned_value="An ASCII-encoded domain name [String](/docs/en/sql-reference/data-types/string.md).",
        .examples={
            {"simple",
            "SELECT tryIdnaEncode('straße.münchen.de') AS ascii;",
            R"(
┌─ascii───────────────────────────┐
│ xn--strae-oqa.xn--mnchen-3ya.de │
└─────────────────────────────────┘
)"
            }}
    });
    factory.registerFunction<FunctionIdnaDecode>(FunctionDocumentation{
        .description=R"(
Computes the Unicode representation of an ASCII-encoded Internationalized Domain Name.)",
        .syntax="idnaDecode(str)",
        .arguments={{"str", "Input string"}},
        .returned_value="A Unicode-encoded domain name [String](/docs/en/sql-reference/data-types/string.md).",
        .examples={
            {"simple",
            "SELECT idnaDecode('xn--strae-oqa.xn--mnchen-3ya.de') AS unicode;",
            R"(
┌─unicode───────────┐
│ straße.münchen.de │
└───────────────────┘
)"
            }}
    });
}

}

#endif
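For a feel of what the new functions compute, the underlying library calls can be exercised directly. A sketch using the same `ada::idna` entry points the diff uses (assumes the vendored ada-url/idna headers are on the include path):

```cpp
#include <iostream>
#include <string>
#include <ada/idna/to_ascii.h>
#include <ada/idna/to_unicode.h>

int main()
{
    // Round trip through the same calls idnaEncode()/idnaDecode() make.
    std::string ascii = ada::idna::to_ascii("straße.münchen.de");
    std::cout << ascii << '\n';                        // xn--strae-oqa.xn--mnchen-3ya.de
    std::cout << ada::idna::to_unicode(ascii) << '\n'; // straße.münchen.de
}
```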
206 src/Functions/punycode.cpp Normal file
@ -0,0 +1,206 @@
#include "config.h"

#if USE_IDNA

#include <Columns/ColumnString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>

#ifdef __clang__
#    pragma clang diagnostic push
#    pragma clang diagnostic ignored "-Wnewline-eof"
#endif
#    include <ada/idna/punycode.h>
#    include <ada/idna/unicode_transcoding.h>
#ifdef __clang__
#    pragma clang diagnostic pop
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int NOT_IMPLEMENTED;
}

/// Implementation of
/// - punycodeEncode(), punycodeDecode() and tryPunycodeDecode(), see https://en.wikipedia.org/wiki/Punycode

enum class ErrorHandling
{
    Throw,  /// Throw exception
    Empty   /// Return empty string
};


struct PunycodeEncode
{
    /// Encoding-as-punycode can only fail if the input isn't valid UTF8. In that case, return undefined output, i.e. garbage-in, garbage-out.
    static void vector(
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data,
        ColumnString::Offsets & res_offsets)
    {
        const size_t rows = offsets.size();
        res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII
        res_offsets.reserve(rows);

        size_t prev_offset = 0;
        std::u32string value_utf32;
        std::string value_puny;
        for (size_t row = 0; row < rows; ++row)
        {
            const char * value = reinterpret_cast<const char *>(&data[prev_offset]);
            const size_t value_length = offsets[row] - prev_offset - 1;

            const size_t value_utf32_length = ada::idna::utf32_length_from_utf8(value, value_length);
            value_utf32.resize(value_utf32_length);
            const size_t codepoints = ada::idna::utf8_to_utf32(value, value_length, value_utf32.data());
            if (codepoints == 0)
                value_utf32.clear(); /// input was empty or not valid UTF-8

            const bool ok = ada::idna::utf32_to_punycode(value_utf32, value_puny);
            if (!ok)
                value_puny.clear();

            res_data.insert(value_puny.c_str(), value_puny.c_str() + value_puny.size() + 1);
            res_offsets.push_back(res_data.size());

            prev_offset = offsets[row];

            value_utf32.clear();
            value_puny.clear(); /// utf32_to_punycode() appends to its output string
        }
    }

    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed");
    }
};


template <ErrorHandling error_handling>
struct PunycodeDecode
{
    static void vector(
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        ColumnString::Chars & res_data,
        ColumnString::Offsets & res_offsets)
    {
        const size_t rows = offsets.size();
        res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII
        res_offsets.reserve(rows);

        size_t prev_offset = 0;
        std::u32string value_utf32;
        std::string value_utf8;
        for (size_t row = 0; row < rows; ++row)
        {
            const char * value = reinterpret_cast<const char *>(&data[prev_offset]);
            const size_t value_length = offsets[row] - prev_offset - 1;

            const std::string_view value_punycode(value, value_length);
            const bool ok = ada::idna::punycode_to_utf32(value_punycode, value_utf32);
            if (!ok)
            {
                if constexpr (error_handling == ErrorHandling::Throw)
                {
                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "'{}' is not a valid Punycode-encoded string", value_punycode);
                }
                else
                {
                    static_assert(error_handling == ErrorHandling::Empty);
                    value_utf32.clear();
                }
            }

            const size_t utf8_length = ada::idna::utf8_length_from_utf32(value_utf32.data(), value_utf32.size());
            value_utf8.resize(utf8_length);
            ada::idna::utf32_to_utf8(value_utf32.data(), value_utf32.size(), value_utf8.data());

            res_data.insert(value_utf8.c_str(), value_utf8.c_str() + value_utf8.size() + 1);
            res_offsets.push_back(res_data.size());

            prev_offset = offsets[row];

            value_utf32.clear(); /// punycode_to_utf32() appends to its output string
            value_utf8.clear();
        }
    }

    [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed");
    }
};

struct NamePunycodeEncode { static constexpr auto name = "punycodeEncode"; };
struct NamePunycodeDecode { static constexpr auto name = "punycodeDecode"; };
struct NameTryPunycodeDecode { static constexpr auto name = "tryPunycodeDecode"; };

using FunctionPunycodeEncode = FunctionStringToString<PunycodeEncode, NamePunycodeEncode>;
using FunctionPunycodeDecode = FunctionStringToString<PunycodeDecode<ErrorHandling::Throw>, NamePunycodeDecode>;
using FunctionTryPunycodeDecode = FunctionStringToString<PunycodeDecode<ErrorHandling::Empty>, NameTryPunycodeDecode>;
REGISTER_FUNCTION(Punycode)
|
||||
{
|
||||
factory.registerFunction<FunctionPunycodeEncode>(FunctionDocumentation{
|
||||
.description=R"(
|
||||
Computes a Punycode representation of a string.)",
|
||||
.syntax="punycodeEncode(str)",
|
||||
.arguments={{"str", "Input string"}},
|
||||
.returned_value="The punycode representation [String](/docs/en/sql-reference/data-types/string.md).",
|
||||
.examples={
|
||||
{"simple",
|
||||
"SELECT punycodeEncode('München') AS puny;",
|
||||
R"(
|
||||
┌─puny───────┐
|
||||
│ Mnchen-3ya │
|
||||
└────────────┘
|
||||
)"
|
||||
}}
|
||||
});

    factory.registerFunction<FunctionPunycodeDecode>(FunctionDocumentation{
        .description=R"(
Computes the plaintext of a Punycode-encoded string. Throws an exception if the input is not valid Punycode.)",
        .syntax="punycodeDecode(str)",
        .arguments={{"str", "A Punycode-encoded string"}},
        .returned_value="The plaintext representation [String](/docs/en/sql-reference/data-types/string.md).",
        .examples={
            {"simple",
            "SELECT punycodeDecode('Mnchen-3ya') AS plain;",
            R"(
┌─plain───┐
│ München │
└─────────┘
)"
            }}
    });

    factory.registerFunction<FunctionTryPunycodeDecode>(FunctionDocumentation{
        .description=R"(
Computes the plaintext of a Punycode-encoded string. Returns an empty string if the input is not valid Punycode.)",
        .syntax="tryPunycodeDecode(str)",
        .arguments={{"str", "A Punycode-encoded string"}},
        .returned_value="The plaintext representation [String](/docs/en/sql-reference/data-types/string.md).",
        .examples={
            {"simple",
            "SELECT tryPunycodeDecode('Mnchen-3ya') AS plain;",
            R"(
┌─plain───┐
│ München │
└─────────┘
)"
            }}
    });
}

}

#endif
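
A quick usage sketch of the three functions registered above, assembled from the documentation examples embedded in the code (the invalid-input case is illustrative: it assumes the ada::idna decoder rejects non-ASCII input, in which case ErrorHandling::Empty yields an empty string instead of an exception):

    SELECT punycodeEncode('München');      -- 'Mnchen-3ya'
    SELECT punycodeDecode('Mnchen-3ya');   -- 'München'
    SELECT tryPunycodeDecode('München');   -- '' (not valid Punycode, so the try-variant returns empty)
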

@ -56,8 +56,7 @@ public:
        auto column = ColumnUInt64::create();
        auto & data = column->getData();
        data.resize(input_rows_count);
        for (size_t i = 0; i < input_rows_count; ++i)
            data[i] = i;
        iota(data.data(), input_rows_count, UInt64(0));

        return column;
    }

@ -1,6 +1,6 @@
#include "config.h"

#ifdef ENABLE_SQIDS
#if USE_SQIDS

#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
@ -57,9 +57,10 @@ public:

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        size_t num_args = arguments.size();
        auto col_res = ColumnString::create();
        col_res->reserve(input_rows_count);

        const size_t num_args = arguments.size();
        std::vector<UInt64> numbers(num_args);
        for (size_t i = 0; i < input_rows_count; ++i)
        {
@ -83,7 +84,7 @@ REGISTER_FUNCTION(Sqid)
{
    factory.registerFunction<FunctionSqid>(FunctionDocumentation{
        .description=R"(
Transforms numbers into YouTube-like short URL hash called [Sqid](https://sqids.org/).)",
Transforms numbers into a [Sqid](https://sqids.org/), a YouTube-like short ID string.)",
        .syntax="sqid(number1, ...)",
        .arguments={{"number1, ...", "Arbitrarily many UInt8, UInt16, UInt32 or UInt64 arguments"}},
        .returned_value="A hash id [String](/docs/en/sql-reference/data-types/string.md).",
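
For reference, a hedged usage sketch of the function registered above. The output value is the canonical default-alphabet example from sqids.org and assumes ClickHouse uses the library defaults; treat it as illustrative:

    SELECT sqid(1, 2, 3) AS id;

    ┌─id─────┐
    │ 86Rf07 │
    └────────┘
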
@ -3,6 +3,7 @@
#include <Columns/ColumnConst.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionFactory.h>
#include <Common/iota.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/UTF8Helpers.h>
#include <Common/HashTable/HashMap.h>
@ -31,7 +32,7 @@ struct TranslateImpl
        if (map_from.size() != map_to.size())
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second and third arguments must be the same length");

        std::iota(map.begin(), map.end(), 0);
        iota(map.data(), map.size(), UInt8(0));

        for (size_t i = 0; i < map_from.size(); ++i)
        {
@ -129,7 +130,7 @@ struct TranslateUTF8Impl
        if (map_from_size != map_to_size)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second and third arguments must be the same length");

        std::iota(map_ascii.begin(), map_ascii.end(), 0);
        iota(map_ascii.data(), map_ascii.size(), UInt32(0));

        const UInt8 * map_from_ptr = reinterpret_cast<const UInt8 *>(map_from.data());
        const UInt8 * map_from_end = map_from_ptr + map_from.size();

@ -1382,8 +1382,12 @@ void skipJSONField(ReadBuffer & buf, StringRef name_of_field)
    }
    else
    {
        throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected symbol '{}' for key '{}'",
            std::string(*buf.position(), 1), name_of_field.toString());
        throw Exception(
            ErrorCodes::INCORRECT_DATA,
            "Cannot read JSON field here: '{}'. Unexpected symbol '{}'{}",
            String(buf.position(), std::min(buf.available(), size_t(10))),
            std::string(1, *buf.position()),
            name_of_field.empty() ? "" : " for key " + name_of_field.toString());
    }
}

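Two things are worth spelling out about this hunk. First, the removed line constructed std::string(*buf.position(), 1), which swaps the (count, char) argument order of the fill constructor; the new std::string(1, *buf.position()) builds the intended one-character string. Second, reading off the new format string, an error on a hypothetical malformed input such as {"k": #} would render roughly as:

    Cannot read JSON field here: '#}'. Unexpected symbol '#' for key k

where the first placeholder shows up to 10 bytes of remaining buffer and the key suffix is omitted when name_of_field is empty (the input and rendered message are illustrative, derived only from the format arguments above).
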
@ -1753,7 +1757,7 @@ void readQuotedField(String & s, ReadBuffer & buf)
void readJSONField(String & s, ReadBuffer & buf)
{
    s.clear();
    auto parse_func = [](ReadBuffer & in) { skipJSONField(in, "json_field"); };
    auto parse_func = [](ReadBuffer & in) { skipJSONField(in, ""); };
    readParsedValueInto(s, buf, parse_func);
}

@ -115,6 +115,7 @@ public:
    explicit ActionsDAG(const ColumnsWithTypeAndName & inputs_);

    const Nodes & getNodes() const { return nodes; }
    static Nodes detachNodes(ActionsDAG && dag) { return std::move(dag.nodes); }
    const NodeRawConstPtrs & getOutputs() const { return outputs; }

    /** Output nodes can contain any column returned from DAG.
      * You may manually change it if needed.

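The new detachNodes accessor consumes the DAG rather than lending a reference the way getNodes does. A hedged usage sketch, assuming only the two signatures shown in this hunk (the surrounding caller code is hypothetical):

    const auto & nodes_view = dag.getNodes();                     /// borrow: dag stays valid
    auto owned_nodes = ActionsDAG::detachNodes(std::move(dag));   /// take ownership: dag is moved-from and must not be used afterwards
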
Some files were not shown because too many files have changed in this diff.