Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 17:12:03 +00:00)

Commit 9ca4be5d91: Merge remote-tracking branch 'origin/master' into revert-58450-revert-56064-feature-server-iface-metrics
@@ -375,6 +375,7 @@
* Do not interpret the `send_timeout` set on the client side as the `receive_timeout` on the server side and vice versa. [#56035](https://github.com/ClickHouse/ClickHouse/pull/56035) ([Azat Khuzhin](https://github.com/azat)).
* Comparison of time intervals with different units will throw an exception (see the sketch below). This closes [#55942](https://github.com/ClickHouse/ClickHouse/issues/55942). You might have occasionally relied on the previous behavior, when the underlying numeric values were compared regardless of the units. [#56090](https://github.com/ClickHouse/ClickHouse/pull/56090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rewrote the experimental `S3Queue` table engine completely: changed the way we keep information in ZooKeeper, which allows making fewer ZooKeeper requests; added caching of the ZooKeeper state in cases when we know the state will not change; made the polling of S3 less aggressive; and changed the way the TTL and the maximum set of tracked files are maintained, which is now a background process. Added `system.s3queue` and `system.s3queue_log` tables. Closes [#54998](https://github.com/ClickHouse/ClickHouse/issues/54998). [#54422](https://github.com/ClickHouse/ClickHouse/pull/54422) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Arbitrary paths on the HTTP endpoint are no longer interpreted as a request to the `/query` endpoint. [#55521](https://github.com/ClickHouse/ClickHouse/pull/55521) ([Konstantin Bogdanov](https://github.com/thevar1able)).
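A minimal illustration of the interval change above (hedged; the exact error code may differ):

```sql
-- Same unit: still allowed.
SELECT toIntervalHour(1) < toIntervalHour(2);
-- Different units (hours vs. minutes): now throws an exception
-- instead of silently comparing the underlying numbers 1 and 90.
SELECT toIntervalHour(1) < toIntervalMinute(90);
```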

#### New Feature

* Add function `arrayFold(accumulator, x1, ..., xn -> expression, initial, array1, ..., arrayn)` which applies a lambda function to multiple arrays of the same cardinality and collects the result in an accumulator (see the sketch below). [#49794](https://github.com/ClickHouse/ClickHouse/pull/49794) ([Lirikl](https://github.com/Lirikl)).
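A minimal sketch of `arrayFold`. Note that the released function puts the accumulator last (`arrayFold(lambda, array1, ..., arrayN, initial)`), which differs slightly from the signature quoted in the entry above:

```sql
-- Fold [1, 2, 3, 4] into a running sum starting from 0: returns 10.
SELECT arrayFold((acc, x) -> acc + x, [1, 2, 3, 4], toUInt64(0)) AS res;
```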

@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \

# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.12.1.1368"
+ARG VERSION="23.12.2.59"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \

# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.12.1.1368"
+ARG VERSION="23.12.2.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.12.1.1368"
+ARG VERSION="23.12.2.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image

@@ -236,6 +236,10 @@ function check_logs_for_critical_errors()
    && echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
    || echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv

+rg -Fa "it is lost forever" /var/log/clickhouse-server/clickhouse-server*.log | grep 'SharedMergeTreePartCheckThread' > /dev/null \
+    && echo -e "Lost forever for SharedMergeTree$FAIL" >> /test_output/test_results.tsv \
+    || echo -e "No SharedMergeTree lost forever in clickhouse-server.log$OK" >> /test_output/test_results.tsv
+
# Remove file no_such_key_errors.txt if it's empty
[ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt

docs/changelogs/v23.10.6.60-stable.md (new file, 51 lines)

@@ -0,0 +1,51 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.10.6.60-stable (68907bbe643) FIXME as compared to v23.10.5.20-stable (e84001e5c61)

#### Improvement
* Backported in [#58493](https://github.com/ClickHouse/ClickHouse/issues/58493): Fix the conversion of queries to MySQL-compatible queries. Fixes [#57253](https://github.com/ClickHouse/ClickHouse/issues/57253). Fixes [#52654](https://github.com/ClickHouse/ClickHouse/issues/52654). Fixes [#56729](https://github.com/ClickHouse/ClickHouse/issues/56729). [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Backported in [#57659](https://github.com/ClickHouse/ClickHouse/issues/57659): Handle the SIGABRT case when getting a PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).

#### Build/Testing/Packaging Improvement
* Backported in [#57586](https://github.com/ClickHouse/ClickHouse/issues/57586): Fix the issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only a true Nested type if `flatten_nested=1`, not all `Array(Tuple)` columns (see the sketch after this list). [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix ALTER COLUMN with ALIAS [#56493](https://github.com/ClickHouse/ClickHouse/pull/56493) ([Nikolay Degterinsky](https://github.com/evillique)).
* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* Fix `ReadonlyReplica` metric for all cases [#57267](https://github.com/ClickHouse/ClickHouse/pull/57267) ([Antonio Andelic](https://github.com/antonio2368)).
* Background merges correctly use temporary data storage in the cache [#57275](https://github.com/ClickHouse/ClickHouse/pull/57275) ([vdimir](https://github.com/vdimir)).
* MergeTree mutations reuse source part index granularity [#57352](https://github.com/ClickHouse/ClickHouse/pull/57352) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix function jsonMergePatch for partially const columns [#57379](https://github.com/ClickHouse/ClickHouse/pull/57379) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* Bugfix: correctly parse SYSTEM STOP LISTEN TCP SECURE [#57483](https://github.com/ClickHouse/ClickHouse/pull/57483) ([joelynch](https://github.com/joelynch)).
* Ignore the ON CLUSTER clause in GRANT/REVOKE queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Disable system.kafka_consumers by default (due to a possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable MergeTreePrefetchedReadPool for LIMIT-only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).
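A hedged sketch of the `flatten_nested` fix referenced above (table and column names are hypothetical):

```sql
SET flatten_nested = 1;
-- A true Nested column is still flattened into per-field arrays
-- (`n.id Array(UInt32)`, `n.name Array(String)`):
CREATE TABLE nested_example (n Nested(id UInt32, name String))
ENGINE = MergeTree ORDER BY tuple();
-- After the fix, a plain Array(Tuple(...)) column is kept as a single
-- column instead of being flattened as well:
CREATE TABLE tuple_example (t Array(Tuple(id UInt32, name String)))
ENGINE = MergeTree ORDER BY tuple();
```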

#### NO CL CATEGORY

* Backported in [#57916](https://github.com/ClickHouse/ClickHouse/issues/57916):. [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).

docs/changelogs/v23.11.4.24-stable.md (new file, 26 lines)

@@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.11.4.24-stable (e79d840d7fe) FIXME as compared to v23.11.3.23-stable (a14ab450b0e)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only a true Nested type if `flatten_nested=1`, not all `Array(Tuple)` columns [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable system.kafka_consumers by default (due to a possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable MergeTreePrefetchedReadPool for LIMIT-only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Handle another case for preprocessing in Keeper [#58308](https://github.com/ClickHouse/ClickHouse/pull/58308) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).

docs/changelogs/v23.12.2.59-stable.md (new file, 32 lines)

@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.2.59-stable (17ab210e761) FIXME as compared to v23.12.1.1368-stable (a2faa65b080)

#### Backward Incompatible Change
* Backported in [#58389](https://github.com/ClickHouse/ClickHouse/issues/58389): The MergeTree setting `clean_deleted_rows` is deprecated; it has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (unless `allow_experimental_replacing_merge_with_cleanup` is enabled; see the sketch below). [#58316](https://github.com/ClickHouse/ClickHouse/pull/58316) ([Alexander Tokmakov](https://github.com/tavplubix)).
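A hedged sketch of the new gate on `OPTIMIZE ... CLEANUP` (the table name is hypothetical; assumes a ReplacingMergeTree with an `is_deleted` column):

```sql
-- Now rejected by default:
OPTIMIZE TABLE replacing_example FINAL CLEANUP;
-- Allowed only after opting in to the experimental behavior:
SET allow_experimental_replacing_merge_with_cleanup = 1;
OPTIMIZE TABLE replacing_example FINAL CLEANUP;
```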

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only a true Nested type if `flatten_nested=1`, not all `Array(Tuple)` columns [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix lost blobs after dropping a replica with broken detached parts [#58333](https://github.com/ClickHouse/ClickHouse/pull/58333) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix segfault when a Graphite table does not have an aggregate function [#58453](https://github.com/ClickHouse/ClickHouse/pull/58453) ([Duc Canh Le](https://github.com/canhld94)).
* Disable MergeTreePrefetchedReadPool for LIMIT-only queries [#58505](https://github.com/ClickHouse/ClickHouse/pull/58505) ([Maksim Kita](https://github.com/kitaisreal)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Refreshable materialized views (takeover)"'. [#58296](https://github.com/ClickHouse/ClickHouse/pull/58296) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix an error in the release script - it didn't allow making 23.12. [#58288](https://github.com/ClickHouse/ClickHouse/pull/58288) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.12.1.1368-stable [#58290](https://github.com/ClickHouse/ClickHouse/pull/58290) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix test_storage_s3_queue/test.py::test_drop_table [#58293](https://github.com/ClickHouse/ClickHouse/pull/58293) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Handle another case for preprocessing in Keeper [#58308](https://github.com/ClickHouse/ClickHouse/pull/58308) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix test_user_valid_until [#58409](https://github.com/ClickHouse/ClickHouse/pull/58409) ([Nikolay Degterinsky](https://github.com/evillique)).

docs/changelogs/v23.3.19.32-lts.md (new file, 36 lines)

@@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.3.19.32-lts (c4d4ca8ec02) FIXME as compared to v23.3.18.15-lts (7228475d77a)

#### Backward Incompatible Change
* Backported in [#57840](https://github.com/ClickHouse/ClickHouse/issues/57840): Remove function `arrayFold` because it has a bug. This closes [#57816](https://github.com/ClickHouse/ClickHouse/issues/57816). This closes [#57458](https://github.com/ClickHouse/ClickHouse/issues/57458). [#57836](https://github.com/ClickHouse/ClickHouse/pull/57836) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Improvement
* Backported in [#58489](https://github.com/ClickHouse/ClickHouse/issues/58489): Fix the conversion of queries to MySQL-compatible queries. Fixes [#57253](https://github.com/ClickHouse/ClickHouse/issues/57253). Fixes [#52654](https://github.com/ClickHouse/ClickHouse/issues/52654). Fixes [#56729](https://github.com/ClickHouse/ClickHouse/issues/56729). [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Backported in [#57653](https://github.com/ClickHouse/ClickHouse/issues/57653): Handle the SIGABRT case when getting a PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).

#### Build/Testing/Packaging Improvement
* Backported in [#57580](https://github.com/ClickHouse/ClickHouse/issues/57580): Fix the issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* MergeTree mutations reuse source part index granularity [#57352](https://github.com/ClickHouse/ClickHouse/pull/57352) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).

docs/changelogs/v23.8.9.54-lts.md (new file, 47 lines)

@@ -0,0 +1,47 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.8.9.54-lts (192a1d231fa) FIXME as compared to v23.8.8.20-lts (5e012a03bf2)

#### Improvement
* Backported in [#57668](https://github.com/ClickHouse/ClickHouse/issues/57668): Output valid JSON/XML on exception during HTTP query execution. Add setting `http_write_exception_in_output_format` to enable/disable this behaviour (enabled by default; see the sketch after this list). [#52853](https://github.com/ClickHouse/ClickHouse/pull/52853) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#58491](https://github.com/ClickHouse/ClickHouse/issues/58491): Fix the conversion of queries to MySQL-compatible queries. Fixes [#57253](https://github.com/ClickHouse/ClickHouse/issues/57253). Fixes [#52654](https://github.com/ClickHouse/ClickHouse/issues/52654). Fixes [#56729](https://github.com/ClickHouse/ClickHouse/issues/56729). [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Backported in [#57238](https://github.com/ClickHouse/ClickHouse/issues/57238): Fetching a part now waits until that part is fully committed on the remote replica. It is better not to send a part in the PreActive state; in the case of zero-copy replication this is a mandatory restriction. [#56808](https://github.com/ClickHouse/ClickHouse/pull/56808) ([Sema Checherinda](https://github.com/CheSema)).
* Backported in [#57655](https://github.com/ClickHouse/ClickHouse/issues/57655): Handle the SIGABRT case when getting a PostgreSQL table structure with an empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)).
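A hedged sketch of the `http_write_exception_in_output_format` behavior described above; `throwIf` forces a mid-query failure so the HTTP response still ends as valid JSON:

```sql
SELECT throwIf(number = 5) FROM numbers(10)
FORMAT JSON
SETTINGS http_write_exception_in_output_format = 1;
```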

#### Build/Testing/Packaging Improvement
* Backported in [#57582](https://github.com/ClickHouse/ClickHouse/issues/57582): Fix the issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Flatten only a true Nested type if `flatten_nested=1`, not all `Array(Tuple)` columns [#56132](https://github.com/ClickHouse/ClickHouse/pull/56132) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix ALTER COLUMN with ALIAS [#56493](https://github.com/ClickHouse/ClickHouse/pull/56493) ([Nikolay Degterinsky](https://github.com/evillique)).
* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* Fix `ReadonlyReplica` metric for all cases [#57267](https://github.com/ClickHouse/ClickHouse/pull/57267) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix working with read buffers in StreamingFormatExecutor [#57438](https://github.com/ClickHouse/ClickHouse/pull/57438) ([Kruglov Pavel](https://github.com/Avogar)).
* Bugfix: correctly parse SYSTEM STOP LISTEN TCP SECURE [#57483](https://github.com/ClickHouse/ClickHouse/pull/57483) ([joelynch](https://github.com/joelynch)).
* Ignore the ON CLUSTER clause in GRANT/REVOKE queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Disable system.kafka_consumers by default (due to a possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)).
* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)).
* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)).

#### NO CL ENTRY

* NO CL ENTRY: 'Update PeekableWriteBuffer.cpp'. [#57701](https://github.com/ClickHouse/ClickHouse/pull/57701) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).

@@ -3847,6 +3847,8 @@ Possible values:

- `none` — Similar to `throw`, but the distributed DDL query returns no result set.
- `null_status_on_timeout` — Returns `NULL` as the execution status in some rows of the result set instead of throwing `TIMEOUT_EXCEEDED` if the query is not finished on the corresponding hosts.
- `never_throw` — Do not throw `TIMEOUT_EXCEEDED` and do not rethrow exceptions if the query has failed on some hosts.
+- `null_status_on_timeout_only_active` — similar to `null_status_on_timeout`, but doesn't wait for inactive replicas of the `Replicated` database
+- `throw_only_active` — similar to `throw`, but doesn't wait for inactive replicas of the `Replicated` database

Default value: `throw`.
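A hedged usage sketch for `distributed_ddl_output_mode` (the cluster name `my_cluster` is hypothetical):

```sql
-- Return NULL statuses for hosts that time out instead of throwing
-- TIMEOUT_EXCEEDED for the whole distributed DDL query.
SET distributed_ddl_output_mode = 'null_status_on_timeout';
CREATE TABLE IF NOT EXISTS default.ddl_example ON CLUSTER my_cluster
(x UInt8) ENGINE = MergeTree ORDER BY x;
```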

@@ -27,7 +27,7 @@ $ clickhouse-format --query "select number from numbers(10) where number%2 order

Result:

-```sql
+```bash
SELECT number
FROM numbers(10)
WHERE number % 2
@@ -49,22 +49,20 @@ SELECT sum(number) FROM numbers(5)

3. Multiqueries:

```bash
-$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);"
+$ clickhouse-format -n <<< "SELECT min(number) FROM numbers(5); SELECT max(number) FROM numbers(5);"
```

Result:

```sql
-SELECT *
-FROM
-(
-    SELECT 1 AS x
-    UNION ALL
-    SELECT 1
-    UNION DISTINCT
-    SELECT 3
-)
+SELECT min(number)
+FROM numbers(5)
+;
+
+SELECT max(number)
+FROM numbers(5)
+;
+
```

4. Obfuscating:

@@ -75,7 +73,7 @@ $ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWE

Result:

-```sql
+```
SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END;
```

@@ -87,7 +85,7 @@ $ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWE

Result:

-```sql
+```
SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END;
```

@@ -99,7 +97,7 @@ $ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELE

Result:

-```sql
+```
SELECT * \
FROM \
( \

@@ -1483,7 +1483,9 @@ For mode values with a meaning of “with 4 or more days this year,” weeks are

- Otherwise, it is the last week of the previous year, and the next week is week 1.

-For mode values with a meaning of “contains January 1”, the week contains January 1 is week 1. It does not matter how many days in the new year the week contained, even if it contained only one day.
+For mode values with a meaning of “contains January 1”, the week that contains January 1 is week 1.
+It does not matter how many days of the new year the week contains, even if it contains only one day.
+I.e. if the last week of December contains January 1 of the next year, it will be week 1 of the next year.

**Syntax**

@@ -1779,7 +1779,9 @@ Result:

## sqid

-Transforms numbers into YouTube-like short URL hash called [Sqid](https://sqids.org/).
+Transforms numbers into a [Sqid](https://sqids.org/), which is a YouTube-like ID string.
+The output alphabet is `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`.
+Do not use this function for hashing: the generated IDs can be decoded back into numbers.
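A hedged sketch of `sqid` (the exact output string is illustrative, not guaranteed):

```sql
-- Encodes the numbers into a short, decodable ID string
-- drawn from the alphabet listed above.
SELECT sqid(1, 2, 3) AS id;
```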
**Syntax**

@@ -53,7 +53,7 @@ The rounded number of the same type as the input number.

**Example of use with Float**

``` sql
-SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
+SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3;
```

``` text
@@ -67,7 +67,22 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3

**Example of use with Decimal**

``` sql
-SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3
+SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3;
```

``` text
┌───x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
│   0 │                                                0 │
│ 0.5 │                                                1 │
│   1 │                                                1 │
└─────┴──────────────────────────────────────────────────┘
```

If you want to keep the trailing zeros, you need to enable `output_format_decimal_trailing_zeros`:

``` sql
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3 settings output_format_decimal_trailing_zeros=1;
```

``` text

@@ -578,7 +578,9 @@ SELECT

- Otherwise, it is the last week of the previous year, and the next week is week 1.

-For modes with the meaning “contains January 1”, week 1 is the week that contains January 1. It does not matter how many days of the new year the week contained, even if it contained only one day.
+For modes with the meaning “contains January 1”, week 1 is the week that contains January 1.
+It does not matter how many days of the new year this week contains, even if it contains only one day.
+So, if the last week of December contains January 1 of the next year, it is counted as week 1 of the next year.

**Example**

@@ -1272,11 +1272,11 @@ try
{
    Settings::checkNoSettingNamesAtTopLevel(*config, config_path);

-   ServerSettings server_settings_;
-   server_settings_.loadSettingsFromConfig(*config);
+   ServerSettings new_server_settings;
+   new_server_settings.loadSettingsFromConfig(*config);

-   size_t max_server_memory_usage = server_settings_.max_server_memory_usage;
-   double max_server_memory_usage_to_ram_ratio = server_settings_.max_server_memory_usage_to_ram_ratio;
+   size_t max_server_memory_usage = new_server_settings.max_server_memory_usage;
+   double max_server_memory_usage_to_ram_ratio = new_server_settings.max_server_memory_usage_to_ram_ratio;

    size_t current_physical_server_memory = getMemoryAmount(); /// With cgroups, the amount of memory available to the server can be changed dynamically.
    size_t default_max_server_memory_usage = static_cast<size_t>(current_physical_server_memory * max_server_memory_usage_to_ram_ratio);

@@ -1306,9 +1306,9 @@ try
    total_memory_tracker.setDescription("(total)");
    total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);

-   size_t merges_mutations_memory_usage_soft_limit = server_settings_.merges_mutations_memory_usage_soft_limit;
+   size_t merges_mutations_memory_usage_soft_limit = new_server_settings.merges_mutations_memory_usage_soft_limit;

-   size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * server_settings_.merges_mutations_memory_usage_to_ram_ratio);
+   size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
    if (merges_mutations_memory_usage_soft_limit == 0)
    {
        merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;

@@ -1316,7 +1316,7 @@ try
        " ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
        formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
        formatReadableSizeWithBinarySuffix(current_physical_server_memory),
-       server_settings_.merges_mutations_memory_usage_to_ram_ratio);
+       new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
    }
    else if (merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
    {

@@ -1325,7 +1325,7 @@ try
        " ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
        formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
        formatReadableSizeWithBinarySuffix(current_physical_server_memory),
-       server_settings_.merges_mutations_memory_usage_to_ram_ratio);
+       new_server_settings.merges_mutations_memory_usage_to_ram_ratio);
    }

    LOG_INFO(log, "Merges and mutations memory limit is set to {}",

@@ -1334,7 +1334,7 @@ try
    background_memory_tracker.setDescription("(background)");
    background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);

-   total_memory_tracker.setAllowUseJemallocMemory(server_settings_.allow_use_jemalloc_memory);
+   total_memory_tracker.setAllowUseJemallocMemory(new_server_settings.allow_use_jemalloc_memory);

    auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
    total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);

@@ -1358,26 +1358,26 @@ try
    global_context->setRemoteHostFilter(*config);
    global_context->setHTTPHeaderFilter(*config);

-   global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop);
-   global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop);
-   global_context->setMaxTableNumToWarn(server_settings_.max_table_num_to_warn);
-   global_context->setMaxDatabaseNumToWarn(server_settings_.max_database_num_to_warn);
-   global_context->setMaxPartNumToWarn(server_settings_.max_part_num_to_warn);
+   global_context->setMaxTableSizeToDrop(new_server_settings.max_table_size_to_drop);
+   global_context->setMaxPartitionSizeToDrop(new_server_settings.max_partition_size_to_drop);
+   global_context->setMaxTableNumToWarn(new_server_settings.max_table_num_to_warn);
+   global_context->setMaxDatabaseNumToWarn(new_server_settings.max_database_num_to_warn);
+   global_context->setMaxPartNumToWarn(new_server_settings.max_part_num_to_warn);

    ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited;
-   if (server_settings_.concurrent_threads_soft_limit_num > 0 && server_settings_.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
-       concurrent_threads_soft_limit = server_settings_.concurrent_threads_soft_limit_num;
-   if (server_settings_.concurrent_threads_soft_limit_ratio_to_cores > 0)
+   if (new_server_settings.concurrent_threads_soft_limit_num > 0 && new_server_settings.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
+       concurrent_threads_soft_limit = new_server_settings.concurrent_threads_soft_limit_num;
+   if (new_server_settings.concurrent_threads_soft_limit_ratio_to_cores > 0)
    {
-       auto value = server_settings_.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
+       auto value = new_server_settings.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
        if (value > 0 && value < concurrent_threads_soft_limit)
            concurrent_threads_soft_limit = value;
    }
    ConcurrencyControl::instance().setMaxConcurrency(concurrent_threads_soft_limit);

-   global_context->getProcessList().setMaxSize(server_settings_.max_concurrent_queries);
-   global_context->getProcessList().setMaxInsertQueriesAmount(server_settings_.max_concurrent_insert_queries);
-   global_context->getProcessList().setMaxSelectQueriesAmount(server_settings_.max_concurrent_select_queries);
+   global_context->getProcessList().setMaxSize(new_server_settings.max_concurrent_queries);
+   global_context->getProcessList().setMaxInsertQueriesAmount(new_server_settings.max_concurrent_insert_queries);
+   global_context->getProcessList().setMaxSelectQueriesAmount(new_server_settings.max_concurrent_select_queries);

    if (config->has("keeper_server"))
        global_context->updateKeeperConfiguration(*config);

@@ -1388,68 +1388,68 @@ try
    /// This is done for backward compatibility.
    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_pool_size;
-       auto new_ratio = server_settings_.background_merges_mutations_concurrency_ratio;
+       auto new_pool_size = new_server_settings.background_pool_size;
+       auto new_ratio = new_server_settings.background_merges_mutations_concurrency_ratio;
        global_context->getMergeMutateExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, static_cast<size_t>(new_pool_size * new_ratio));
-       global_context->getMergeMutateExecutor()->updateSchedulingPolicy(server_settings_.background_merges_mutations_scheduling_policy.toString());
+       global_context->getMergeMutateExecutor()->updateSchedulingPolicy(new_server_settings.background_merges_mutations_scheduling_policy.toString());
    }

    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_move_pool_size;
+       auto new_pool_size = new_server_settings.background_move_pool_size;
        global_context->getMovesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
    }

    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_fetches_pool_size;
+       auto new_pool_size = new_server_settings.background_fetches_pool_size;
        global_context->getFetchesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
    }

    if (global_context->areBackgroundExecutorsInitialized())
    {
-       auto new_pool_size = server_settings_.background_common_pool_size;
+       auto new_pool_size = new_server_settings.background_common_pool_size;
        global_context->getCommonExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
    }

-   global_context->getBufferFlushSchedulePool().increaseThreadsCount(server_settings_.background_buffer_flush_schedule_pool_size);
-   global_context->getSchedulePool().increaseThreadsCount(server_settings_.background_schedule_pool_size);
-   global_context->getMessageBrokerSchedulePool().increaseThreadsCount(server_settings_.background_message_broker_schedule_pool_size);
-   global_context->getDistributedSchedulePool().increaseThreadsCount(server_settings_.background_distributed_schedule_pool_size);
+   global_context->getBufferFlushSchedulePool().increaseThreadsCount(new_server_settings.background_buffer_flush_schedule_pool_size);
+   global_context->getSchedulePool().increaseThreadsCount(new_server_settings.background_schedule_pool_size);
+   global_context->getMessageBrokerSchedulePool().increaseThreadsCount(new_server_settings.background_message_broker_schedule_pool_size);
+   global_context->getDistributedSchedulePool().increaseThreadsCount(new_server_settings.background_distributed_schedule_pool_size);

-   global_context->getAsyncLoader().setMaxThreads(TablesLoaderForegroundPoolId, server_settings_.tables_loader_foreground_pool_size);
-   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundLoadPoolId, server_settings_.tables_loader_background_pool_size);
-   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundStartupPoolId, server_settings_.tables_loader_background_pool_size);
+   global_context->getAsyncLoader().setMaxThreads(TablesLoaderForegroundPoolId, new_server_settings.tables_loader_foreground_pool_size);
+   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundLoadPoolId, new_server_settings.tables_loader_background_pool_size);
+   global_context->getAsyncLoader().setMaxThreads(TablesLoaderBackgroundStartupPoolId, new_server_settings.tables_loader_background_pool_size);

    getIOThreadPool().reloadConfiguration(
-       server_settings.max_io_thread_pool_size,
-       server_settings.max_io_thread_pool_free_size,
-       server_settings.io_thread_pool_queue_size);
+       new_server_settings.max_io_thread_pool_size,
+       new_server_settings.max_io_thread_pool_free_size,
+       new_server_settings.io_thread_pool_queue_size);

    getBackupsIOThreadPool().reloadConfiguration(
-       server_settings.max_backups_io_thread_pool_size,
-       server_settings.max_backups_io_thread_pool_free_size,
-       server_settings.backups_io_thread_pool_queue_size);
+       new_server_settings.max_backups_io_thread_pool_size,
+       new_server_settings.max_backups_io_thread_pool_free_size,
+       new_server_settings.backups_io_thread_pool_queue_size);

    getActivePartsLoadingThreadPool().reloadConfiguration(
-       server_settings.max_active_parts_loading_thread_pool_size,
+       new_server_settings.max_active_parts_loading_thread_pool_size,
        0, // We don't need any threads once all the parts will be loaded
-       server_settings.max_active_parts_loading_thread_pool_size);
+       new_server_settings.max_active_parts_loading_thread_pool_size);

    getOutdatedPartsLoadingThreadPool().reloadConfiguration(
-       server_settings.max_outdated_parts_loading_thread_pool_size,
+       new_server_settings.max_outdated_parts_loading_thread_pool_size,
        0, // We don't need any threads once all the parts will be loaded
-       server_settings.max_outdated_parts_loading_thread_pool_size);
+       new_server_settings.max_outdated_parts_loading_thread_pool_size);

    /// It could grow if we need to synchronously wait until all the data parts will be loaded.
    getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(
-       server_settings.max_active_parts_loading_thread_pool_size
+       new_server_settings.max_active_parts_loading_thread_pool_size
    );

    getPartsCleaningThreadPool().reloadConfiguration(
-       server_settings.max_parts_cleaning_thread_pool_size,
+       new_server_settings.max_parts_cleaning_thread_pool_size,
        0, // We don't need any threads one all the parts will be deleted
-       server_settings.max_parts_cleaning_thread_pool_size);
+       new_server_settings.max_parts_cleaning_thread_pool_size);

    if (config->has("resources"))
    {

@@ -1379,6 +1379,9 @@

    <!-- Controls how many tasks could be in the queue -->
    <!-- <max_tasks_in_queue>1000</max_tasks_in_queue> -->

+   <!-- Host name of the current node. If specified, will only compare and not resolve hostnames inside the DDL tasks -->
+   <!-- <host_name>replica</host_name> -->
</distributed_ddl>

<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->

@@ -140,8 +140,7 @@ void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & ena

    auto info = std::make_shared<SettingsProfilesInfo>(access_control);

-   info->profiles = merged_settings.toProfileIDs();
-   substituteProfiles(merged_settings, info->profiles_with_implicit, info->names_of_profiles);
+   substituteProfiles(merged_settings, info->profiles, info->profiles_with_implicit, info->names_of_profiles);

    info->settings = merged_settings.toSettingsChanges();
    info->constraints = merged_settings.toSettingsConstraints(access_control);

@@ -152,9 +151,12 @@ void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & ena

void SettingsProfilesCache::substituteProfiles(
    SettingsProfileElements & elements,
+   std::vector<UUID> & profiles,
    std::vector<UUID> & substituted_profiles,
    std::unordered_map<UUID, String> & names_of_substituted_profiles) const
{
+   profiles = elements.toProfileIDs();
+
    /// We should substitute profiles in reversive order because the same profile can occur
    /// in `elements` multiple times (with some other settings in between) and in this case
    /// the last occurrence should override all the previous ones.

@@ -184,6 +186,11 @@ void SettingsProfilesCache::substituteProfiles(
        names_of_substituted_profiles.emplace(profile_id, profile->getName());
    }
    std::reverse(substituted_profiles.begin(), substituted_profiles.end());
+
+   std::erase_if(profiles, [&substituted_profiles_set](const UUID & profile_id)
+   {
+       return !substituted_profiles_set.contains(profile_id);
+   });
}

std::shared_ptr<const EnabledSettings> SettingsProfilesCache::getEnabledSettings(

@@ -225,13 +232,13 @@ std::shared_ptr<const SettingsProfilesInfo> SettingsProfilesCache::getSettingsPr
    if (auto pos = this->profile_infos_cache.get(profile_id))
        return *pos;

-   SettingsProfileElements elements = all_profiles[profile_id]->elements;
+   SettingsProfileElements elements;
+   auto & element = elements.emplace_back();
+   element.parent_profile = profile_id;

    auto info = std::make_shared<SettingsProfilesInfo>(access_control);

-   info->profiles.push_back(profile_id);
    info->profiles_with_implicit.push_back(profile_id);
-   substituteProfiles(elements, info->profiles_with_implicit, info->names_of_profiles);
+   substituteProfiles(elements, info->profiles, info->profiles_with_implicit, info->names_of_profiles);
    info->settings = elements.toSettingsChanges();
    info->constraints.merge(elements.toSettingsConstraints(access_control));

@@ -37,7 +37,11 @@ private:
    void profileRemoved(const UUID & profile_id);
    void mergeSettingsAndConstraints();
    void mergeSettingsAndConstraintsFor(EnabledSettings & enabled) const;
-   void substituteProfiles(SettingsProfileElements & elements, std::vector<UUID> & substituted_profiles, std::unordered_map<UUID, String> & names_of_substituted_profiles) const;
+
+   void substituteProfiles(SettingsProfileElements & elements,
+       std::vector<UUID> & profiles,
+       std::vector<UUID> & substituted_profiles,
+       std::unordered_map<UUID, String> & names_of_substituted_profiles) const;

    const AccessControl & access_control;
    std::unordered_map<UUID, SettingsProfilePtr> all_profiles;

@@ -14,8 +14,9 @@
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>
-#include <Common/PODArray.h>
#include <Common/assert_cast.h>
+#include <Common/PODArray.h>
+#include <Common/iota.h>
#include <base/types.h>

#include <boost/math/distributions/normal.hpp>

@@ -48,7 +49,7 @@ struct LargestTriangleThreeBucketsData : public StatisticalSample<Float64, Float
    // sort the this->x and this->y in ascending order of this->x using index
    std::vector<size_t> index(this->x.size());

-   std::iota(index.begin(), index.end(), 0);
+   iota(index.data(), index.size(), size_t(0));
    ::sort(index.begin(), index.end(), [&](size_t i1, size_t i2) { return this->x[i1] < this->x[i2]; });

    SampleX temp_x{};
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
#include <AggregateFunctions/HelpersMinMaxAny.h>
|
||||
#include <AggregateFunctions/findNumeric.h>
|
||||
#include <Common/Concepts.h>
|
||||
#include <Common/findExtreme.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -19,7 +20,7 @@ public:
|
||||
explicit AggregateFunctionsSingleValueMax(const DataTypePtr & type) : Parent(type) { }
|
||||
|
||||
/// Specializations for native numeric types
|
||||
ALWAYS_INLINE inline void addBatchSinglePlace(
|
||||
void addBatchSinglePlace(
|
||||
size_t row_begin,
|
||||
size_t row_end,
|
||||
AggregateDataPtr __restrict place,
|
||||
@ -27,7 +28,7 @@ public:
|
||||
Arena * arena,
|
||||
ssize_t if_argument_pos) const override;
|
||||
|
||||
ALWAYS_INLINE inline void addBatchSinglePlaceNotNull(
|
||||
void addBatchSinglePlaceNotNull(
|
||||
size_t row_begin,
|
||||
size_t row_end,
|
||||
AggregateDataPtr __restrict place,
|
||||
@ -53,10 +54,10 @@ void AggregateFunctionsSingleValueMax<typename DB::AggregateFunctionMaxData<Sing
|
||||
if (if_argument_pos >= 0) \
|
||||
{ \
|
||||
const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData(); \
|
||||
opt = findNumericMaxIf(column.getData().data(), flags.data(), row_begin, row_end); \
|
||||
opt = findExtremeMaxIf(column.getData().data(), flags.data(), row_begin, row_end); \
|
||||
} \
|
||||
else \
|
||||
opt = findNumericMax(column.getData().data(), row_begin, row_end); \
|
||||
opt = findExtremeMax(column.getData().data(), row_begin, row_end); \
|
||||
if (opt.has_value()) \
|
||||
this->data(place).changeIfGreater(opt.value()); \
|
||||
}
|
||||
@ -74,7 +75,57 @@ void AggregateFunctionsSingleValueMax<Data>::addBatchSinglePlace(
|
||||
Arena * arena,
|
||||
ssize_t if_argument_pos) const
|
||||
{
|
||||
return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
|
||||
if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
|
||||
{
|
||||
/// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
|
||||
/// faster than doing a permutation
|
||||
return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
|
||||
}
|
||||
|
||||
constexpr int nan_direction_hint = 1;
|
||||
auto const & column = *columns[0];
|
||||
if (if_argument_pos >= 0)
|
||||
{
|
||||
size_t index = row_begin;
|
||||
const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
|
||||
while (if_flags[index] == 0 && index < row_end)
|
||||
index++;
|
||||
if (index >= row_end)
|
||||
return;
|
||||
|
||||
for (size_t i = index + 1; i < row_end; i++)
|
||||
{
|
||||
if ((if_flags[i] != 0) && (column.compareAt(i, index, column, nan_direction_hint) > 0))
|
||||
index = i;
|
||||
}
|
||||
this->data(place).changeIfGreater(column, index, arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (row_begin >= row_end)
|
||||
return;
|
||||
|
||||
/// TODO: Introduce row_begin and row_end to getPermutation
|
||||
if (row_begin != 0 || row_end != column.size())
|
||||
{
|
||||
size_t index = row_begin;
|
||||
for (size_t i = index + 1; i < row_end; i++)
|
||||
{
|
||||
if (column.compareAt(i, index, column, nan_direction_hint) > 0)
|
||||
index = i;
|
||||
}
|
||||
this->data(place).changeIfGreater(column, index, arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
constexpr IColumn::PermutationSortDirection direction = IColumn::PermutationSortDirection::Descending;
|
||||
constexpr IColumn::PermutationSortStability stability = IColumn::PermutationSortStability::Unstable;
|
||||
IColumn::Permutation permutation;
|
||||
constexpr UInt64 limit = 1;
|
||||
column.getPermutation(direction, stability, limit, nan_direction_hint, permutation);
|
||||
this->data(place).changeIfGreater(column, permutation[0], arena);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTBEGIN(bugprone-macro-parentheses)
|
||||
@ -97,10 +148,10 @@ void AggregateFunctionsSingleValueMax<typename DB::AggregateFunctionMaxData<Sing
|
||||
auto final_flags = std::make_unique<UInt8[]>(row_end); \
|
||||
for (size_t i = row_begin; i < row_end; ++i) \
|
||||
final_flags[i] = (!null_map[i]) & !!if_flags[i]; \
|
||||
opt = findNumericMaxIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
|
||||
opt = findExtremeMaxIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
|
||||
} \
|
||||
else \
|
||||
opt = findNumericMaxNotNull(column.getData().data(), null_map, row_begin, row_end); \
|
||||
opt = findExtremeMaxNotNull(column.getData().data(), null_map, row_begin, row_end); \
|
||||
if (opt.has_value()) \
|
||||
this->data(place).changeIfGreater(opt.value()); \
|
||||
}
|
||||
@ -119,7 +170,46 @@ void AggregateFunctionsSingleValueMax<Data>::addBatchSinglePlaceNotNull(
|
||||
Arena * arena,
|
||||
ssize_t if_argument_pos) const
|
||||
{
|
||||
return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
|
||||
if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
|
||||
{
|
||||
/// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
|
||||
/// faster than doing a permutation
|
||||
return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
|
||||
}
|
||||
|
||||
constexpr int nan_direction_hint = 1;
|
||||
auto const & column = *columns[0];
|
||||
if (if_argument_pos >= 0)
|
||||
{
|
||||
size_t index = row_begin;
|
||||
const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
|
||||
while ((if_flags[index] == 0 || null_map[index] != 0) && (index < row_end))
|
||||
index++;
|
||||
if (index >= row_end)
|
||||
return;
|
||||
|
||||
for (size_t i = index + 1; i < row_end; i++)
|
||||
{
|
||||
if ((if_flags[i] != 0) && (null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) > 0))
|
||||
index = i;
|
||||
}
|
||||
this->data(place).changeIfGreater(column, index, arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
size_t index = row_begin;
|
||||
while ((null_map[index] != 0) && (index < row_end))
|
||||
index++;
|
||||
if (index >= row_end)
|
||||
return;
|
||||
|
||||
for (size_t i = index + 1; i < row_end; i++)
|
||||
{
|
||||
if ((null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) > 0))
|
||||
index = i;
|
||||
}
|
||||
this->data(place).changeIfGreater(column, index, arena);
|
||||
}
|
||||
}
|
||||
|
||||
AggregateFunctionPtr createAggregateFunctionMax(
|
||||
|
@ -1,7 +1,8 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/FactoryHelpers.h>
#include <AggregateFunctions/HelpersMinMaxAny.h>
#include <AggregateFunctions/findNumeric.h>
#include <Common/Concepts.h>
#include <Common/findExtreme.h>

namespace DB

@ -20,7 +21,7 @@ public:
    explicit AggregateFunctionsSingleValueMin(const DataTypePtr & type) : Parent(type) { }

    /// Specializations for native numeric types
    ALWAYS_INLINE inline void addBatchSinglePlace(
    void addBatchSinglePlace(
        size_t row_begin,
        size_t row_end,
        AggregateDataPtr __restrict place,

@ -28,7 +29,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override;

    ALWAYS_INLINE inline void addBatchSinglePlaceNotNull(
    void addBatchSinglePlaceNotNull(
        size_t row_begin,
        size_t row_end,
        AggregateDataPtr __restrict place,

@ -54,10 +55,10 @@ public:
        if (if_argument_pos >= 0) \
        { \
            const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData(); \
            opt = findNumericMinIf(column.getData().data(), flags.data(), row_begin, row_end); \
            opt = findExtremeMinIf(column.getData().data(), flags.data(), row_begin, row_end); \
        } \
        else \
            opt = findNumericMin(column.getData().data(), row_begin, row_end); \
            opt = findExtremeMin(column.getData().data(), row_begin, row_end); \
        if (opt.has_value()) \
            this->data(place).changeIfLess(opt.value()); \
    }

@ -75,7 +76,57 @@ void AggregateFunctionsSingleValueMin<Data>::addBatchSinglePlace(
    Arena * arena,
    ssize_t if_argument_pos) const
{
    return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
    if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
    {
        /// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
        /// faster than doing a permutation
        return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos);
    }

    constexpr int nan_direction_hint = 1;
    auto const & column = *columns[0];
    if (if_argument_pos >= 0)
    {
        size_t index = row_begin;
        const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
        while (if_flags[index] == 0 && index < row_end)
            index++;
        if (index >= row_end)
            return;

        for (size_t i = index + 1; i < row_end; i++)
        {
            if ((if_flags[i] != 0) && (column.compareAt(i, index, column, nan_direction_hint) < 0))
                index = i;
        }
        this->data(place).changeIfLess(column, index, arena);
    }
    else
    {
        if (row_begin >= row_end)
            return;

        /// TODO: Introduce row_begin and row_end to getPermutation
        if (row_begin != 0 || row_end != column.size())
        {
            size_t index = row_begin;
            for (size_t i = index + 1; i < row_end; i++)
            {
                if (column.compareAt(i, index, column, nan_direction_hint) < 0)
                    index = i;
            }
            this->data(place).changeIfLess(column, index, arena);
        }
        else
        {
            constexpr IColumn::PermutationSortDirection direction = IColumn::PermutationSortDirection::Ascending;
            constexpr IColumn::PermutationSortStability stability = IColumn::PermutationSortStability::Unstable;
            IColumn::Permutation permutation;
            constexpr UInt64 limit = 1;
            column.getPermutation(direction, stability, limit, nan_direction_hint, permutation);
            this->data(place).changeIfLess(column, permutation[0], arena);
        }
    }
}

// NOLINTBEGIN(bugprone-macro-parentheses)

@ -98,10 +149,10 @@ void AggregateFunctionsSingleValueMin<Data>::addBatchSinglePlace(
            auto final_flags = std::make_unique<UInt8[]>(row_end); \
            for (size_t i = row_begin; i < row_end; ++i) \
                final_flags[i] = (!null_map[i]) & !!if_flags[i]; \
            opt = findNumericMinIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
            opt = findExtremeMinIf(column.getData().data(), final_flags.get(), row_begin, row_end); \
        } \
        else \
            opt = findNumericMinNotNull(column.getData().data(), null_map, row_begin, row_end); \
            opt = findExtremeMinNotNull(column.getData().data(), null_map, row_begin, row_end); \
        if (opt.has_value()) \
            this->data(place).changeIfLess(opt.value()); \
    }

@ -120,7 +171,46 @@ void AggregateFunctionsSingleValueMin<Data>::addBatchSinglePlaceNotNull(
    Arena * arena,
    ssize_t if_argument_pos) const
{
    return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
    if constexpr (!is_any_of<typename Data::Impl, SingleValueDataString, SingleValueDataGeneric>)
    {
        /// Leave other numeric types (large integers, decimals, etc) to keep doing the comparison as it's
        /// faster than doing a permutation
        return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
    }

    constexpr int nan_direction_hint = 1;
    auto const & column = *columns[0];
    if (if_argument_pos >= 0)
    {
        size_t index = row_begin;
        const auto & if_flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
        while ((if_flags[index] == 0 || null_map[index] != 0) && (index < row_end))
            index++;
        if (index >= row_end)
            return;

        for (size_t i = index + 1; i < row_end; i++)
        {
            if ((if_flags[i] != 0) && (null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) < 0))
                index = i;
        }
        this->data(place).changeIfLess(column, index, arena);
    }
    else
    {
        size_t index = row_begin;
        while ((null_map[index] != 0) && (index < row_end))
            index++;
        if (index >= row_end)
            return;

        for (size_t i = index + 1; i < row_end; i++)
        {
            if ((null_map[i] == 0) && (column.compareAt(i, index, column, nan_direction_hint) < 0))
                index = i;
        }
        this->data(place).changeIfLess(column, index, arena);
    }
}

AggregateFunctionPtr createAggregateFunctionMin(
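For the full-column case the new code asks getPermutation for a limit of 1: a partial sort that only has to place the smallest row first, which yields the index of the minimum without fully sorting the column. A rough equivalent using standard containers (illustrative only, not the real IColumn API):

#include <algorithm>
#include <numeric>
#include <vector>

/// Find the index of the minimum element via a limit-1 partial sort,
/// mirroring column.getPermutation(..., limit = 1, ...) in the patch above.
size_t argmin(const std::vector<double> & data)
{
    std::vector<size_t> perm(data.size());
    std::iota(perm.begin(), perm.end(), 0);
    /// Only perm[0] needs to be correct, so sort just the first position.
    std::partial_sort(perm.begin(), perm.begin() + 1, perm.end(),
                      [&](size_t a, size_t b) { return data[a] < data[b]; });
    return perm[0];
}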
@ -965,6 +965,7 @@ template <typename Data>
struct AggregateFunctionMinData : Data
{
    using Self = AggregateFunctionMinData;
    using Impl = Data;

    bool changeIfBetter(const IColumn & column, size_t row_num, Arena * arena) { return this->changeIfLess(column, row_num, arena); }
    bool changeIfBetter(const Self & to, Arena * arena) { return this->changeIfLess(to, arena); }

@ -993,6 +994,7 @@ template <typename Data>
struct AggregateFunctionMaxData : Data
{
    using Self = AggregateFunctionMaxData;
    using Impl = Data;

    bool changeIfBetter(const IColumn & column, size_t row_num, Arena * arena) { return this->changeIfGreater(column, row_num, arena); }
    bool changeIfBetter(const Self & to, Arena * arena) { return this->changeIfGreater(to, arena); }
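Both wrappers now expose their wrapped implementation through the added `using Impl = Data;` alias; that alias is what the `if constexpr (!is_any_of<typename Data::Impl, ...>)` checks in the min/max batch methods inspect to pick the vectorised path at compile time. Stripped to its essentials (placeholder types, not the real hierarchy):

#include <type_traits>

struct SingleValueDataString {};   /// placeholder for the string implementation
struct SingleValueDataFixedInt {}; /// placeholder for a native numeric implementation

template <typename Data>
struct MinData : Data
{
    using Impl = Data; /// expose the wrapped implementation for compile-time dispatch
};

template <typename Data>
void addBatch()
{
    if constexpr (std::is_same_v<typename Data::Impl, SingleValueDataString>)
    {
        /* generic compareAt() path */
    }
    else
    {
        /* vectorised findExtreme* path */
    }
}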
@ -6,6 +6,7 @@

#include <Common/FieldVisitorConvertToNumber.h>
#include <Common/NaNUtils.h>
#include <Common/iota.h>

namespace DB

@ -63,10 +64,9 @@ struct QuantileLevels

        if (isNaN(levels[i]) || levels[i] < 0 || levels[i] > 1)
            throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Quantile level is out of range [0..1]");

        permutation[i] = i;
    }

    iota(permutation.data(), size, Permutation::value_type(0));
    ::sort(permutation.begin(), permutation.end(), [this] (size_t a, size_t b) { return levels[a] < levels[b]; });
}
};

@ -7,6 +7,7 @@
#include <base/sort.h>

#include <Common/ArenaAllocator.h>
#include <Common/iota.h>

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>

@ -30,7 +31,7 @@ std::pair<RanksArray, Float64> computeRanksAndTieCorrection(const Values & value
    const size_t size = values.size();
    /// Save initial positions, then sort indices according to the values.
    std::vector<size_t> indexes(size);
    std::iota(indexes.begin(), indexes.end(), 0);
    iota(indexes.data(), indexes.size(), size_t(0));
    std::sort(indexes.begin(), indexes.end(),
        [&] (size_t lhs, size_t rhs) { return values[lhs] < values[rhs]; });

@ -1,15 +0,0 @@
#include <AggregateFunctions/findNumeric.h>

namespace DB
{
#define INSTANTIATION(T) \
    template std::optional<T> findNumericMin(const T * __restrict ptr, size_t start, size_t end); \
    template std::optional<T> findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    template std::optional<T> findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    template std::optional<T> findNumericMax(const T * __restrict ptr, size_t start, size_t end); \
    template std::optional<T> findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    template std::optional<T> findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

FOR_BASIC_NUMERIC_TYPES(INSTANTIATION)
#undef INSTANTIATION
}
@ -1,5 +1,6 @@
#include <Analyzer/Passes/FuseFunctionsPass.h>

#include <Common/iota.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeTuple.h>

@ -184,7 +185,7 @@ FunctionNodePtr createFusedQuantilesNode(std::vector<QueryTreeNodePtr *> & nodes
{
    /// Sort nodes and parameters in ascending order of quantile level
    std::vector<size_t> permutation(nodes.size());
    std::iota(permutation.begin(), permutation.end(), 0);
    iota(permutation.data(), permutation.size(), size_t(0));
    std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].get<Float64>() < parameters[j].get<Float64>(); });

    std::vector<QueryTreeNodePtr *> new_nodes;

@ -573,11 +573,12 @@ void RestorerFromBackup::createDatabase(const String & database_name) const
    create_database_query->if_not_exists = (restore_settings.create_table == RestoreTableCreationMode::kCreateIfNotExists);

    LOG_TRACE(log, "Creating database {}: {}", backQuoteIfNeed(database_name), serializeAST(*create_database_query));

    auto query_context = Context::createCopy(context);
    query_context->setSetting("allow_deprecated_database_ordinary", 1);
    try
    {
        /// Execute CREATE DATABASE query.
        InterpreterCreateQuery interpreter{create_database_query, context};
        InterpreterCreateQuery interpreter{create_database_query, query_context};
        interpreter.setInternal(true);
        interpreter.execute();
    }
@ -1,18 +1,19 @@
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h>
#include <Common/assert_cast.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <Common/FieldVisitorToString.h>
#include <Common/SipHash.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Common/AlignedBuffer.h>
#include <Common/typeid_cast.h>
#include <Common/Arena.h>
#include <Common/WeakHash.h>
#include <Common/FieldVisitorToString.h>
#include <Common/HashTable/Hash.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>

namespace DB

@ -626,8 +627,7 @@ void ColumnAggregateFunction::getPermutation(PermutationSortDirection /*directio
{
    size_t s = data.size();
    res.resize(s);
    for (size_t i = 0; i < s; ++i)
        res[i] = i;
    iota(res.data(), s, IColumn::Permutation::value_type(0));
}

void ColumnAggregateFunction::updatePermutation(PermutationSortDirection, PermutationSortStability,

@ -2,9 +2,10 @@

#include <Columns/ColumnConst.h>
#include <Columns/ColumnsCommon.h>
#include <Common/typeid_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <Common/WeakHash.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>

#include <base/defines.h>

@ -128,8 +129,7 @@ void ColumnConst::getPermutation(PermutationSortDirection /*direction*/, Permuta
    size_t /*limit*/, int /*nan_direction_hint*/, Permutation & res) const
{
    res.resize(s);
    for (size_t i = 0; i < s; ++i)
        res[i] = i;
    iota(res.data(), s, IColumn::Permutation::value_type(0));
}

void ColumnConst::updatePermutation(PermutationSortDirection /*direction*/, PermutationSortStability /*stability*/,

@ -1,10 +1,11 @@
#include <Common/Exception.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/RadixSort.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/iota.h>

#include <base/sort.h>

@ -163,8 +164,7 @@ void ColumnDecimal<T>::getPermutation(IColumn::PermutationSortDirection directio
    if (limit >= data_size)
        limit = 0;

    for (size_t i = 0; i < data_size; ++i)
        res[i] = i;
    iota(res.data(), data_size, IColumn::Permutation::value_type(0));

    if constexpr (is_arithmetic_v<NativeT> && !is_big_int_v<NativeT>)
    {

@ -183,8 +183,7 @@ void ColumnDecimal<T>::getPermutation(IColumn::PermutationSortDirection directio
    /// Thresholds on size. Lower threshold is arbitrary. Upper threshold is chosen by the type for histogram counters.
    if (data_size >= 256 && data_size <= std::numeric_limits<UInt32>::max() && use_radix_sort)
    {
        for (size_t i = 0; i < data_size; ++i)
            res[i] = i;
        iota(res.data(), data_size, IColumn::Permutation::value_type(0));

        bool try_sort = false;

@ -2,6 +2,7 @@
#include <Columns/ColumnObject.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnArray.h>
#include <Common/iota.h>
#include <DataTypes/ObjectUtils.h>
#include <DataTypes/getLeastSupertype.h>
#include <DataTypes/DataTypeNothing.h>

@ -838,7 +839,7 @@ MutableColumnPtr ColumnObject::cloneResized(size_t new_size) const
void ColumnObject::getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const
{
    res.resize(num_rows);
    std::iota(res.begin(), res.end(), 0);
    iota(res.data(), res.size(), size_t(0));
}

void ColumnObject::compareColumn(const IColumn & rhs, size_t rhs_row_num,

@ -1,11 +1,12 @@
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnCompressed.h>
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnTuple.h>
#include <Common/WeakHash.h>
#include <Common/SipHash.h>
#include <Common/HashTable/Hash.h>
#include <Columns/ColumnsCommon.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Common/HashTable/Hash.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/iota.h>

#include <algorithm>
#include <bit>

@ -499,8 +500,7 @@ void ColumnSparse::getPermutationImpl(IColumn::PermutationSortDirection directio
    res.resize(_size);
    if (offsets->empty())
    {
        for (size_t i = 0; i < _size; ++i)
            res[i] = i;
        iota(res.data(), _size, IColumn::Permutation::value_type(0));
        return;
    }

@ -1,16 +1,17 @@
#include <Columns/ColumnTuple.h>

#include <base/sort.h>
#include <Columns/IColumnImpl.h>
#include <Columns/ColumnCompressed.h>
#include <Columns/IColumnImpl.h>
#include <Core/Field.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <DataTypes/Serializations/SerializationInfoTuple.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <base/sort.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>
#include <DataTypes/Serializations/SerializationInfoTuple.h>

namespace DB

@ -378,8 +379,7 @@ void ColumnTuple::getPermutationImpl(IColumn::PermutationSortDirection direction
{
    size_t rows = size();
    res.resize(rows);
    for (size_t i = 0; i < rows; ++i)
        res[i] = i;
    iota(res.data(), rows, IColumn::Permutation::value_type(0));

    if (limit >= rows)
        limit = 0;

@ -1,24 +1,25 @@
#include "ColumnVector.h"

#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnCompressed.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h>
#include <Columns/RadixSortHelper.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <IO/WriteHelpers.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <base/bit_cast.h>
#include <base/scope_guard.h>
#include <base/sort.h>
#include <base/unaligned.h>
#include <Common/Arena.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/NaNUtils.h>
#include <Common/RadixSort.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/TargetSpecific.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <base/sort.h>
#include <base/unaligned.h>
#include <base/bit_cast.h>
#include <base/scope_guard.h>
#include <Common/iota.h>

#include <bit>
#include <cmath>

@ -244,8 +245,7 @@ void ColumnVector<T>::getPermutation(IColumn::PermutationSortDirection direction
    if (limit >= data_size)
        limit = 0;

    for (size_t i = 0; i < data_size; ++i)
        res[i] = i;
    iota(res.data(), data_size, IColumn::Permutation::value_type(0));

    if constexpr (is_arithmetic_v<T> && !is_big_int_v<T>)
    {

@ -1,7 +1,8 @@
#include <Common/Arena.h>
#include <Core/Field.h>
#include <Columns/IColumnDummy.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/IColumnDummy.h>
#include <Core/Field.h>
#include <Common/Arena.h>
#include <Common/iota.h>

namespace DB

@ -87,8 +88,7 @@ void IColumnDummy::getPermutation(IColumn::PermutationSortDirection /*direction*
    size_t /*limit*/, int /*nan_direction_hint*/, Permutation & res) const
{
    res.resize(s);
    for (size_t i = 0; i < s; ++i)
        res[i] = i;
    iota(res.data(), s, IColumn::Permutation::value_type(0));
}

ColumnPtr IColumnDummy::replicate(const Offsets & offsets) const

@ -6,10 +6,11 @@
 * implementation.
 */

#include <Columns/IColumn.h>
#include <Common/PODArray.h>
#include <base/sort.h>
#include <algorithm>
#include <Columns/IColumn.h>
#include <base/sort.h>
#include <Common/PODArray.h>
#include <Common/iota.h>

namespace DB

@ -299,8 +300,7 @@ void IColumn::getPermutationImpl(
    if (limit >= data_size)
        limit = 0;

    for (size_t i = 0; i < data_size; ++i)
        res[i] = i;
    iota(res.data(), data_size, Permutation::value_type(0));

    if (limit)
    {

@ -1,6 +1,7 @@
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnsNumber.h>

#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <pcg_random.hpp>
#include <gtest/gtest.h>

@ -191,7 +192,7 @@ TEST(ColumnSparse, Permute)
    auto [sparse_src, full_src] = createColumns(n, k);

    IColumn::Permutation perm(n);
    std::iota(perm.begin(), perm.end(), 0);
    iota(perm.data(), perm.size(), size_t(0));
    std::shuffle(perm.begin(), perm.end(), rng);

    auto sparse_dst = sparse_src->permute(perm, limit);

@ -9,7 +9,6 @@
#include <Columns/ColumnUnique.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeMap.h>

@ -17,6 +16,7 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypesNumber.h>
#include <Common/iota.h>

using namespace DB;

@ -32,8 +32,7 @@ void stableGetColumnPermutation(

    size_t size = column.size();
    out_permutation.resize(size);
    for (size_t i = 0; i < size; ++i)
        out_permutation[i] = i;
    iota(out_permutation.data(), size, IColumn::Permutation::value_type(0));

    std::stable_sort(
        out_permutation.begin(),

@ -146,10 +145,7 @@ void assertColumnPermutations(ColumnCreateFunc column_create_func, ValueTransfor

    std::vector<std::vector<Field>> ranges(ranges_size);
    std::vector<size_t> ranges_permutations(ranges_size);
    for (size_t i = 0; i < ranges_size; ++i)
    {
        ranges_permutations[i] = i;
    }
    iota(ranges_permutations.data(), ranges_size, IColumn::Permutation::value_type(0));

    IColumn::Permutation actual_permutation;
    IColumn::Permutation expected_permutation;
@ -589,6 +589,7 @@
    M(707, GCP_ERROR) \
    M(708, ILLEGAL_STATISTIC) \
    M(709, CANNOT_GET_REPLICATED_DATABASE_SNAPSHOT) \
    M(710, FAULT_INJECTED) \
    \
    M(999, KEEPER_EXCEPTION) \
    M(1000, POCO_EXCEPTION) \

@ -34,6 +34,8 @@ static struct InitFiu

#define APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE) \
    ONCE(replicated_merge_tree_commit_zk_fail_after_op) \
    ONCE(replicated_queue_fail_next_entry) \
    REGULAR(replicated_queue_unfail_entries) \
    ONCE(replicated_merge_tree_insert_quorum_fail_0) \
    REGULAR(replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault) \
    REGULAR(use_delayed_remote_source) \

@ -288,6 +288,18 @@ The server successfully detected this situation and will download merged part fr
    M(OSReadChars, "Number of bytes read from filesystem, including page cache.") \
    M(OSWriteChars, "Number of bytes written to filesystem, including page cache.") \
    \
    M(ParallelReplicasHandleRequestMicroseconds, "Time spent processing requests for marks from replicas") \
    M(ParallelReplicasHandleAnnouncementMicroseconds, "Time spent processing replicas announcements") \
    \
    M(ParallelReplicasReadAssignedMarks, "Sum across all replicas of how many of scheduled marks were assigned by consistent hash") \
    M(ParallelReplicasReadUnassignedMarks, "Sum across all replicas of how many unassigned marks were scheduled") \
    M(ParallelReplicasReadAssignedForStealingMarks, "Sum across all replicas of how many of scheduled marks were assigned for stealing by consistent hash") \
    \
    M(ParallelReplicasStealingByHashMicroseconds, "Time spent collecting segments meant for stealing by hash") \
    M(ParallelReplicasProcessingPartsMicroseconds, "Time spent processing data parts") \
    M(ParallelReplicasStealingLeftoversMicroseconds, "Time spent collecting orphaned segments") \
    M(ParallelReplicasCollectingOwnedSegmentsMicroseconds, "Time spent collecting segments meant by hash") \
    \
    M(PerfCpuCycles, "Total cycles. Be wary of what happens during CPU frequency scaling.") \
    M(PerfInstructions, "Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts.") \
    M(PerfCacheReferences, "Cache accesses. Usually, this indicates Last Level Cache accesses, but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.") \

@ -365,7 +365,7 @@ DECLARE_AVX512VBMI2_SPECIFIC_CODE(
    FUNCTION_HEADER \
    \
    name \
    FUNCTION_BODY \
    FUNCTION_BODY \

/// NOLINTNEXTLINE
#define MULTITARGET_FUNCTION_AVX512BW_AVX512F_AVX2_SSE42(FUNCTION_HEADER, name, FUNCTION_BODY) \
@ -1,18 +1,9 @@
#pragma once

#include <DataTypes/IDataType.h>
#include <base/defines.h>
#include <base/types.h>
#include <Common/Concepts.h>
#include <Common/TargetSpecific.h>

#include <algorithm>
#include <optional>
#include <Common/findExtreme.h>

namespace DB
{
template <typename T>
concept is_any_native_number = (is_any_of<T, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64>);

template <is_any_native_number T>
struct MinComparator

@ -28,8 +19,8 @@ struct MaxComparator

MULTITARGET_FUNCTION_AVX2_SSE42(
    MULTITARGET_FUNCTION_HEADER(template <is_any_native_number T, typename ComparatorClass, bool add_all_elements, bool add_if_cond_zero> static std::optional<T> NO_INLINE),
    findNumericExtremeImpl,
    MULTITARGET_FUNCTION_BODY((const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t row_begin, size_t row_end)
    findExtremeImpl,
    MULTITARGET_FUNCTION_BODY((const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t row_begin, size_t row_end) /// NOLINT
    {
        size_t count = row_end - row_begin;
        ptr += row_begin;

@ -86,69 +77,67 @@ MULTITARGET_FUNCTION_AVX2_SSE42(
    }
))

/// Given a vector of T finds the extreme (MIN or MAX) value
template <is_any_native_number T, class ComparatorClass, bool add_all_elements, bool add_if_cond_zero>
static std::optional<T>
findNumericExtreme(const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t start, size_t end)
findExtreme(const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t start, size_t end)
{
#if USE_MULTITARGET_CODE
    /// We see no benefit from using AVX512BW or AVX512F (over AVX2), so we only declare SSE and AVX2
    if (isArchSupported(TargetArch::AVX2))
        return findNumericExtremeImplAVX2<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
        return findExtremeImplAVX2<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);

    if (isArchSupported(TargetArch::SSE42))
        return findNumericExtremeImplSSE42<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
        return findExtremeImplSSE42<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
#endif
    return findNumericExtremeImpl<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
    return findExtremeImpl<T, ComparatorClass, add_all_elements, add_if_cond_zero>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
std::optional<T> findNumericMin(const T * __restrict ptr, size_t start, size_t end)
std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end)
{
    return findNumericExtreme<T, MinComparator<T>, true, false>(ptr, nullptr, start, end);
    return findExtreme<T, MinComparator<T>, true, false>(ptr, nullptr, start, end);
}

template <is_any_native_number T>
std::optional<T> findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
    return findNumericExtreme<T, MinComparator<T>, false, true>(ptr, condition_map, start, end);
    return findExtreme<T, MinComparator<T>, false, true>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
std::optional<T> findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
    return findNumericExtreme<T, MinComparator<T>, false, false>(ptr, condition_map, start, end);
    return findExtreme<T, MinComparator<T>, false, false>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
std::optional<T> findNumericMax(const T * __restrict ptr, size_t start, size_t end)
std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end)
{
    return findNumericExtreme<T, MaxComparator<T>, true, false>(ptr, nullptr, start, end);
    return findExtreme<T, MaxComparator<T>, true, false>(ptr, nullptr, start, end);
}

template <is_any_native_number T>
std::optional<T> findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
    return findNumericExtreme<T, MaxComparator<T>, false, true>(ptr, condition_map, start, end);
    return findExtreme<T, MaxComparator<T>, false, true>(ptr, condition_map, start, end);
}

template <is_any_native_number T>
std::optional<T> findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
    return findNumericExtreme<T, MaxComparator<T>, false, false>(ptr, condition_map, start, end);
    return findExtreme<T, MaxComparator<T>, false, false>(ptr, condition_map, start, end);
}

#define EXTERN_INSTANTIATION(T) \
    extern template std::optional<T> findNumericMin(const T * __restrict ptr, size_t start, size_t end); \
    extern template std::optional<T> findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findNumericMax(const T * __restrict ptr, size_t start, size_t end); \
    extern template std::optional<T> findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

FOR_BASIC_NUMERIC_TYPES(EXTERN_INSTANTIATION)
#undef EXTERN_INSTANTIATION
#define INSTANTIATION(T) \
    template std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end); \
    template std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    template std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    template std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end); \
    template std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    template std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

FOR_BASIC_NUMERIC_TYPES(INSTANTIATION)
#undef INSTANTIATION
}

src/Common/findExtreme.h (new file, 45 lines)
@ -0,0 +1,45 @@
#pragma once

#include <DataTypes/IDataType.h>
#include <base/defines.h>
#include <base/types.h>
#include <Common/Concepts.h>

#include <algorithm>
#include <optional>

namespace DB
{
template <typename T>
concept is_any_native_number = (is_any_of<T, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64, Float32, Float64>);

template <is_any_native_number T>
std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

template <is_any_native_number T>
std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

#define EXTERN_INSTANTIATION(T) \
    extern template std::optional<T> findExtremeMin(const T * __restrict ptr, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMax(const T * __restrict ptr, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \
    extern template std::optional<T> findExtremeMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end);

FOR_BASIC_NUMERIC_TYPES(EXTERN_INSTANTIATION)
#undef EXTERN_INSTANTIATION

}
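The new header keeps only the public surface: each helper takes a raw pointer plus a half-open [start, end) row range and returns an empty optional when no row qualifies. A minimal usage sketch (assuming a translation unit inside the DB codebase, where Int64 comes from base/types.h):

#include <Common/findExtreme.h>
#include <optional>
#include <vector>

/// Sketch: minimum of a contiguous buffer via the dispatching helper.
/// Returns std::nullopt for an empty range.
std::optional<Int64> minOf(const std::vector<Int64> & data)
{
    return DB::findExtremeMin<Int64>(data.data(), 0, data.size());
}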
src/Common/iota.cpp (new file, 36 lines)
@ -0,0 +1,36 @@
#include <base/defines.h>
#include <Common/iota.h>
#include <Common/TargetSpecific.h>

namespace DB
{

MULTITARGET_FUNCTION_AVX2_SSE42(
    MULTITARGET_FUNCTION_HEADER(template <iota_supported_types T> void NO_INLINE),
    iotaImpl, MULTITARGET_FUNCTION_BODY((T * begin, size_t count, T first_value) /// NOLINT
    {
        for (size_t i = 0; i < count; i++)
            *(begin + i) = static_cast<T>(first_value + i);
    })
)

template <iota_supported_types T>
void iota(T * begin, size_t count, T first_value)
{
#if USE_MULTITARGET_CODE
    if (isArchSupported(TargetArch::AVX2))
        return iotaImplAVX2(begin, count, first_value);

    if (isArchSupported(TargetArch::SSE42))
        return iotaImplSSE42(begin, count, first_value);
#endif
    return iotaImpl(begin, count, first_value);
}

template void iota(UInt8 * begin, size_t count, UInt8 first_value);
template void iota(UInt32 * begin, size_t count, UInt32 first_value);
template void iota(UInt64 * begin, size_t count, UInt64 first_value);
#if defined(OS_DARWIN)
template void iota(size_t * begin, size_t count, size_t first_value);
#endif
}
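Call sites throughout this commit all follow the same shape: resize the destination, then hand DB::iota the raw pointer, the count and the starting value, so the runtime-dispatched AVX2/SSE4.2 kernel can fill the identity sequence. A sketch mirroring the getPermutation changes above (IColumn comes from Columns/IColumn.h):

#include <Columns/IColumn.h>
#include <Common/iota.h>

/// Sketch: initialise an identity permutation with the vectorised iota
/// instead of a scalar for-loop or std::iota.
void identityPermutation(DB::IColumn::Permutation & res, size_t data_size)
{
    res.resize(data_size);
    DB::iota(res.data(), data_size, DB::IColumn::Permutation::value_type(0));
}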
src/Common/iota.h (new file, 34 lines)
@ -0,0 +1,34 @@
#pragma once

#include <base/types.h>
#include <Common/Concepts.h>

/// This is a replacement for std::iota to use dynamic dispatch
/// Note that it is only defined for containers with contiguous memory

namespace DB
{

/// Make sure to add any new type to the extern declaration at the end of the file and instantiate it in iota.cpp

template <typename T>
concept iota_supported_types = (is_any_of<
    T,
    UInt8,
    UInt32,
    UInt64
#if defined(OS_DARWIN)
    ,
    size_t
#endif
    >);

template <iota_supported_types T> void iota(T * begin, size_t count, T first_value);

extern template void iota(UInt8 * begin, size_t count, UInt8 first_value);
extern template void iota(UInt32 * begin, size_t count, UInt32 first_value);
extern template void iota(UInt64 * begin, size_t count, UInt64 first_value);
#if defined(OS_DARWIN)
extern template void iota(size_t * begin, size_t count, size_t first_value);
#endif
}
@ -1,5 +1,6 @@
#include <Common/levenshteinDistance.h>
#include <Common/PODArray.h>
#include <Common/iota.h>
#include <Common/levenshteinDistance.h>

namespace DB
{

@ -11,8 +12,7 @@ size_t levenshteinDistance(const String & lhs, const String & rhs)

    PODArrayWithStackMemory<size_t, 64> row(n + 1);

    for (size_t i = 1; i <= n; ++i)
        row[i] = i;
    iota(row.data() + 1, n, size_t(1));

    for (size_t j = 1; j <= m; ++j)
    {

@ -6,6 +6,7 @@
#include <Common/HashTable/HashMap.h>
#include <Common/HashTable/HashSet.h>
#include <Common/HashTable/Hash.h>
#include <Common/iota.h>

#include <IO/ReadBufferFromString.h>
#include <IO/WriteHelpers.h>

@ -20,7 +21,7 @@ namespace
std::vector<UInt64> getVectorWithNumbersUpToN(size_t n)
{
    std::vector<UInt64> res(n);
    std::iota(res.begin(), res.end(), 0);
    iota(res.data(), res.size(), UInt64(0));
    return res;
}

@ -26,6 +26,8 @@ namespace DB
    M(UInt64, max_active_parts_loading_thread_pool_size, 64, "The number of threads to load active set of data parts (Active ones) at startup.", 0) \
    M(UInt64, max_outdated_parts_loading_thread_pool_size, 32, "The number of threads to load inactive set of data parts (Outdated ones) at startup.", 0) \
    M(UInt64, max_parts_cleaning_thread_pool_size, 128, "The number of threads for concurrent removal of inactive data parts.", 0) \
    M(UInt64, max_mutations_bandwidth_for_server, 0, "The maximum read speed of all mutations on server in bytes per second. Zero means unlimited.", 0) \
    M(UInt64, max_merges_bandwidth_for_server, 0, "The maximum read speed of all merges on server in bytes per second. Zero means unlimited.", 0) \
    M(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
    M(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
    M(UInt64, max_remote_read_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for read. Zero means unlimited.", 0) \
|
@ -185,6 +185,7 @@ class IColumn;
|
||||
M(Float, parallel_replicas_single_task_marks_count_multiplier, 2, "A multiplier which will be added during calculation for minimal number of marks to retrieve from coordinator. This will be applied only for remote replicas.", 0) \
|
||||
M(Bool, parallel_replicas_for_non_replicated_merge_tree, false, "If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables", 0) \
|
||||
M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \
|
||||
M(UInt64, parallel_replicas_mark_segment_size, 128, "Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing", 0) \
|
||||
\
|
||||
M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards. Shard is marked as unavailable when: 1) The shard cannot be reached due to a connection failure. 2) Shard is unresolvable through DNS. 3) Table does not exist on the shard.", 0) \
|
||||
\
|
||||
@ -584,6 +585,7 @@ class IColumn;
|
||||
M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
|
||||
M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \
|
||||
M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \
|
||||
M(Bool, ignore_materialized_views_with_dropped_target_table, false, "Ignore MVs with dropped taraget table during pushing to views", 0) \
|
||||
M(Bool, allow_experimental_refreshable_materialized_view, false, "Allow refreshable materialized views (CREATE MATERIALIZED VIEW <name> REFRESH ...).", 0) \
|
||||
M(Bool, stop_refreshable_materialized_views_on_startup, false, "On server startup, prevent scheduling of refreshable materialized views, as if with SYSTEM STOP VIEWS. You can manually start them with SYSTEM START VIEWS or SYSTEM START VIEW <name> afterwards. Also applies to newly created views. Has no effect on non-refreshable materialized views.", 0) \
|
||||
M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \
|
||||
|
@ -115,6 +115,8 @@ IMPLEMENT_SETTING_ENUM(DistributedDDLOutputMode, ErrorCodes::BAD_ARGUMENTS,
|
||||
{{"none", DistributedDDLOutputMode::NONE},
|
||||
{"throw", DistributedDDLOutputMode::THROW},
|
||||
{"null_status_on_timeout", DistributedDDLOutputMode::NULL_STATUS_ON_TIMEOUT},
|
||||
{"throw_only_active", DistributedDDLOutputMode::THROW_ONLY_ACTIVE},
|
||||
{"null_status_on_timeout_only_active", DistributedDDLOutputMode::NULL_STATUS_ON_TIMEOUT_ONLY_ACTIVE},
|
||||
{"never_throw", DistributedDDLOutputMode::NEVER_THROW}})
|
||||
|
||||
IMPLEMENT_SETTING_ENUM(StreamingHandleErrorMode, ErrorCodes::BAD_ARGUMENTS,
|
||||
|
@ -173,6 +173,8 @@ enum class DistributedDDLOutputMode
|
||||
THROW,
|
||||
NULL_STATUS_ON_TIMEOUT,
|
||||
NEVER_THROW,
|
||||
THROW_ONLY_ACTIVE,
|
||||
NULL_STATUS_ON_TIMEOUT_ONLY_ACTIVE,
|
||||
};
|
||||
|
||||
DECLARE_SETTING_ENUM(DistributedDDLOutputMode)
|
||||
|
@ -92,9 +92,16 @@ void validate(const ASTCreateQuery & create_query)
|
||||
|
||||
DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context)
|
||||
{
|
||||
const auto engine_name = create.storage->engine->name;
|
||||
/// check if the database engine is a valid one before proceeding
|
||||
if (!database_engines.contains(create.storage->engine->name))
|
||||
throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", create.storage->engine->name);
|
||||
if (!database_engines.contains(engine_name))
|
||||
{
|
||||
auto hints = getHints(engine_name);
|
||||
if (!hints.empty())
|
||||
throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine {}. Maybe you meant: {}", engine_name, toString(hints));
|
||||
else
|
||||
throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", create.storage->engine->name);
|
||||
}
|
||||
|
||||
/// if the engine is found (i.e. registered with the factory instance), then validate if the
|
||||
/// supplied engine arguments, settings and table overrides are valid for the engine.
|
||||
|
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/NamePrompter.h>
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Databases/IDatabase.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
@ -24,7 +25,7 @@ static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &eng
|
||||
return ast->as<ASTLiteral>()->value.safeGet<ValueType>();
|
||||
}
|
||||
|
||||
class DatabaseFactory : private boost::noncopyable
|
||||
class DatabaseFactory : private boost::noncopyable, public IHints<>
|
||||
{
|
||||
public:
|
||||
|
||||
@ -52,6 +53,14 @@ public:
|
||||
|
||||
const DatabaseEngines & getDatabaseEngines() const { return database_engines; }
|
||||
|
||||
std::vector<String> getAllRegisteredNames() const override
|
||||
{
|
||||
std::vector<String> result;
|
||||
auto getter = [](const auto & pair) { return pair.first; };
|
||||
std::transform(database_engines.begin(), database_engines.end(), std::back_inserter(result), getter);
|
||||
return result;
|
||||
}
|
||||
|
||||
private:
|
||||
DatabaseEngines database_engines;
|
||||
|
||||
|
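With IHints mixed in, the factory can suggest close matches for a mistyped engine name: getAllRegisteredNames supplies the candidate list and the inherited getHints does a distance-based lookup over it. The intended effect, sketched (the engine name below is an example input, and the shown message mirrors the new error path above):

/// Sketch: what the new error path produces for a typo.
/// CREATE DATABASE db ENGINE = Atomik
///   -> Unknown database engine Atomik. Maybe you meant: ['Atomic']
auto hints = DatabaseFactory::instance().getHints("Atomik"); /// inherited from IHints<>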
@ -2,6 +2,7 @@

#include <Dictionaries/IDictionary.h>
#include <Common/CurrentThread.h>
#include <Common/iota.h>
#include <Common/scope_guard_safe.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ThreadPool.h>

@ -53,7 +54,7 @@ public:
    LOG_TRACE(dictionary.log, "Will load the dictionary using {} threads (with {} backlog)", shards, backlog);

    shards_slots.resize(shards);
    std::iota(shards_slots.begin(), shards_slots.end(), 0);
    iota(shards_slots.data(), shards_slots.size(), UInt64(0));

    for (size_t shard = 0; shard < shards; ++shard)
    {

@ -5,6 +5,7 @@

#include <base/sort.h>

#include <Common/iota.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnTuple.h>
#include <DataTypes/DataTypeArray.h>

@ -507,7 +508,7 @@ const IColumn * unrollSimplePolygons(const ColumnPtr & column, Offset & offset)
    if (!ptr_polygons)
        throw Exception(ErrorCodes::TYPE_MISMATCH, "Expected a column containing arrays of points");
    offset.ring_offsets.assign(ptr_polygons->getOffsets());
    std::iota(offset.polygon_offsets.begin(), offset.polygon_offsets.end(), 1);
    iota<IColumn::Offsets::value_type>(offset.polygon_offsets.data(), offset.polygon_offsets.size(), IColumn::Offsets::value_type(1));
    offset.multi_polygon_offsets.assign(offset.polygon_offsets);

    return ptr_polygons->getDataPtr().get();

@ -1,6 +1,7 @@
#pragma once

#include <base/types.h>
#include <Common/iota.h>
#include <Common/ThreadPool.h>
#include <Poco/Logger.h>

@ -184,7 +185,7 @@ public:
{
    setBoundingBox();
    std::vector<size_t> order(polygons.size());
    std::iota(order.begin(), order.end(), 0);
    iota(order.data(), order.size(), size_t(0));
    root = makeCell(min_x, min_y, max_x, max_y, order);
}

@ -6,6 +6,7 @@
#include <Functions/FunctionsStringSimilarity.h>
#include <Common/PODArray.h>
#include <Common/UTF8Helpers.h>
#include <Common/iota.h>

#ifdef __SSE4_2__
#    include <nmmintrin.h>

@ -246,8 +247,7 @@ struct ByteEditDistanceImpl
    ResultType insertion = 0;
    ResultType deletion = 0;

    for (size_t i = 0; i <= haystack_size; ++i)
        distances0[i] = i;
    iota(distances0.data(), haystack_size + 1, ResultType(0));

    for (size_t pos_needle = 0; pos_needle < needle_size; ++pos_needle)
    {

@ -1,5 +1,6 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <DataTypes/DataTypeArray.h>
#include <Functions/FunctionFactory.h>

@ -80,7 +81,7 @@ public:
    const size_t cur_samples = std::min(num_elements, samples);

    indices.resize(num_elements);
    std::iota(indices.begin(), indices.end(), prev_array_offset);
    iota(indices.data(), indices.size(), prev_array_offset);
    std::shuffle(indices.begin(), indices.end(), rng);

    for (UInt64 i = 0; i < cur_samples; i++)

@ -7,6 +7,7 @@
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <Common/shuffle.h>
#include <Common/typeid_cast.h>

@ -150,7 +151,7 @@ ColumnPtr FunctionArrayShuffleImpl<Traits>::executeGeneric(const ColumnArray & a
    size_t size = offsets.size();
    size_t nested_size = array.getData().size();
    IColumn::Permutation permutation(nested_size);
    std::iota(std::begin(permutation), std::end(permutation), 0);
    iota(permutation.data(), permutation.size(), IColumn::Permutation::value_type(0));

    ColumnArray::Offset current_offset = 0;
    for (size_t i = 0; i < size; ++i)

@ -1,5 +1,6 @@
#include <Functions/array/arraySort.h>
#include <Functions/FunctionFactory.h>
#include <Functions/array/arraySort.h>
#include <Common/iota.h>

namespace DB
{

@ -55,9 +56,7 @@ ColumnPtr ArraySortImpl<positive, is_partial>::execute(
    size_t size = offsets.size();
    size_t nested_size = array.getData().size();
    IColumn::Permutation permutation(nested_size);

    for (size_t i = 0; i < nested_size; ++i)
        permutation[i] = i;
    iota(permutation.data(), nested_size, IColumn::Permutation::value_type(0));

    ColumnArray::Offset current_offset = 0;
    for (size_t i = 0; i < size; ++i)

@ -56,8 +56,7 @@ public:
    auto column = ColumnUInt64::create();
    auto & data = column->getData();
    data.resize(input_rows_count);
    for (size_t i = 0; i < input_rows_count; ++i)
        data[i] = i;
    iota(data.data(), input_rows_count, UInt64(0));

    return column;
}

@ -1,6 +1,6 @@
#include "config.h"

#ifdef ENABLE_SQIDS
#if USE_SQIDS

#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>

@ -57,9 +57,10 @@ public:

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        size_t num_args = arguments.size();
        auto col_res = ColumnString::create();
        col_res->reserve(input_rows_count);

        const size_t num_args = arguments.size();
        std::vector<UInt64> numbers(num_args);
        for (size_t i = 0; i < input_rows_count; ++i)
        {

@ -83,7 +84,7 @@ REGISTER_FUNCTION(Sqid)
{
    factory.registerFunction<FunctionSqid>(FunctionDocumentation{
        .description=R"(
Transforms numbers into YouTube-like short URL hash called [Sqid](https://sqids.org/).)",
Transforms numbers into a [Sqid](https://sqids.org/) which is a Youtube-like ID string.)",
        .syntax="sqid(number1, ...)",
        .arguments={{"number1, ...", "Arbitrarily many UInt8, UInt16, UInt32 or UInt64 arguments"}},
        .returned_value="A hash id [String](/docs/en/sql-reference/data-types/string.md).",
@ -3,6 +3,7 @@
#include <Columns/ColumnConst.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionFactory.h>
#include <Common/iota.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/UTF8Helpers.h>
#include <Common/HashTable/HashMap.h>

@ -31,7 +32,7 @@ struct TranslateImpl
    if (map_from.size() != map_to.size())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second and third arguments must be the same length");

    std::iota(map.begin(), map.end(), 0);
    iota(map.data(), map.size(), UInt8(0));

    for (size_t i = 0; i < map_from.size(); ++i)
    {

@ -129,7 +130,7 @@ struct TranslateUTF8Impl
    if (map_from_size != map_to_size)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second and third arguments must be the same length");

    std::iota(map_ascii.begin(), map_ascii.end(), 0);
    iota(map_ascii.data(), map_ascii.size(), UInt32(0));

    const UInt8 * map_from_ptr = reinterpret_cast<const UInt8 *>(map_from.data());
    const UInt8 * map_from_end = map_from_ptr + map_from.size();

@ -412,7 +412,8 @@ void executeQueryWithParallelReplicas(
        new_cluster = not_optimized_cluster->getClusterWithReplicasAsShards(settings, settings.max_parallel_replicas);
    }

    auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>(new_cluster->getShardCount());
    auto coordinator
        = std::make_shared<ParallelReplicasReadingCoordinator>(new_cluster->getShardCount(), settings.parallel_replicas_mark_segment_size);
    auto external_tables = new_context->getExternalTables();
    auto read_from_remote = std::make_unique<ReadFromParallelRemoteReplicasStep>(
        query_ast,
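Passing parallel_replicas_mark_segment_size into the coordinator fixes the granularity at which parts are virtually split for distribution between replicas. Roughly (illustrative arithmetic, not a function from the codebase):

/// With the default parallel_replicas_mark_segment_size = 128,
/// a part with 10000 marks yields ceil(10000 / 128) = 79 segments
/// that the coordinator can assign (or let replicas steal) independently.
size_t segments = (marks_in_part + mark_segment_size - 1) / mark_segment_size;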
@ -330,6 +330,9 @@ struct ContextSharedPart : boost::noncopyable

    mutable ThrottlerPtr backups_server_throttler; /// A server-wide throttler for BACKUPs

    mutable ThrottlerPtr mutations_throttler; /// A server-wide throttler for mutations
    mutable ThrottlerPtr merges_throttler; /// A server-wide throttler for merges

    MultiVersion<Macros> macros; /// Substitutions extracted from config.
    std::unique_ptr<DDLWorker> ddl_worker TSA_GUARDED_BY(mutex); /// Process ddl commands from zk.
    LoadTaskPtr ddl_worker_startup_task; /// To postpone `ddl_worker->startup()` after all tables startup

@ -738,6 +741,12 @@ struct ContextSharedPart : boost::noncopyable

        if (auto bandwidth = server_settings.max_backup_bandwidth_for_server)
            backups_server_throttler = std::make_shared<Throttler>(bandwidth);

        if (auto bandwidth = server_settings.max_mutations_bandwidth_for_server)
            mutations_throttler = std::make_shared<Throttler>(bandwidth);

        if (auto bandwidth = server_settings.max_merges_bandwidth_for_server)
            merges_throttler = std::make_shared<Throttler>(bandwidth);
    }
};

@ -3001,6 +3010,16 @@ ThrottlerPtr Context::getBackupsThrottler() const
    return throttler;
}

ThrottlerPtr Context::getMutationsThrottler() const
{
    return shared->mutations_throttler;
}

ThrottlerPtr Context::getMergesThrottler() const
{
    return shared->merges_throttler;
}

bool Context::hasDistributedDDL() const
{
    return getConfigRef().has("distributed_ddl");

@ -1328,6 +1328,9 @@ public:

    ThrottlerPtr getBackupsThrottler() const;

    ThrottlerPtr getMutationsThrottler() const;
    ThrottlerPtr getMergesThrottler() const;

    /// Kitchen sink
    using ContextData::KitchenSink;
    using ContextData::kitchen_sink;
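A null throttler from these getters means no limit was configured, which matches the "Zero means unlimited" wording of the new server settings. The consuming side would look roughly like this (a hedged sketch; Throttler::add is the existing rate-limiting entry point, but this exact call site is illustrative):

/// Sketch: account bytes read by a mutation against the server-wide budget.
/// add() blocks the caller until the configured bytes-per-second rate allows it.
if (ThrottlerPtr throttler = context->getMutationsThrottler())
    throttler->add(read_bytes);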
@ -215,20 +215,47 @@ ContextMutablePtr DDLTaskBase::makeQueryContext(ContextPtr from_context, const Z
}


bool DDLTask::findCurrentHostID(ContextPtr global_context, Poco::Logger * log, const ZooKeeperPtr & zookeeper)
bool DDLTask::findCurrentHostID(ContextPtr global_context, Poco::Logger * log, const ZooKeeperPtr & zookeeper, const std::optional<std::string> & config_host_name)
{
bool host_in_hostlist = false;
std::exception_ptr first_exception = nullptr;

const auto maybe_secure_port = global_context->getTCPPortSecure();
const auto port = global_context->getTCPPort();

if (config_host_name)
{
bool is_local_port = (maybe_secure_port && HostID(*config_host_name, *maybe_secure_port).isLocalAddress(*maybe_secure_port)) ||
HostID(*config_host_name, port).isLocalAddress(port);

if (!is_local_port)
throw Exception(
ErrorCodes::DNS_ERROR,
"{} is not a local address. Check parameter 'host_name' in the configuration",
*config_host_name);
}

for (const HostID & host : entry.hosts)
{
auto maybe_secure_port = global_context->getTCPPortSecure();
if (config_host_name)
{
if (config_host_name != host.host_name)
continue;

if (maybe_secure_port != host.port && port != host.port)
continue;

host_in_hostlist = true;
host_id = host;
host_id_str = host.toString();
break;
}

try
{
/// The port is considered local if it matches the TCP or TCP-secure port that the server is listening on.
bool is_local_port
= (maybe_secure_port && host.isLocalAddress(*maybe_secure_port)) || host.isLocalAddress(global_context->getTCPPort());
= (maybe_secure_port && host.isLocalAddress(*maybe_secure_port)) || host.isLocalAddress(port);

if (!is_local_port)
continue;
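Editor's note: the matching logic above combines two filters: the configured host name must equal the entry's host name, and the entry's port must match one of the server's own ports. A standalone sketch of that predicate (types simplified; `HostEntry` is a stand-in for the real HostID):

    #include <cstdint>
    #include <optional>
    #include <string>

    struct HostEntry { std::string host_name; uint16_t port; };

    /// True when the entry refers to this server: same configured name and a port
    /// that matches either the plain TCP port or the secure one (when enabled).
    bool matchesConfiguredHost(const HostEntry & entry, const std::string & config_host_name,
                               uint16_t tcp_port, std::optional<uint16_t> secure_port)
    {
        if (entry.host_name != config_host_name)
            return false;
        return entry.port == tcp_port || (secure_port && entry.port == *secure_port);
    }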
@ -44,6 +44,9 @@ struct HostID
explicit HostID(const Cluster::Address & address)
: host_name(address.host_name), port(address.port) {}

HostID(const String & host_name_, UInt16 port_)
: host_name(host_name_), port(port_) {}

static HostID fromString(const String & host_port_str);

String toString() const

@ -143,7 +146,7 @@ struct DDLTask : public DDLTaskBase
{
DDLTask(const String & name, const String & path) : DDLTaskBase(name, path) {}

bool findCurrentHostID(ContextPtr global_context, Poco::Logger * log, const ZooKeeperPtr & zookeeper);
bool findCurrentHostID(ContextPtr global_context, Poco::Logger * log, const ZooKeeperPtr & zookeeper, const std::optional<std::string> & config_host_name);

void setClusterInfo(ContextPtr context, Poco::Logger * log);

@ -107,6 +107,9 @@ DDLWorker::DDLWorker(
cleanup_delay_period = config->getUInt64(prefix + ".cleanup_delay_period", static_cast<UInt64>(cleanup_delay_period));
max_tasks_in_queue = std::max<UInt64>(1, config->getUInt64(prefix + ".max_tasks_in_queue", max_tasks_in_queue));

if (config->has(prefix + ".host_name"))
config_host_name = config->getString(prefix + ".host_name");

if (config->has(prefix + ".profile"))
context->setSetting("profile", config->getString(prefix + ".profile"));
}
@ -214,7 +217,7 @@ DDLTaskPtr DDLWorker::initAndCheckTask(const String & entry_name, String & out_r
/// Stage 2: resolve host_id and check if we should execute query or not
/// Multiple clusters can use single DDL queue path in ZooKeeper,
/// so we should skip the task if we cannot find the current host in the cluster's host list.
if (!task->findCurrentHostID(context, log, zookeeper))
if (!task->findCurrentHostID(context, log, zookeeper, config_host_name))
{
out_reason = "There is no local address in host list";
return add_to_skip_set();
@ -153,6 +153,8 @@ protected:
ContextMutablePtr context;
Poco::Logger * log;

std::optional<std::string> config_host_name; /// host_name from config

std::string host_fqdn; /// current host domain name
std::string host_fqdn_id; /// host_name:port
std::string queue_dir; /// dir with queue of queries

@ -2501,7 +2501,12 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
max_block_size = std::max<UInt64>(1, max_block_limited);
max_threads_execute_query = max_streams = 1;
}
if (max_block_limited < local_limits.local_limits.size_limits.max_rows)
if (local_limits.local_limits.size_limits.max_rows != 0)
{
if (max_block_limited < local_limits.local_limits.size_limits.max_rows)
query_info.limit = max_block_limited;
}
else
{
query_info.limit = max_block_limited;
}

@ -1280,6 +1280,7 @@ void MutationsInterpreter::Source::read(
VirtualColumns virtual_columns(std::move(required_columns), part);

createReadFromPartStep(
MergeTreeSequentialSourceType::Mutation,
plan, *data, storage_snapshot, part,
std::move(virtual_columns.columns_to_read),
apply_deleted_mask_, filter, context_,

@ -200,8 +200,6 @@ public:
Status prepare() override;

private:
static Strings getChildrenAllowNoNode(const std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & node_path);

static Block getSampleBlock(ContextPtr context_, bool hosts_to_wait);

Strings getNewAndUpdate(const Strings & current_list_of_finished_hosts);

@ -228,7 +226,8 @@ private:
NameSet waiting_hosts; /// hosts from task host list
NameSet finished_hosts; /// finished hosts from host list
NameSet ignoring_hosts; /// appeared hosts that are not in hosts list
Strings current_active_hosts; /// Hosts that were in active state at the last check
Strings current_active_hosts; /// Hosts that are currently executing the task
NameSet offline_hosts; /// Hosts that are not currently running
size_t num_hosts_finished = 0;

/// Save the first detected error and throw it at the end of execution

@ -237,7 +236,10 @@ private:
Int64 timeout_seconds = 120;
bool is_replicated_database = false;
bool throw_on_timeout = true;
bool only_running_hosts = false;

bool timeout_exceeded = false;
bool stop_waiting_offline_hosts = false;
};
@ -310,12 +312,15 @@ DDLQueryStatusSource::DDLQueryStatusSource(
, log(&Poco::Logger::get("DDLQueryStatusSource"))
{
auto output_mode = context->getSettingsRef().distributed_ddl_output_mode;
throw_on_timeout = output_mode == DistributedDDLOutputMode::THROW || output_mode == DistributedDDLOutputMode::NONE;
throw_on_timeout = output_mode == DistributedDDLOutputMode::THROW || output_mode == DistributedDDLOutputMode::THROW_ONLY_ACTIVE
|| output_mode == DistributedDDLOutputMode::NONE;

if (hosts_to_wait)
{
waiting_hosts = NameSet(hosts_to_wait->begin(), hosts_to_wait->end());
is_replicated_database = true;
only_running_hosts = output_mode == DistributedDDLOutputMode::THROW_ONLY_ACTIVE ||
output_mode == DistributedDDLOutputMode::NULL_STATUS_ON_TIMEOUT_ONLY_ACTIVE;
}
else
{

@ -377,6 +382,38 @@ Chunk DDLQueryStatusSource::generateChunkWithUnfinishedHosts() const
return Chunk(std::move(columns), unfinished_hosts.size());
}

static NameSet getOfflineHosts(const String & node_path, const NameSet & hosts_to_wait, const ZooKeeperPtr & zookeeper, Poco::Logger * log)
{
fs::path replicas_path;
if (node_path.ends_with('/'))
replicas_path = fs::path(node_path).parent_path().parent_path().parent_path() / "replicas";
else
replicas_path = fs::path(node_path).parent_path().parent_path() / "replicas";

Strings paths;
Strings hosts_array;
for (const auto & host : hosts_to_wait)
{
hosts_array.push_back(host);
paths.push_back(replicas_path / host / "active");
}

NameSet offline;
auto res = zookeeper->tryGet(paths);
for (size_t i = 0; i < res.size(); ++i)
if (res[i].error == Coordination::Error::ZNONODE)
offline.insert(hosts_array[i]);

if (offline.size() == hosts_to_wait.size())
{
/// Avoid reporting that all hosts are offline
LOG_WARNING(log, "Did not find active hosts, will wait for all {} hosts. This should not happen often", offline.size());
return {};
}

return offline;
}

Chunk DDLQueryStatusSource::generate()
{
bool all_hosts_finished = num_hosts_finished >= waiting_hosts.size();
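Editor's note: the path arithmetic in getOfflineHosts assumes a fixed ZooKeeper layout in which the task node lives a few levels below the same root that holds the `replicas` subtree. A sketch of just that derivation, under that layout assumption:

    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    /// Derives "<root>/replicas" from a task node path such as
    /// "<root>/log/query-0000000001"; one extra parent_path() is needed when the
    /// input carries a trailing slash, since it adds an empty final component.
    fs::path replicasPath(const std::string & node_path)
    {
        fs::path path(node_path);
        if (node_path.ends_with('/'))
            return path.parent_path().parent_path().parent_path() / "replicas";
        return path.parent_path().parent_path() / "replicas";
    }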
@ -398,7 +435,7 @@ Chunk DDLQueryStatusSource::generate()
if (isCancelled())
return {};

if (timeout_seconds >= 0 && watch.elapsedSeconds() > timeout_seconds)
if (stop_waiting_offline_hosts || (timeout_seconds >= 0 && watch.elapsedSeconds() > timeout_seconds))
{
timeout_exceeded = true;

@ -406,7 +443,7 @@ Chunk DDLQueryStatusSource::generate()
size_t num_active_hosts = current_active_hosts.size();

constexpr auto msg_format = "Watching task {} is executing longer than distributed_ddl_task_timeout (={}) seconds. "
"There are {} unfinished hosts ({} of them are currently active), "
"There are {} unfinished hosts ({} of them are currently executing the task), "
"they are going to execute the query in background";
if (throw_on_timeout)
{
|
||||
return generateChunkWithUnfinishedHosts();
|
||||
}
|
||||
|
||||
if (num_hosts_finished != 0 || try_number != 0)
|
||||
{
|
||||
sleepForMilliseconds(std::min<size_t>(1000, 50 * (try_number + 1)));
|
||||
}
|
||||
sleepForMilliseconds(std::min<size_t>(1000, 50 * try_number));
|
||||
|
||||
bool node_exists = false;
|
||||
Strings tmp_hosts;
|
||||
@ -440,9 +474,21 @@ Chunk DDLQueryStatusSource::generate()
|
||||
retries_ctl.retryLoop([&]()
|
||||
{
|
||||
auto zookeeper = context->getZooKeeper();
|
||||
node_exists = zookeeper->exists(node_path);
|
||||
tmp_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / node_to_wait);
|
||||
tmp_active_hosts = getChildrenAllowNoNode(zookeeper, fs::path(node_path) / "active");
|
||||
Strings paths = {String(fs::path(node_path) / node_to_wait), String(fs::path(node_path) / "active")};
|
||||
auto res = zookeeper->tryGetChildren(paths);
|
||||
for (size_t i = 0; i < res.size(); ++i)
|
||||
if (res[i].error != Coordination::Error::ZOK && res[i].error != Coordination::Error::ZNONODE)
|
||||
throw Coordination::Exception::fromPath(res[i].error, paths[i]);
|
||||
|
||||
if (res[0].error == Coordination::Error::ZNONODE)
|
||||
node_exists = zookeeper->exists(node_path);
|
||||
else
|
||||
node_exists = true;
|
||||
tmp_hosts = res[0].names;
|
||||
tmp_active_hosts = res[1].names;
|
||||
|
||||
if (only_running_hosts)
|
||||
offline_hosts = getOfflineHosts(node_path, waiting_hosts, zookeeper, log);
|
||||
});
|
||||
}
|
||||
|
||||
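Editor's note: note the off-by-one fix in the polling delay: with `50 * try_number` the very first status check happens immediately, and later checks back off linearly up to a one-second cap. The schedule, in isolation:

    #include <algorithm>
    #include <cstddef>

    /// Delay before poll attempt `try_number`: 0 ms, 50 ms, 100 ms, ... capped at 1000 ms.
    size_t pollDelayMs(size_t try_number)
    {
        return std::min<size_t>(1000, 50 * try_number);
    }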
@ -460,6 +506,17 @@ Chunk DDLQueryStatusSource::generate()

Strings new_hosts = getNewAndUpdate(tmp_hosts);
++try_number;

if (only_running_hosts)
{
size_t num_finished_or_offline = 0;
for (const auto & host : waiting_hosts)
num_finished_or_offline += finished_hosts.contains(host) || offline_hosts.contains(host);

if (num_finished_or_offline == waiting_hosts.size())
stop_waiting_offline_hosts = true;
}

if (new_hosts.empty())
continue;
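Editor's note: the stop condition above counts hosts that are either finished or offline; waiting stops only when that count covers the whole waiting set. The same predicate written standalone:

    #include <string>
    #include <unordered_set>

    using NameSet = std::unordered_set<std::string>;

    /// True when every waited-on host has either reported a result or is offline,
    /// so there is nothing left to wait for.
    bool allHostsDone(const NameSet & waiting, const NameSet & finished, const NameSet & offline)
    {
        for (const auto & host : waiting)
            if (!finished.contains(host) && !offline.contains(host))
                return false;
        return true;
    }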
@ -470,7 +527,13 @@ Chunk DDLQueryStatusSource::generate()
{
ExecutionStatus status(-1, "Cannot obtain error message");

if (node_to_wait == "finished")
/// Replicated database retries in case of error, it should not write error status.
#ifdef ABORT_ON_LOGICAL_ERROR
bool need_check_status = true;
#else
bool need_check_status = !is_replicated_database;
#endif
if (need_check_status)
{
String status_data;
bool finished_exists = false;

@ -496,7 +559,6 @@ Chunk DDLQueryStatusSource::generate()
if (status.code != 0 && !first_exception
&& context->getSettingsRef().distributed_ddl_output_mode != DistributedDDLOutputMode::NEVER_THROW)
{
/// Replicated database retries in case of error, it should not write error status.
if (is_replicated_database)
throw Exception(ErrorCodes::LOGICAL_ERROR, "There was an error on {}: {} (probably it's a bug)", host_id, status.message);

@ -555,15 +617,6 @@ IProcessor::Status DDLQueryStatusSource::prepare()
return ISource::prepare();
}

Strings DDLQueryStatusSource::getChildrenAllowNoNode(const std::shared_ptr<zkutil::ZooKeeper> & zookeeper, const String & node_path)
{
Strings res;
Coordination::Error code = zookeeper->tryGetChildren(node_path, res);
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
throw Coordination::Exception::fromPath(code, node_path);
return res;
}

Strings DDLQueryStatusSource::getNewAndUpdate(const Strings & current_list_of_finished_hosts)
{
Strings diff;
@ -4,6 +4,7 @@
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnTuple.h>
#include <Functions/FunctionHelpers.h>
#include <Common/iota.h>

#ifdef __SSE2__
#include <emmintrin.h>

@ -155,8 +156,7 @@ void getBlockSortPermutationImpl(const Block & block, const SortDescription & de
{
size_t size = block.rows();
permutation.resize(size);
for (size_t i = 0; i < size; ++i)
permutation[i] = i;
iota(permutation.data(), size, IColumn::Permutation::value_type(0));

if (limit >= size)
limit = 0;
@ -11,6 +11,7 @@
#include <memory>
#include <thread>

#include <Common/iota.h>
#include <Common/randomSeed.h>
#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>

@ -788,7 +789,7 @@ TEST_F(FileCacheTest, writeBuffer)

/// get random permutation of indexes
std::vector<size_t> indexes(data.size());
std::iota(indexes.begin(), indexes.end(), 0);
iota(indexes.data(), indexes.size(), size_t(0));
std::shuffle(indexes.begin(), indexes.end(), rng);

for (auto i : indexes)
@ -8,6 +8,8 @@
#include <Analyzer/QueryNode.h>
#include <Analyzer/TableNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/ListNode.h>

#include <Planner/PlannerContext.h>
#include <Planner/PlannerActionsVisitor.h>

@ -33,6 +35,28 @@ public:

void visitImpl(QueryTreeNodePtr & node)
{
/// Special case for the USING clause, which contains references to ALIAS columns.
/// We cannot modify such a ColumnNode.
if (auto * join_node = node->as<JoinNode>())
{
if (!join_node->isUsingJoinExpression())
return;

auto & using_list = join_node->getJoinExpression()->as<ListNode&>();
for (auto & using_element : using_list)
{
auto & column_node = using_element->as<ColumnNode&>();
/// This list contains column nodes from left and right tables.
auto & columns_from_subtrees = column_node.getExpressionOrThrow()->as<ListNode&>().getNodes();

/// Visit left table column node.
visitUsingColumn(columns_from_subtrees[0]);
/// Visit right table column node.
visitUsingColumn(columns_from_subtrees[1]);
}
return;
}

auto * column_node = node->as<ColumnNode>();
if (!column_node)
return;

@ -55,7 +79,13 @@ public:
if (column_node->hasExpression() && column_source_node_type != QueryTreeNodeType::ARRAY_JOIN)
{
/// Replace ALIAS column with expression
table_expression_data.addAliasColumnName(column_node->getColumnName());
bool column_already_exists = table_expression_data.hasColumn(column_node->getColumnName());
if (!column_already_exists)
{
auto column_identifier = planner_context.getGlobalPlannerContext()->createColumnIdentifier(node);
table_expression_data.addAliasColumnName(column_node->getColumnName(), column_identifier);
}

node = column_node->getExpression();
visitImpl(node);
return;

@ -78,13 +108,38 @@ public:
table_expression_data.addColumn(column_node->getColumn(), column_identifier);
}

static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node)
static bool needChildVisit(const QueryTreeNodePtr & parent, const QueryTreeNodePtr & child_node)
{
if (auto * join_node = parent->as<JoinNode>())
{
if (join_node->getJoinExpression() == child_node && join_node->isUsingJoinExpression())
return false;
}
auto child_node_type = child_node->getNodeType();
return !(child_node_type == QueryTreeNodeType::QUERY || child_node_type == QueryTreeNodeType::UNION);
}

private:

void visitUsingColumn(QueryTreeNodePtr & node)
{
auto & column_node = node->as<ColumnNode&>();
if (column_node.hasExpression())
{
auto & table_expression_data = planner_context.getOrCreateTableExpressionData(column_node.getColumnSource());
bool column_already_exists = table_expression_data.hasColumn(column_node.getColumnName());
if (column_already_exists)
return;

auto column_identifier = planner_context.getGlobalPlannerContext()->createColumnIdentifier(node);
table_expression_data.addAliasColumnName(column_node.getColumnName(), column_identifier);

visitImpl(column_node.getExpressionOrThrow());
}
else
visitImpl(node);
}

PlannerContext & planner_context;
};
@ -645,7 +645,12 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
max_threads_execute_query = 1;
}

if (max_block_size_limited < select_query_info.local_storage_limits.local_limits.size_limits.max_rows)
if (select_query_info.local_storage_limits.local_limits.size_limits.max_rows != 0)
{
if (max_block_size_limited < select_query_info.local_storage_limits.local_limits.size_limits.max_rows)
table_expression_query_info.limit = max_block_size_limited;
}
else
{
table_expression_query_info.limit = max_block_size_limited;
}

@ -978,6 +983,57 @@ void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextP
plan_to_add_cast.addStep(std::move(cast_join_columns_step));
}

/// Actions to calculate table columns that have a functional representation (ALIASes and subcolumns)
/// and are used in the USING clause of a JOIN expression.
struct UsingAliasKeyActions
{
UsingAliasKeyActions(
const ColumnsWithTypeAndName & left_plan_output_columns,
const ColumnsWithTypeAndName & right_plan_output_columns
)
: left_alias_columns_keys(std::make_shared<ActionsDAG>(left_plan_output_columns))
, right_alias_columns_keys(std::make_shared<ActionsDAG>(right_plan_output_columns))
{}

void addLeftColumn(QueryTreeNodePtr & node, const ColumnsWithTypeAndName & plan_output_columns, const PlannerContextPtr & planner_context)
{
addColumnImpl(left_alias_columns_keys, node, plan_output_columns, planner_context);
}

void addRightColumn(QueryTreeNodePtr & node, const ColumnsWithTypeAndName & plan_output_columns, const PlannerContextPtr & planner_context)
{
addColumnImpl(right_alias_columns_keys, node, plan_output_columns, planner_context);
}

ActionsDAGPtr getLeftActions()
{
left_alias_columns_keys->projectInput();
return std::move(left_alias_columns_keys);
}

ActionsDAGPtr getRightActions()
{
right_alias_columns_keys->projectInput();
return std::move(right_alias_columns_keys);
}

private:
void addColumnImpl(ActionsDAGPtr & alias_columns_keys, QueryTreeNodePtr & node, const ColumnsWithTypeAndName & plan_output_columns, const PlannerContextPtr & planner_context)
{
auto & column_node = node->as<ColumnNode&>();
if (column_node.hasExpression())
{
auto dag = buildActionsDAGFromExpressionNode(column_node.getExpressionOrThrow(), plan_output_columns, planner_context);
const auto & left_inner_column_identifier = planner_context->getColumnNodeIdentifierOrThrow(node);
dag->addOrReplaceInOutputs(dag->addAlias(*dag->getOutputs().front(), left_inner_column_identifier));
alias_columns_keys->mergeInplace(std::move(*dag));
}
}

ActionsDAGPtr left_alias_columns_keys;
ActionsDAGPtr right_alias_columns_keys;
};

JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_expression,
JoinTreeQueryPlan left_join_tree_query_plan,
JoinTreeQueryPlan right_join_tree_query_plan,
@ -1034,6 +1090,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_

if (join_node.isUsingJoinExpression())
{
UsingAliasKeyActions using_alias_key_actions{left_plan_output_columns, right_plan_output_columns};

auto & join_node_using_columns_list = join_node.getJoinExpression()->as<ListNode &>();
for (auto & join_node_using_node : join_node_using_columns_list.getNodes())
{

@ -1043,9 +1101,13 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_
auto & left_inner_column_node = inner_columns_list.getNodes().at(0);
auto & left_inner_column = left_inner_column_node->as<ColumnNode &>();

using_alias_key_actions.addLeftColumn(left_inner_column_node, left_plan_output_columns, planner_context);

auto & right_inner_column_node = inner_columns_list.getNodes().at(1);
auto & right_inner_column = right_inner_column_node->as<ColumnNode &>();

using_alias_key_actions.addRightColumn(right_inner_column_node, right_plan_output_columns, planner_context);

const auto & join_node_using_column_node_type = join_node_using_column_node.getColumnType();
if (!left_inner_column.getColumnType()->equals(*join_node_using_column_node_type))
{

@ -1059,6 +1121,14 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_
right_plan_column_name_to_cast_type.emplace(right_inner_column_identifier, join_node_using_column_node_type);
}
}

auto left_alias_columns_keys_step = std::make_unique<ExpressionStep>(left_plan.getCurrentDataStream(), using_alias_key_actions.getLeftActions());
left_alias_columns_keys_step->setStepDescription("Actions for left table alias column keys");
left_plan.addStep(std::move(left_alias_columns_keys_step));

auto right_alias_columns_keys_step = std::make_unique<ExpressionStep>(right_plan.getCurrentDataStream(), using_alias_key_actions.getRightActions());
right_alias_columns_keys_step->setStepDescription("Actions for right table alias column keys");
right_plan.addStep(std::move(right_alias_columns_keys_step));
}

auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map<std::string, DataTypePtr> & plan_column_name_to_cast_type)

@ -80,9 +80,11 @@ public:
}

/// Add alias column name
void addAliasColumnName(const std::string & column_name)
void addAliasColumnName(const std::string & column_name, const ColumnIdentifier & column_identifier)
{
alias_columns_names.insert(column_name);

column_name_to_column_identifier.emplace(column_name, column_identifier);
}

/// Get alias columns names
@ -127,7 +127,12 @@ struct Pattern
{
hash.update(rule_type);
hash.update(regexp_str);
hash.update(function->getName());
if (function)
{
hash.update(function->getName());
for (const auto & p : function->getParameters())
hash.update(toString(p));
}
for (const auto & r : retentions)
{
hash.update(r.age);
@ -418,7 +418,13 @@ Pipe ReadFromMergeTree::readFromPool(
&& settings.allow_prefetched_read_pool_for_local_filesystem
&& MergeTreePrefetchedReadPool::checkReadMethodAllowed(reader_settings.read_settings.local_fs_method);

if (allow_prefetched_remote || allow_prefetched_local)
/** Do not use the prefetched read pool if the query is a trivial limit query,
  * because the time spent filling per-thread tasks can exceed the whole query
  * execution time for big tables with a small limit.
  */
bool use_prefetched_read_pool = query_info.limit == 0 && (allow_prefetched_remote || allow_prefetched_local);

if (use_prefetched_read_pool)
{
pool = std::make_shared<MergeTreePrefetchedReadPool>(
std::move(parts_with_range),
@ -9,6 +9,7 @@
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Storages/MergeTree/KeyCondition.h>
#include <Storages/System/StorageSystemNumbers.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>

namespace DB

@ -40,11 +41,10 @@ protected:
auto column = ColumnUInt64::create(block_size);
ColumnUInt64::Container & vec = column->getData();

size_t curr = next; /// The local variable for some reason works faster (>20%) than a member of the class.
UInt64 curr = next; /// The local variable for some reason works faster (>20%) than a member of the class.
UInt64 * pos = vec.data(); /// This also accelerates the code.
UInt64 * end = &vec[block_size];
while (pos < end)
*pos++ = curr++;
iota(pos, static_cast<size_t>(end - pos), curr);

next += step;

@ -211,17 +211,18 @@ protected:
{
auto start_value_64 = static_cast<UInt64>(start_value);
auto end_value_64 = static_cast<UInt64>(end_value);
while (start_value_64 < end_value_64)
*(pos++) = start_value_64++;
auto size = end_value_64 - start_value_64;
iota(pos, static_cast<size_t>(size), start_value_64);
pos += size;
}
};

if (can_provide > need)
{
UInt64 start_value = first_value(range) + cursor.offset_in_range;
UInt64 end_value = start_value + need; /// end_value will never overflow
while (start_value < end_value)
*(pos++) = start_value++;
/// end_value will never overflow
iota(pos, static_cast<size_t>(need), start_value);
pos += need;

provided += need;
cursor.offset_in_range += need;
@ -18,31 +18,25 @@ protected:

void setKeyConditionImpl(const SelectQueryInfo & query_info, ContextPtr context, const Block & keys)
{
if (!context->getSettingsRef().allow_experimental_analyzer)
{
key_condition = std::make_shared<const KeyCondition>(
query_info,
context,
keys.getNames(),
std::make_shared<ExpressionActions>(std::make_shared<ActionsDAG>(keys.getColumnsWithTypeAndName())));
}
key_condition = std::make_shared<const KeyCondition>(
query_info,
context,
keys.getNames(),
std::make_shared<ExpressionActions>(std::make_shared<ActionsDAG>(keys.getColumnsWithTypeAndName())));
}

void setKeyConditionImpl(const ActionsDAG::NodeRawConstPtrs & nodes, ContextPtr context, const Block & keys)
{
if (context->getSettingsRef().allow_experimental_analyzer)
{
std::unordered_map<std::string, DB::ColumnWithTypeAndName> node_name_to_input_column;
for (const auto & column : keys.getColumnsWithTypeAndName())
node_name_to_input_column.insert({column.name, column});
std::unordered_map<std::string, DB::ColumnWithTypeAndName> node_name_to_input_column;
for (const auto & column : keys.getColumnsWithTypeAndName())
node_name_to_input_column.insert({column.name, column});

auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(nodes, node_name_to_input_column, context);
key_condition = std::make_shared<const KeyCondition>(
filter_actions_dag,
context,
keys.getNames(),
std::make_shared<ExpressionActions>(std::make_shared<ActionsDAG>(keys.getColumnsWithTypeAndName())));
}
auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(nodes, node_name_to_input_column, context);
key_condition = std::make_shared<const KeyCondition>(
filter_actions_dag,
context,
keys.getNames(),
std::make_shared<ExpressionActions>(std::make_shared<ActionsDAG>(keys.getColumnsWithTypeAndName())));
}

public:
@ -1,7 +1,8 @@
#include <Processors/Transforms/PartialSortingTransform.h>
#include <Interpreters/sortBlock.h>
#include <Core/SortCursor.h>
#include <Interpreters/sortBlock.h>
#include <Processors/Transforms/PartialSortingTransform.h>
#include <Common/PODArray.h>
#include <Common/iota.h>

namespace DB
{

@ -36,9 +37,7 @@ size_t getFilterMask(const ColumnRawPtrs & raw_block_columns, const Columns & th
else
{
rows_to_compare.resize(num_rows);

for (size_t i = 0; i < num_rows; ++i)
rows_to_compare[i] = i;
iota(rows_to_compare.data(), num_rows, UInt64(0));

size_t size = description.size();
for (size_t i = 0; i < size; ++i)
@ -39,6 +39,7 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int UNKNOWN_TABLE;
}

ThreadStatusesHolder::~ThreadStatusesHolder()

@ -316,7 +317,21 @@ Chain buildPushingToViewsChain(
type = QueryViewsLogElement::ViewType::MATERIALIZED;
result_chain.addTableLock(lock);

StoragePtr inner_table = materialized_view->getTargetTable();
StoragePtr inner_table = materialized_view->tryGetTargetTable();
/// If the target table was dropped, ignore this materialized view.
if (!inner_table)
{
if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table)
continue;

throw Exception(
ErrorCodes::UNKNOWN_TABLE,
"Target table '{}' of view '{}' doesn't exist. To ignore this view use setting "
"ignore_materialized_views_with_dropped_target_table",
materialized_view->getTargetTableId().getFullTableName(),
view_id.getFullTableName());
}

auto inner_table_id = inner_table->getStorageID();
auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr();
@ -1,14 +1,12 @@
#include <QueryPipeline/QueryPipelineBuilder.h>

#include <Common/CurrentThread.h>
#include <Common/typeid_cast.h>
#include "Core/UUID.h"
#include <Core/SortDescription.h>
#include <Core/UUID.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/IJoin.h>
#include <Interpreters/TableJoin.h>
#include <IO/WriteHelpers.h>
#include <Processors/ConcatProcessor.h>
#include <Processors/DelayedPortsProcessor.h>
#include <Processors/Executors/PipelineExecutor.h>

@ -25,11 +23,14 @@
#include <Processors/Transforms/ExtremesTransform.h>
#include <Processors/Transforms/JoiningTransform.h>
#include <Processors/Transforms/MergeJoinTransform.h>
#include <Processors/Transforms/PasteJoinTransform.h>
#include <Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h>
#include <Processors/Transforms/PartialSortingTransform.h>
#include <Processors/Transforms/PasteJoinTransform.h>
#include <Processors/Transforms/TotalsHavingTransform.h>
#include <QueryPipeline/narrowPipe.h>
#include <Common/CurrentThread.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>

namespace DB
{

@ -619,8 +620,7 @@ void QueryPipelineBuilder::addPipelineBefore(QueryPipelineBuilder pipeline)
bool has_extremes = pipe.getExtremesPort();
size_t num_extra_ports = (has_totals ? 1 : 0) + (has_extremes ? 1 : 0);
IProcessor::PortNumbers delayed_streams(pipe.numOutputPorts() + num_extra_ports);
for (size_t i = 0; i < delayed_streams.size(); ++i)
delayed_streams[i] = i;
iota(delayed_streams.data(), delayed_streams.size(), IProcessor::PortNumbers::value_type(0));

auto * collected_processors = pipe.collected_processors;

@ -15,6 +15,9 @@
#include <Processors/Transforms/AddingDefaultsTransform.h>
#include <Processors/Transforms/ExtractColumnsTransform.h>
#include <Processors/Sources/ConstChunkGenerator.h>
#include <Processors/Sources/NullSource.h>
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/SourceStepWithFilter.h>

#include <IO/WriteHelpers.h>
#include <IO/CompressionMethod.h>
@ -408,22 +411,22 @@ ColumnsDescription StorageHDFS::getTableStructureFromData(
class HDFSSource::DisclosedGlobIterator::Impl
{
public:
Impl(const String & uri, const ASTPtr & query, const NamesAndTypesList & virtual_columns, const ContextPtr & context)
Impl(const String & uri, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, const ContextPtr & context)
{
const auto [path_from_uri, uri_without_path] = getPathFromUriAndUriWithoutPath(uri);
uris = getPathsList(path_from_uri, uri_without_path, context);
ASTPtr filter_ast;
ActionsDAGPtr filter_dag;
if (!uris.empty())
filter_ast = VirtualColumnUtils::createPathAndFileFilterAst(query, virtual_columns, uris[0].path, context);
filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns);

if (filter_ast)
if (filter_dag)
{
std::vector<String> paths;
paths.reserve(uris.size());
for (const auto & path_with_info : uris)
paths.push_back(path_with_info.path);

VirtualColumnUtils::filterByPathOrFile(uris, paths, query, virtual_columns, context, filter_ast);
VirtualColumnUtils::filterByPathOrFile(uris, paths, filter_dag, virtual_columns, context);
}
auto file_progress_callback = context->getFileProgressCallback();

@ -456,21 +459,21 @@ private:
class HDFSSource::URISIterator::Impl : WithContext
{
public:
explicit Impl(const std::vector<String> & uris_, const ASTPtr & query, const NamesAndTypesList & virtual_columns, const ContextPtr & context_)
explicit Impl(const std::vector<String> & uris_, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, const ContextPtr & context_)
: WithContext(context_), uris(uris_), file_progress_callback(context_->getFileProgressCallback())
{
ASTPtr filter_ast;
ActionsDAGPtr filter_dag;
if (!uris.empty())
filter_ast = VirtualColumnUtils::createPathAndFileFilterAst(query, virtual_columns, getPathFromUriAndUriWithoutPath(uris[0]).first, getContext());
filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns);

if (filter_ast)
if (filter_dag)
{
std::vector<String> paths;
paths.reserve(uris.size());
for (const auto & uri : uris)
paths.push_back(getPathFromUriAndUriWithoutPath(uri).first);

VirtualColumnUtils::filterByPathOrFile(uris, paths, query, virtual_columns, getContext(), filter_ast);
VirtualColumnUtils::filterByPathOrFile(uris, paths, filter_dag, virtual_columns, getContext());
}

if (!uris.empty())

@ -517,16 +520,16 @@ private:
std::function<void(FileProgress)> file_progress_callback;
};

HDFSSource::DisclosedGlobIterator::DisclosedGlobIterator(const String & uri, const ASTPtr & query, const NamesAndTypesList & virtual_columns, const ContextPtr & context)
: pimpl(std::make_shared<HDFSSource::DisclosedGlobIterator::Impl>(uri, query, virtual_columns, context)) {}
HDFSSource::DisclosedGlobIterator::DisclosedGlobIterator(const String & uri, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, const ContextPtr & context)
: pimpl(std::make_shared<HDFSSource::DisclosedGlobIterator::Impl>(uri, predicate, virtual_columns, context)) {}

StorageHDFS::PathWithInfo HDFSSource::DisclosedGlobIterator::next()
{
return pimpl->next();
}

HDFSSource::URISIterator::URISIterator(const std::vector<String> & uris_, const ASTPtr & query, const NamesAndTypesList & virtual_columns, const ContextPtr & context)
: pimpl(std::make_shared<HDFSSource::URISIterator::Impl>(uris_, query, virtual_columns, context))
HDFSSource::URISIterator::URISIterator(const std::vector<String> & uris_, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, const ContextPtr & context)
: pimpl(std::make_shared<HDFSSource::URISIterator::Impl>(uris_, predicate, virtual_columns, context))
{
}

@ -541,8 +544,7 @@ HDFSSource::HDFSSource(
ContextPtr context_,
UInt64 max_block_size_,
std::shared_ptr<IteratorWrapper> file_iterator_,
bool need_only_count_,
const SelectQueryInfo & query_info_)
bool need_only_count_)
: ISource(info.source_header, false)
, WithContext(context_)
, storage(std::move(storage_))

@ -553,7 +555,6 @@ HDFSSource::HDFSSource(
, file_iterator(file_iterator_)
, columns_description(info.columns_description)
, need_only_count(need_only_count_)
, query_info(query_info_)
{
initialize();
}
@ -843,7 +844,57 @@ bool StorageHDFS::supportsSubsetOfColumns(const ContextPtr & context_) const
return FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(format_name, context_);
}

Pipe StorageHDFS::read(
class ReadFromHDFS : public SourceStepWithFilter
{
public:
std::string getName() const override { return "ReadFromHDFS"; }
void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
void applyFilters() override;

ReadFromHDFS(
Block sample_block,
ReadFromFormatInfo info_,
bool need_only_count_,
std::shared_ptr<StorageHDFS> storage_,
ContextPtr context_,
size_t max_block_size_,
size_t num_streams_)
: SourceStepWithFilter(DataStream{.header = std::move(sample_block)})
, info(std::move(info_))
, need_only_count(need_only_count_)
, storage(std::move(storage_))
, context(std::move(context_))
, max_block_size(max_block_size_)
, num_streams(num_streams_)
{
}

private:
ReadFromFormatInfo info;
const bool need_only_count;
std::shared_ptr<StorageHDFS> storage;

ContextPtr context;
size_t max_block_size;
size_t num_streams;

std::shared_ptr<HDFSSource::IteratorWrapper> iterator_wrapper;

void createIterator(const ActionsDAG::Node * predicate);
};

void ReadFromHDFS::applyFilters()
{
auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(filter_nodes.nodes, {}, context);
const ActionsDAG::Node * predicate = nullptr;
if (filter_actions_dag)
predicate = filter_actions_dag->getOutputs().at(0);

createIterator(predicate);
}

void StorageHDFS::read(
QueryPlan & query_plan,
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,
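Editor's note: ReadFromHDFS creates its file iterator lazily: applyFilters() builds it with a pushed-down predicate when planning provides one, and initializePipeline() falls back to a predicate-free iterator; whichever runs first wins. The create-once shape of that pattern, with placeholder types (everything here is a stand-in, not the real API):

    #include <functional>
    #include <memory>
    #include <string>

    struct FileIterator { std::function<std::string()> next; };
    using Predicate = const void *; /// stand-in for const ActionsDAG::Node *

    struct LazySource
    {
        /// Idempotent: the first call (with or without a predicate) builds the
        /// iterator; later calls reuse it so both code paths stay consistent.
        void createIterator(Predicate predicate)
        {
            if (iterator)
                return;
            iterator = std::make_shared<FileIterator>(makeIterator(predicate));
        }

        FileIterator makeIterator(Predicate) { return {[] { return std::string{}; }}; }

        std::shared_ptr<FileIterator> iterator;
    };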
@ -852,18 +903,40 @@ Pipe StorageHDFS::read(
size_t max_block_size,
size_t num_streams)
{
std::shared_ptr<HDFSSource::IteratorWrapper> iterator_wrapper{nullptr};
if (distributed_processing)
auto read_from_format_info = prepareReadingFromFormat(column_names, storage_snapshot, supportsSubsetOfColumns(context_), virtual_columns);
bool need_only_count = (query_info.optimize_trivial_count || read_from_format_info.requested_columns.empty())
&& context_->getSettingsRef().optimize_count_from_files;

auto this_ptr = std::static_pointer_cast<StorageHDFS>(shared_from_this());

auto reading = std::make_unique<ReadFromHDFS>(
read_from_format_info.source_header,
std::move(read_from_format_info),
need_only_count,
std::move(this_ptr),
context_,
max_block_size,
num_streams);

query_plan.addStep(std::move(reading));
}

void ReadFromHDFS::createIterator(const ActionsDAG::Node * predicate)
{
if (iterator_wrapper)
return;

if (storage->distributed_processing)
{
iterator_wrapper = std::make_shared<HDFSSource::IteratorWrapper>(
[callback = context_->getReadTaskCallback()]() -> StorageHDFS::PathWithInfo {
[callback = context->getReadTaskCallback()]() -> StorageHDFS::PathWithInfo {
return StorageHDFS::PathWithInfo{callback(), std::nullopt};
});
}
else if (is_path_with_globs)
else if (storage->is_path_with_globs)
{
/// Iterate through disclosed globs and make a source for each file
auto glob_iterator = std::make_shared<HDFSSource::DisclosedGlobIterator>(uris[0], query_info.query, virtual_columns, context_);
auto glob_iterator = std::make_shared<HDFSSource::DisclosedGlobIterator>(storage->uris[0], predicate, storage->virtual_columns, context);
iterator_wrapper = std::make_shared<HDFSSource::IteratorWrapper>([glob_iterator]()
{
return glob_iterator->next();

@ -871,31 +944,38 @@ Pipe StorageHDFS::read(
}
else
{
auto uris_iterator = std::make_shared<HDFSSource::URISIterator>(uris, query_info.query, virtual_columns, context_);
auto uris_iterator = std::make_shared<HDFSSource::URISIterator>(storage->uris, predicate, storage->virtual_columns, context);
iterator_wrapper = std::make_shared<HDFSSource::IteratorWrapper>([uris_iterator]()
{
return uris_iterator->next();
});
}
}

auto read_from_format_info = prepareReadingFromFormat(column_names, storage_snapshot, supportsSubsetOfColumns(context_), getVirtuals());
bool need_only_count = (query_info.optimize_trivial_count || read_from_format_info.requested_columns.empty())
&& context_->getSettingsRef().optimize_count_from_files;
void ReadFromHDFS::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
createIterator(nullptr);

Pipes pipes;
auto this_ptr = std::static_pointer_cast<StorageHDFS>(shared_from_this());
for (size_t i = 0; i < num_streams; ++i)
{
pipes.emplace_back(std::make_shared<HDFSSource>(
read_from_format_info,
this_ptr,
context_,
info,
storage,
context,
max_block_size,
iterator_wrapper,
need_only_count,
query_info));
need_only_count));
}
return Pipe::unitePipes(std::move(pipes));

auto pipe = Pipe::unitePipes(std::move(pipes));
if (pipe.empty())
pipe = Pipe(std::make_shared<NullSource>(info.source_header));

for (const auto & processor : pipe.getProcessors())
processors.emplace_back(processor);

pipeline.init(std::move(pipe));
}

SinkToStoragePtr StorageHDFS::write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context_, bool /*async_insert*/)
@ -51,7 +51,8 @@ public:

String getName() const override { return "HDFS"; }

Pipe read(
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,

@ -93,6 +94,7 @@ public:

protected:
friend class HDFSSource;
friend class ReadFromHDFS;

private:
std::vector<String> uris;

@ -114,7 +116,7 @@ public:
class DisclosedGlobIterator
{
public:
DisclosedGlobIterator(const String & uri_, const ASTPtr & query, const NamesAndTypesList & virtual_columns, const ContextPtr & context);
DisclosedGlobIterator(const String & uri_, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, const ContextPtr & context);
StorageHDFS::PathWithInfo next();
private:
class Impl;

@ -125,7 +127,7 @@ public:
class URISIterator
{
public:
URISIterator(const std::vector<String> & uris_, const ASTPtr & query, const NamesAndTypesList & virtual_columns, const ContextPtr & context);
URISIterator(const std::vector<String> & uris_, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, const ContextPtr & context);
StorageHDFS::PathWithInfo next();
private:
class Impl;

@ -142,8 +144,7 @@ public:
ContextPtr context_,
UInt64 max_block_size_,
std::shared_ptr<IteratorWrapper> file_iterator_,
bool need_only_count_,
const SelectQueryInfo & query_info_);
bool need_only_count_);

String getName() const override;

@ -162,7 +163,6 @@ private:
ColumnsDescription columns_description;
bool need_only_count;
size_t total_rows_in_file = 0;
SelectQueryInfo query_info;

std::unique_ptr<ReadBuffer> read_buf;
std::shared_ptr<IInputFormat> input_format;

@ -79,9 +79,9 @@ void StorageHDFSCluster::addColumnsStructureToQuery(ASTPtr & query, const String
}


RemoteQueryExecutor::Extension StorageHDFSCluster::getTaskIteratorExtension(ASTPtr query, const ContextPtr & context) const
RemoteQueryExecutor::Extension StorageHDFSCluster::getTaskIteratorExtension(const ActionsDAG::Node * predicate, const ContextPtr & context) const
{
auto iterator = std::make_shared<HDFSSource::DisclosedGlobIterator>(uri, query, virtual_columns, context);
auto iterator = std::make_shared<HDFSSource::DisclosedGlobIterator>(uri, predicate, virtual_columns, context);
auto callback = std::make_shared<std::function<String()>>([iter = std::move(iterator)]() mutable -> String { return iter->next().path; });
return RemoteQueryExecutor::Extension{.task_iterator = std::move(callback)};
}

@ -35,7 +35,7 @@ public:

NamesAndTypesList getVirtuals() const override;

RemoteQueryExecutor::Extension getTaskIteratorExtension(ASTPtr query, const ContextPtr & context) const override;
RemoteQueryExecutor::Extension getTaskIteratorExtension(const ActionsDAG::Node * predicate, const ContextPtr & context) const override;

bool supportsSubcolumns() const override { return true; }
@ -1,7 +1,7 @@
#include "Storages/IStorageCluster.h"
#include <Storages/IStorageCluster.h>

#include "Common/Exception.h"
#include "Core/QueryProcessingStage.h"
#include <Common/Exception.h>
#include <Core/QueryProcessingStage.h>
#include <DataTypes/DataTypeString.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/Context.h>

@ -11,11 +11,14 @@
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/TranslateQualifiedNamesVisitor.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <Parsers/queryToString.h>
#include <Processors/Sources/NullSource.h>
#include <Processors/Sources/RemoteSource.h>
#include <Processors/QueryPlan/SourceStepWithFilter.h>
#include <QueryPipeline/narrowPipe.h>
#include <QueryPipeline/Pipe.h>
#include <Processors/Sources/RemoteSource.h>
#include <QueryPipeline/RemoteQueryExecutor.h>
#include <Parsers/queryToString.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Storages/IStorage.h>
#include <Storages/SelectQueryInfo.h>
#include <Storages/StorageDictionary.h>

@ -38,9 +41,66 @@ IStorageCluster::IStorageCluster(
{
}

class ReadFromCluster : public SourceStepWithFilter
{
public:
std::string getName() const override { return "ReadFromCluster"; }
void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
void applyFilters() override;

ReadFromCluster(
Block sample_block,
std::shared_ptr<IStorageCluster> storage_,
ASTPtr query_to_send_,
QueryProcessingStage::Enum processed_stage_,
ClusterPtr cluster_,
Poco::Logger * log_,
ContextPtr context_)
: SourceStepWithFilter(DataStream{.header = std::move(sample_block)})
, storage(std::move(storage_))
, query_to_send(std::move(query_to_send_))
, processed_stage(processed_stage_)
, cluster(std::move(cluster_))
, log(log_)
, context(std::move(context_))
{
}

private:
std::shared_ptr<IStorageCluster> storage;
ASTPtr query_to_send;
QueryProcessingStage::Enum processed_stage;
ClusterPtr cluster;
Poco::Logger * log;
ContextPtr context;

std::optional<RemoteQueryExecutor::Extension> extension;

void createExtension(const ActionsDAG::Node * predicate);
ContextPtr updateSettings(const Settings & settings);
};

void ReadFromCluster::applyFilters()
{
auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(filter_nodes.nodes, {}, context);
const ActionsDAG::Node * predicate = nullptr;
if (filter_actions_dag)
predicate = filter_actions_dag->getOutputs().at(0);

createExtension(predicate);
}

void ReadFromCluster::createExtension(const ActionsDAG::Node * predicate)
{
if (extension)
return;

extension = storage->getTaskIteratorExtension(predicate, context);
}

/// The code executes on initiator
Pipe IStorageCluster::read(
void IStorageCluster::read(
QueryPlan & query_plan,
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,
@ -49,10 +109,10 @@ Pipe IStorageCluster::read(
size_t /*max_block_size*/,
size_t /*num_streams*/)
{
updateBeforeRead(context);
storage_snapshot->check(column_names);

updateBeforeRead(context);
auto cluster = getCluster(context);
auto extension = getTaskIteratorExtension(query_info.query, context);

/// Calculate the header. This is significant, because some columns could be thrown away in some cases like query with count(*)

@ -70,12 +130,6 @@
query_to_send = interpreter.getQueryInfo().query->clone();
}

const Scalars & scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{};

Pipes pipes;

const bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState;

if (!structure_argument_was_provided)
addColumnsStructureToQuery(query_to_send, storage_snapshot->metadata->getColumns().getAll().toNamesAndTypesDescription(), context);
@ -89,7 +143,29 @@
/* only_replace_in_join_= */true);
visitor.visit(query_to_send);

auto new_context = updateSettings(context, context->getSettingsRef());
auto this_ptr = std::static_pointer_cast<IStorageCluster>(shared_from_this());

auto reading = std::make_unique<ReadFromCluster>(
sample_block,
std::move(this_ptr),
std::move(query_to_send),
processed_stage,
cluster,
log,
context);

query_plan.addStep(std::move(reading));
}

void ReadFromCluster::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
createExtension(nullptr);

const Scalars & scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{};
const bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState;

Pipes pipes;
auto new_context = updateSettings(context->getSettingsRef());
const auto & current_settings = new_context->getSettingsRef();
auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(current_settings);
for (const auto & shard_info : cluster->getShardsInfo())

@ -100,7 +176,7 @@
auto remote_query_executor = std::make_shared<RemoteQueryExecutor>(
std::vector<IConnectionPool::Entry>{try_result},
queryToString(query_to_send),
sample_block,
getOutputStream().header,
new_context,
/*throttler=*/nullptr,
scalars,

@ -113,8 +189,14 @@
}
}

storage_snapshot->check(column_names);
return Pipe::unitePipes(std::move(pipes));
auto pipe = Pipe::unitePipes(std::move(pipes));
if (pipe.empty())
pipe = Pipe(std::make_shared<NullSource>(getOutputStream().header));

for (const auto & processor : pipe.getProcessors())
processors.emplace_back(processor);

pipeline.init(std::move(pipe));
}

QueryProcessingStage::Enum IStorageCluster::getQueryProcessingStage(

@ -129,7 +211,7 @@ QueryProcessingStage::Enum IStorageCluster::getQueryProcessingStage(
return QueryProcessingStage::Enum::FetchColumns;
}

ContextPtr IStorageCluster::updateSettings(ContextPtr context, const Settings & settings)
ContextPtr ReadFromCluster::updateSettings(const Settings & settings)
{
Settings new_settings = settings;

@ -22,7 +22,8 @@ public:
Poco::Logger * log_,
bool structure_argument_was_provided_);

Pipe read(
void read(
QueryPlan & query_plan,
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,
SelectQueryInfo & query_info,

@ -33,7 +34,7 @@ public:

ClusterPtr getCluster(ContextPtr context) const;

/// Query is needed for pruning by virtual columns (_file, _path)
virtual RemoteQueryExecutor::Extension getTaskIteratorExtension(ASTPtr query, const ContextPtr & context) const = 0;
virtual RemoteQueryExecutor::Extension getTaskIteratorExtension(const ActionsDAG::Node * predicate, const ContextPtr & context) const = 0;

QueryProcessingStage::Enum getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override;
@ -45,8 +46,6 @@ protected:
virtual void addColumnsStructureToQuery(ASTPtr & query, const String & structure, const ContextPtr & context) = 0;

private:
ContextPtr updateSettings(ContextPtr context, const Settings & settings);

Poco::Logger * log;
String cluster_name;
bool structure_argument_was_provided;

@ -43,6 +43,8 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
LOG_TRACE(log, "Executing log entry to merge parts {} to {}",
fmt::join(entry.source_parts, ", "), entry.new_part_name);

StorageMetadataPtr metadata_snapshot = storage.getInMemoryMetadataPtr();
int32_t metadata_version = metadata_snapshot->getMetadataVersion();
const auto storage_settings_ptr = storage.getSettings();

if (storage_settings_ptr->always_fetch_merged_part)
@ -129,6 +131,18 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
};
|
||||
}
|
||||
|
||||
int32_t part_metadata_version = source_part_or_covering->getMetadataVersion();
|
||||
if (part_metadata_version > metadata_version)
|
||||
{
|
||||
LOG_DEBUG(log, "Source part metadata version {} is newer then the table metadata version {}. ALTER_METADATA is still in progress.",
|
||||
part_metadata_version, metadata_version);
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
};
|
||||
}
|
||||
|
||||
parts.push_back(source_part_or_covering);
|
||||
}
|
||||
|
||||
@ -176,8 +190,6 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
/// It will live until the whole task is being destroyed
|
||||
table_lock_holder = storage.lockForShare(RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations);
|
||||
|
||||
StorageMetadataPtr metadata_snapshot = storage.getInMemoryMetadataPtr();
|
||||
|
||||
auto future_merged_part = std::make_shared<FutureMergedMutatedPart>(parts, entry.new_part_format);
|
||||
if (future_merged_part->name != entry.new_part_name)
|
||||
{
|
||||
|
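The new block above postpones a merge while a source part's metadata version is ahead of the table's, i.e. while an ALTER_METADATA entry is still being applied. A self-contained sketch of that guard (the `PrepareResult` struct here is a simplified stand-in):

```cpp
#include <cstdint>
#include <iostream>

// Simplified model of the guard added above: a merge is postponed while any
// source part carries a newer metadata version than the table itself.
struct PrepareResult
{
    bool prepared_successfully = false;
    bool need_to_check_missing_part_in_fetch = false;
};

PrepareResult checkPartMetadata(int32_t part_metadata_version, int32_t table_metadata_version)
{
    if (part_metadata_version > table_metadata_version)
    {
        // Postpone: retrying later is cheap, merging with stale table metadata is not.
        return {.prepared_successfully = false, .need_to_check_missing_part_in_fetch = false};
    }
    return {.prepared_successfully = true, .need_to_check_missing_part_in_fetch = false};
}

int main()
{
    std::cout << checkPartMetadata(5, 4).prepared_successfully << "\n"; // 0 -> postponed
    std::cout << checkPartMetadata(4, 4).prepared_successfully << "\n"; // 1 -> proceed
}
```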
@ -570,6 +570,7 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const
    for (size_t part_num = 0; part_num < global_ctx->future_part->parts.size(); ++part_num)
    {
        Pipe pipe = createMergeTreeSequentialSource(
+            MergeTreeSequentialSourceType::Merge,
            *global_ctx->data,
            global_ctx->storage_snapshot,
            global_ctx->future_part->parts[part_num],
@ -925,6 +926,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream()
    for (const auto & part : global_ctx->future_part->parts)
    {
        Pipe pipe = createMergeTreeSequentialSource(
+            MergeTreeSequentialSourceType::Merge,
            *global_ctx->data,
            global_ctx->storage_snapshot,
            part,

@ -3985,8 +3985,15 @@ MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromW
    /// FIXME refactor removePartsFromWorkingSet(...), do not remove parts twice
    removePartsFromWorkingSet(txn, parts_to_remove, clear_without_timeout, lock);

+    /// We can only create a covering part for a blocks range that starts with 0 (otherwise we may get "intersecting parts"
+    /// if we remove a range from the middle when dropping a part).
+    /// Maybe we could do it by incrementing mutation version to get a name for the empty covering part,
+    /// but it's okay to simply avoid creating it for DROP PART (for a part in the middle).
+    /// NOTE: Block numbers in ReplicatedMergeTree start from 0. For MergeTree, is_new_syntax is always false.
+    assert(!create_empty_part || supportsReplication());
+    bool range_in_the_middle = drop_range.min_block;
    bool is_new_syntax = format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING;
-    if (create_empty_part && !parts_to_remove.empty() && is_new_syntax)
+    if (create_empty_part && !parts_to_remove.empty() && is_new_syntax && !range_in_the_middle)
    {
        /// We are going to remove a lot of parts from zookeeper just after returning from this function.
        /// And we will remove parts from disk later (because some queries may use them).
@ -3995,12 +4002,9 @@ MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromW
        /// We don't need to commit it to zk, and don't even need to activate it.

        MergeTreePartInfo empty_info = drop_range;
-        empty_info.level = empty_info.mutation = 0;
-        if (!empty_info.min_block)
-            empty_info.min_block = MergeTreePartInfo::MAX_BLOCK_NUMBER;
+        empty_info.min_block = empty_info.level = empty_info.mutation = 0;
        for (const auto & part : parts_to_remove)
        {
            empty_info.min_block = std::min(empty_info.min_block, part->info.min_block);
            empty_info.level = std::max(empty_info.level, part->info.level);
            empty_info.mutation = std::max(empty_info.mutation, part->info.mutation);
        }

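The comments above carry the key invariant: block numbers start at 0, so an empty covering part is only safe for a range that begins at block 0, and a non-zero `drop_range.min_block` marks a range in the middle. A small standalone model of the check and of how the covering part's bounds are derived (`PartInfo` is a simplified stand-in for the real part-info type):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy model (hypothetical PartInfo) of the fix above: an empty covering part is
// only created when the dropped range starts at block 0; a range in the middle
// would produce a covering part that intersects its left neighbours.
struct PartInfo { int64_t min_block = 0, max_block = 0; uint32_t level = 0; int64_t mutation = 0; };

bool shouldCreateEmptyCoveringPart(const PartInfo & drop_range, bool create_empty_part, bool has_parts, bool is_new_syntax)
{
    bool range_in_the_middle = drop_range.min_block != 0; // `drop_range.min_block` converts to bool the same way
    return create_empty_part && has_parts && is_new_syntax && !range_in_the_middle;
}

PartInfo makeCoveringInfo(PartInfo drop_range, const std::vector<PartInfo> & parts_to_remove)
{
    PartInfo empty_info = drop_range;
    empty_info.min_block = empty_info.level = empty_info.mutation = 0; // range starts at 0 by construction
    for (const auto & part : parts_to_remove)
    {
        empty_info.min_block = std::min(empty_info.min_block, part.min_block);
        empty_info.level = std::max(empty_info.level, part.level);
        empty_info.mutation = std::max(empty_info.mutation, part.mutation);
    }
    return empty_info;
}

int main()
{
    PartInfo from_zero{0, 10, 0, 0};
    PartInfo middle{5, 10, 0, 0};
    std::cout << shouldCreateEmptyCoveringPart(from_zero, true, true, true) << "\n"; // 1
    std::cout << shouldCreateEmptyCoveringPart(middle, true, true, true) << "\n";    // 0
}
```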
@ -1,22 +1,23 @@
#include <Storages/MergeTree/MergeTreeIndexFullText.h>

#include <Columns/ColumnArray.h>
-#include <DataTypes/DataTypesNumber.h>
+#include <Common/OptimizedRegularExpression.h>
+#include <Core/Defines.h>
#include <DataTypes/DataTypeArray.h>
-#include <IO/WriteHelpers.h>
+#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>
+#include <IO/WriteHelpers.h>
#include <Interpreters/ExpressionActions.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/TreeRewriter.h>
#include <Interpreters/misc.h>
-#include <Storages/MergeTree/MergeTreeData.h>
-#include <Storages/MergeTree/RPNBuilder.h>
-#include <Storages/MergeTree/MergeTreeIndexUtils.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
-#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTSelectQuery.h>
-#include <Core/Defines.h>
+#include <Parsers/ASTSubquery.h>
+#include <Storages/MergeTree/MergeTreeData.h>
+#include <Storages/MergeTree/MergeTreeIndexUtils.h>
+#include <Storages/MergeTree/RPNBuilder.h>

#include <Poco/Logger.h>

@ -201,6 +202,7 @@ bool MergeTreeConditionFullText::alwaysUnknownOrTrue() const
            || element.function == RPNElement::FUNCTION_IN
            || element.function == RPNElement::FUNCTION_NOT_IN
            || element.function == RPNElement::FUNCTION_MULTI_SEARCH
+            || element.function == RPNElement::FUNCTION_MATCH
            || element.function == RPNElement::FUNCTION_HAS_ANY
            || element.function == RPNElement::ALWAYS_FALSE)
        {

@ -285,8 +287,27 @@ bool MergeTreeConditionFullText::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx
            for (size_t row = 0; row < bloom_filters.size(); ++row)
                result[row] = result[row] && granule->bloom_filters[element.key_column].contains(bloom_filters[row]);

-            rpn_stack.emplace_back(
-                std::find(std::cbegin(result), std::cend(result), true) != std::end(result), true);
+            rpn_stack.emplace_back(std::find(std::cbegin(result), std::cend(result), true) != std::end(result), true);
        }
+        else if (element.function == RPNElement::FUNCTION_MATCH)
+        {
+            if (!element.set_bloom_filters.empty())
+            {
+                /// Alternative substrings
+                std::vector<bool> result(element.set_bloom_filters.back().size(), true);
+
+                const auto & bloom_filters = element.set_bloom_filters[0];
+
+                for (size_t row = 0; row < bloom_filters.size(); ++row)
+                    result[row] = result[row] && granule->bloom_filters[element.key_column].contains(bloom_filters[row]);
+
+                rpn_stack.emplace_back(std::find(std::cbegin(result), std::cend(result), true) != std::end(result), true);
+            }
+            else if (element.bloom_filter)
+            {
+                /// Required substrings
+                rpn_stack.emplace_back(granule->bloom_filters[element.key_column].contains(*element.bloom_filter), true);
+            }
+        }
        else if (element.function == RPNElement::FUNCTION_NOT)
        {

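For a `match` atom the granule check above treats the regex's alternative substrings with OR semantics: the granule may be skipped only if no alternative can be present. A simplified, self-contained model of that check, with a `std::set` of tokens standing in for the granule's bloom filter (so, unlike the real structure, no false positives):

```cpp
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Simplified model of the FUNCTION_MATCH granule check above. A `match` atom
// may be true on a granule if ANY alternative substring of the regex can be
// present, hence the OR across `result`.
using Granule = std::set<std::string>;

bool mayBeTrueOnGranule(const Granule & granule, const std::vector<std::string> & alternatives)
{
    std::vector<bool> result(alternatives.size(), true);
    for (size_t row = 0; row < alternatives.size(); ++row)
        result[row] = result[row] && granule.count(alternatives[row]); // "contains" check
    return std::find(result.cbegin(), result.cend(), true) != result.cend();
}

int main()
{
    Granule granule{"clickhouse", "merge", "tree"};
    std::cout << mayBeTrueOnGranule(granule, {"foo", "merge"}) << "\n"; // 1: one alternative may match
    std::cout << mayBeTrueOnGranule(granule, {"foo", "bar"}) << "\n";   // 0: granule can be skipped
}
```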
@ -392,6 +413,7 @@ bool MergeTreeConditionFullText::extractAtomFromTree(const RPNBuilderTreeNode &
        function_name == "notEquals" ||
        function_name == "has" ||
        function_name == "mapContains" ||
+        function_name == "match" ||
        function_name == "like" ||
        function_name == "notLike" ||
        function_name.starts_with("hasToken") ||
@ -513,6 +535,7 @@ bool MergeTreeConditionFullText::traverseTreeEquals(
        token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
        return true;
    }
+
    else if (function_name == "has")
    {
        out.key_column = *key_index;
@ -600,6 +623,39 @@ bool MergeTreeConditionFullText::traverseTreeEquals(
        out.set_bloom_filters = std::move(bloom_filters);
        return true;
    }
+    else if (function_name == "match")
+    {
+        out.key_column = *key_index;
+        out.function = RPNElement::FUNCTION_MATCH;
+        out.bloom_filter = std::make_unique<BloomFilter>(params);
+
+        auto & value = const_value.get<String>();
+        String required_substring;
+        bool dummy_is_trivial, dummy_required_substring_is_prefix;
+        std::vector<String> alternatives;
+        OptimizedRegularExpression::analyze(value, required_substring, dummy_is_trivial, dummy_required_substring_is_prefix, alternatives);
+
+        if (required_substring.empty() && alternatives.empty())
+            return false;
+
+        /// out.set_bloom_filters means alternatives exist
+        /// out.bloom_filter means required_substring exists
+        if (!alternatives.empty())
+        {
+            std::vector<std::vector<BloomFilter>> bloom_filters;
+            bloom_filters.emplace_back();
+            for (const auto & alternative : alternatives)
+            {
+                bloom_filters.back().emplace_back(params);
+                token_extractor->stringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back());
+            }
+            out.set_bloom_filters = std::move(bloom_filters);
+        }
+        else
+            token_extractor->stringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter);
+
+        return true;
+    }

    return false;
}

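The `match` branch above relies on `OptimizedRegularExpression::analyze` to decompose a pattern into either one required substring or a set of top-level alternatives, each of which is then turned into a bloom filter. A deliberately naive sketch of that decomposition, handling only literal patterns and a single top-level `|` (real regex syntax is not handled here):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Toy version of the analysis step used above (the real one is
// OptimizedRegularExpression::analyze). Assumption for this sketch: the
// pattern is either a single literal ("required substring") or a top-level
// alternation of literals ("alternatives").
void analyzeLiteralPattern(const std::string & pattern, std::string & required_substring, std::vector<std::string> & alternatives)
{
    std::vector<std::string> pieces;
    std::string current;
    for (char c : pattern)
    {
        if (c == '|') { pieces.push_back(current); current.clear(); }
        else current += c;
    }
    pieces.push_back(current);

    if (pieces.size() == 1)
        required_substring = pieces[0]; // one literal must be present
    else
        alternatives = pieces;          // at least one of these must be present
}

int main()
{
    std::string required;
    std::vector<std::string> alternatives;

    analyzeLiteralPattern("clickhouse", required, alternatives);
    std::cout << "required: " << required << "\n"; // -> filter granules on one bloom filter

    required.clear();
    analyzeLiteralPattern("click|house", required, alternatives);
    std::cout << "alternatives: " << alternatives.size() << "\n"; // -> OR over per-alternative filters
}
```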
@ -90,6 +90,7 @@ private:
        FUNCTION_NOT_EQUALS,
        FUNCTION_HAS,
        FUNCTION_IN,
+        FUNCTION_MATCH,
        FUNCTION_NOT_IN,
        FUNCTION_MULTI_SEARCH,
        FUNCTION_HAS_ANY,

@ -1,5 +1,6 @@
#include <Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h>

+

namespace DB
{

@ -30,12 +31,10 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas(
        settings_,
        context_)
    , extension(std::move(extension_))
+    , coordination_mode(CoordinationMode::Default)
{
-    extension.all_callback(InitialAllRangesAnnouncement(
-        CoordinationMode::Default,
-        parts_ranges.getDescriptions(),
-        extension.number_of_current_replica
-    ));
+    extension.all_callback(
+        InitialAllRangesAnnouncement(coordination_mode, parts_ranges.getDescriptions(), extension.number_of_current_replica));
}

MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicas::getTask(size_t /*task_idx*/, MergeTreeReadTask * previous_task)
@ -48,7 +47,7 @@ MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicas::getTask(size_t /*task_id
    if (buffered_ranges.empty())
    {
        auto result = extension.callback(ParallelReadRequest(
-            CoordinationMode::Default,
+            coordination_mode,
            extension.number_of_current_replica,
            pool_settings.min_marks_for_concurrent_read * pool_settings.threads,
            /// For Default coordination mode we don't need to pass part names.

Some files were not shown because too many files have changed in this diff