Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Compare commits: 6ba686d251...91383aa87c (122 commits)
91383aa87c
e7054029c4
b578d1af1c
114499526e
7cfe1ec25c
064a072146
a34191f3b1
d477bef82d
293821a186
906b7aebc6
5fe151529a
080b8f74be
0a35b111ff
6fb8f2b4ee
0f265ce33d
a824217193
80504e7b9b
d3f3bc3565
55116da80d
642657d02c
7aabd7d2fd
61fa4e7a47
5d6b861ff0
c0b36c946d
2b20b2d4de
6f5210644b
4d5ac30a87
30125b5a89
1a479b43fd
1165ae756d
a4ee666ec5
c9c537e3ab
573d83ff97
b5406a4be1
44d3a94c61
b0894bffe6
20c62dc978
894bbbf021
df361bd5ec
dff93d2c80
6775e60331
b22423068b
d50a9cdec1
eec720dab6
0fd3694373
e1a7bd9163
b094f5344f
e538080665
60e4bcbbf0
131d01922f
f5739dfe06
4c790999eb
012cf0763f
dc862b1411
5bb9ddea2b
e9ff092d0b
4200b3d5cb
f89193fa41
f8e08967af
06c46ee75b
8921eec672
55252b635f
a780f64cb5
3ae4370ce5
fa453c3664
4264fbc037
1692360233
e7b89537bf
52cdd88eb6
0bd8ebf626
9c0e1df166
28fbd8a4ef
7c3a013d56
980b02bfd6
5f61e19340
c63cec756f
1547dd2bde
51fbc629c6
9d75415090
859d2bfe27
0b9c24f31d
0dc18247df
a541b106dd
a93d191980
1ea0163dfe
5340ac5fbc
a9e793532a
ce33943b43
91e65feaae
add4718634
8d14d85230
b3f084459f
7a740819b9
54dd3afd49
7ef5c366e8
664e9b3db9
84467077b8
95f45d2eaf
e73e8e7a08
c6a197aed3
be4439e3ec
8cf6323125
8a89cd31a1
2f6ad1271c
bff252ea73
ca880ccdee
6e5465ae51
38f9ef6bc9
a52eff299e
2e58ac5611
e87de3cfcd
5965297d8b
1afd3a7c3a
4a7a04b35b
915daafd3a
e01a448bcc
434458cc83
2ad50a5f3c
fe637452ec
e416a2b3d2
24eeaffa7a
0ccbb554b9
.github/workflows/release_branches.yml (vendored, 1 change)
```diff
@@ -130,6 +130,7 @@ jobs:
     with:
       build_name: package_debug
       data: ${{ needs.RunConfig.outputs.data }}
+      force: true
   BuilderBinDarwin:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}
```
README.md (17 changes)
```diff
@@ -34,7 +34,7 @@ curl https://clickhouse.com/ | sh
 
 Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
 
-* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 20
+* [v24.9 Community Call](https://clickhouse.com/company/events/v24-9-community-release-call) - September 26
 
 ## Upcoming Events
 
@@ -44,13 +44,22 @@ The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey
 
 * [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
 * [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
-* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/clickhouse-nc-meetup-group/events/302557230) - September 9
-* [New York Meetup (Ramp)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
+* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
+* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
+* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
 
+Other upcoming meetups
+* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
+* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
+* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
+* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
+* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
+* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
+* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
+
 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
+* **Recording available**: [**v24.8 LTS Release Call**](https://www.youtube.com/watch?v=AeLmp2jc51k) All the features of 24.8 LTS, one convenient video! Watch it now!
 
 ## Interested in joining ClickHouse and making it your full-time job?
```
docker/keeper/Dockerfile

```diff
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.1.2684"
+ARG VERSION="24.8.2.3"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
```
docker/server/Dockerfile.alpine

```diff
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.1.2684"
+ARG VERSION="24.8.2.3"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
```
docker/server/Dockerfile.ubuntu

```diff
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.8.1.2684"
+ARG VERSION="24.8.2.3"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
```
docker/test/sqllogic/Dockerfile

```diff
@@ -40,6 +40,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git"
-
-RUN git clone --recursive ${sqllogic_test_repo}
 
 COPY run.sh /
 CMD ["/bin/bash", "/run.sh"]
```
docs/changelogs/v24.5.5.41-stable.md (new file, 71 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.5.5.41-stable (441d4a6ebe3) FIXME as compared to v24.5.4.49-stable (63b760955a0)

#### Improvement
* Backported in [#66768](https://github.com/ClickHouse/ClickHouse/issues/66768): Make `allow_experimental_analyzer` controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65350](https://github.com/ClickHouse/ClickHouse/issues/65350): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65621](https://github.com/ClickHouse/ClickHouse/issues/65621): Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67902](https://github.com/ClickHouse/ClickHouse/issues/67902): Fix the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66884](https://github.com/ClickHouse/ClickHouse/issues/66884): Fix unexpected size of a low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65933](https://github.com/ClickHouse/ClickHouse/issues/65933): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the ClickHouse query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#66301](https://github.com/ClickHouse/ClickHouse/issues/66301): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`; see the SQL sketch after this changelog), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66328](https://github.com/ClickHouse/ClickHouse/issues/66328): Add the missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68252](https://github.com/ClickHouse/ClickHouse/issues/68252): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#66155](https://github.com/ClickHouse/ClickHouse/issues/66155): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66454](https://github.com/ClickHouse/ClickHouse/issues/66454): Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66226](https://github.com/ClickHouse/ClickHouse/issues/66226): Fix an issue in SumIfToCountIfVisitor with signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66680](https://github.com/ClickHouse/ClickHouse/issues/66680): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66604](https://github.com/ClickHouse/ClickHouse/issues/66604): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup, which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66360](https://github.com/ClickHouse/ClickHouse/issues/66360): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68064](https://github.com/ClickHouse/ClickHouse/issues/68064): Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68158](https://github.com/ClickHouse/ClickHouse/issues/68158): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#66972](https://github.com/ClickHouse/ClickHouse/issues/66972): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66691](https://github.com/ClickHouse/ClickHouse/issues/66691): Fix the `VALID UNTIL` clause in user definitions resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#66969](https://github.com/ClickHouse/ClickHouse/issues/66969): Fix `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66720](https://github.com/ClickHouse/ClickHouse/issues/66720): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66951](https://github.com/ClickHouse/ClickHouse/issues/66951): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66757](https://github.com/ClickHouse/ClickHouse/issues/66757): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66948](https://github.com/ClickHouse/ClickHouse/issues/66948): Fix `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of IN (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68115](https://github.com/ClickHouse/ClickHouse/issues/68115): Fix possible PARAMETER_OUT_OF_BOUND error when reading a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67633](https://github.com/ClickHouse/ClickHouse/issues/67633): Fix an occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67481](https://github.com/ClickHouse/ClickHouse/issues/67481): In rare cases ClickHouse could consider parts broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67814](https://github.com/ClickHouse/ClickHouse/issues/67814): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67197](https://github.com/ClickHouse/ClickHouse/issues/67197): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67379](https://github.com/ClickHouse/ClickHouse/issues/67379): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67501](https://github.com/ClickHouse/ClickHouse/issues/67501): Fix crash in DistributedAsyncInsert when the connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67886](https://github.com/ClickHouse/ClickHouse/issues/67886): Correctly parse a file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67576](https://github.com/ClickHouse/ClickHouse/issues/67576): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67850](https://github.com/ClickHouse/ClickHouse/issues/67850): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68272](https://github.com/ClickHouse/ClickHouse/issues/68272): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67807](https://github.com/ClickHouse/ClickHouse/issues/67807): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make the UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67836](https://github.com/ClickHouse/ClickHouse/issues/67836): Fix potential stack overflow in the `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostics of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67991](https://github.com/ClickHouse/ClickHouse/issues/67991): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68207](https://github.com/ClickHouse/ClickHouse/issues/68207): Fix wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68091](https://github.com/ClickHouse/ClickHouse/issues/68091): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68122](https://github.com/ClickHouse/ClickHouse/issues/68122): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68171](https://github.com/ClickHouse/ClickHouse/issues/68171): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68337](https://github.com/ClickHouse/ClickHouse/issues/68337): Try to fix a PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68667](https://github.com/ClickHouse/ClickHouse/issues/68667): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#66387](https://github.com/ClickHouse/ClickHouse/issues/66387): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66426](https://github.com/ClickHouse/ClickHouse/issues/66426): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66544](https://github.com/ClickHouse/ClickHouse/issues/66544): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66859](https://github.com/ClickHouse/ClickHouse/issues/66859): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66875](https://github.com/ClickHouse/ClickHouse/issues/66875): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67059](https://github.com/ClickHouse/ClickHouse/issues/67059): Increase the asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66945](https://github.com/ClickHouse/ClickHouse/issues/66945): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67252](https://github.com/ClickHouse/ClickHouse/issues/67252): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67412](https://github.com/ClickHouse/ClickHouse/issues/67412): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
* Update version after release. [#67862](https://github.com/ClickHouse/ClickHouse/pull/67862) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68077](https://github.com/ClickHouse/ClickHouse/issues/68077): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
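The null-safe JOIN rewrite described in the `IS NULL` handling entry above, as a minimal SQL sketch; the tables `t1`/`t2` and their Nullable columns are hypothetical, not taken from the changelog:

```sql
-- Hypothetical tables with Nullable join keys.
CREATE TABLE t1 (a Nullable(Int64)) ENGINE = MergeTree ORDER BY tuple();
CREATE TABLE t2 (b Nullable(Int64)) ENGINE = MergeTree ORDER BY tuple();

-- The verbose null-safe join condition ...
SELECT t1.a, t2.b
FROM t1
JOIN t2 ON (t1.a = t2.b AND t1.a IS NOT NULL AND t2.b IS NOT NULL)
        OR (t1.a IS NULL AND t2.b IS NULL);

-- ... is what the optimizer rewrites to the null-safe comparison operator:
SELECT t1.a, t2.b
FROM t1
JOIN t2 ON t1.a <=> t2.b;
```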
docs/changelogs/v24.5.6.45-stable.md (new file, 33 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.5.6.45-stable (bdca8604c29) FIXME as compared to v24.5.5.78-stable (0138248cb62)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67902](https://github.com/ClickHouse/ClickHouse/issues/67902): Fix the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68252](https://github.com/ClickHouse/ClickHouse/issues/68252): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68064](https://github.com/ClickHouse/ClickHouse/issues/68064): Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68158](https://github.com/ClickHouse/ClickHouse/issues/68158): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68115](https://github.com/ClickHouse/ClickHouse/issues/68115): Fix possible PARAMETER_OUT_OF_BOUND error when reading a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67886](https://github.com/ClickHouse/ClickHouse/issues/67886): Correctly parse a file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68272](https://github.com/ClickHouse/ClickHouse/issues/68272): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67807](https://github.com/ClickHouse/ClickHouse/issues/67807): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make the UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67836](https://github.com/ClickHouse/ClickHouse/issues/67836): Fix potential stack overflow in the `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility (see the SQL sketch after this changelog). Improved diagnostics of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67991](https://github.com/ClickHouse/ClickHouse/issues/67991): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68207](https://github.com/ClickHouse/ClickHouse/issues/68207): Fix wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68091](https://github.com/ClickHouse/ClickHouse/issues/68091): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68122](https://github.com/ClickHouse/ClickHouse/issues/68122): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68171](https://github.com/ClickHouse/ClickHouse/issues/68171): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68337](https://github.com/ClickHouse/ClickHouse/issues/68337): Try to fix a PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68667](https://github.com/ClickHouse/ClickHouse/issues/68667): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Update version after release. [#67862](https://github.com/ClickHouse/ClickHouse/pull/67862) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68077](https://github.com/ClickHouse/ClickHouse/issues/68077): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Backported in [#68756](https://github.com/ClickHouse/ClickHouse/issues/68756): To make a patch release possible from every commit on a release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
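A quick sketch of the renamed `JSONMergePatch` mentioned above; the outputs shown are the values expected under RFC 7396 merge-patch semantics, not copied from the changelog:

```sql
-- Keys in the patch overwrite the target; a null value deletes the key.
SELECT JSONMergePatch('{"a": 1, "b": 2}', '{"b": null, "c": 3}') AS merged;
-- expected: {"a":1,"c":3}

-- The old lowercase name is kept as a compatibility alias.
SELECT jsonMergePatch('{"a": 1}', '{"b": 2}') AS merged_alias;
-- expected: {"a":1,"b":2}
```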
docs/changelogs/v24.6.3.38-stable.md (new file, 83 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.3.38-stable (4e33c831589) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c)

#### Improvement
* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make `allow_experimental_analyzer` controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed-version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpected size of a low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add the missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if the optimization merges two filter expressions and does not apply short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in the ZooKeeper client: a session could get stuck in an unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix an issue in SumIfToCountIfVisitor with signed integers (see the SQL sketch after this changelog). [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix a rare case of missing data in the result of a distributed query; closes [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup, which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68066](https://github.com/ClickHouse/ClickHouse/issues/68066): Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68566](https://github.com/ClickHouse/ClickHouse/issues/68566): Fix an indexHint function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68159](https://github.com/ClickHouse/ClickHouse/issues/68159): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and the new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the `VALID UNTIL` clause in user definitions resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with a constant expression in the `GROUP BY` key and the new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of IN (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68116](https://github.com/ClickHouse/ClickHouse/issues/68116): Fix possible PARAMETER_OUT_OF_BOUND error when reading a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix an occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when the connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67887](https://github.com/ClickHouse/ClickHouse/issues/67887): Correctly parse a file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68611](https://github.com/ClickHouse/ClickHouse/issues/68611): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68275](https://github.com/ClickHouse/ClickHouse/issues/68275): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make the UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in the `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostics of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67993](https://github.com/ClickHouse/ClickHouse/issues/67993): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68208](https://github.com/ClickHouse/ClickHouse/issues/68208): Fix wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68093](https://github.com/ClickHouse/ClickHouse/issues/68093): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68124](https://github.com/ClickHouse/ClickHouse/issues/68124): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68221](https://github.com/ClickHouse/ClickHouse/issues/68221): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68173](https://github.com/ClickHouse/ClickHouse/issues/68173): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68339](https://github.com/ClickHouse/ClickHouse/issues/68339): Try to fix a PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68396](https://github.com/ClickHouse/ClickHouse/issues/68396): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68668](https://github.com/ClickHouse/ClickHouse/issues/68668): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple-disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase the asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
* Update version after release. [#67909](https://github.com/ClickHouse/ClickHouse/pull/67909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68079](https://github.com/ClickHouse/ClickHouse/issues/68079): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
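A sketch of the equivalence behind the SumIfToCountIfVisitor entry above; the fixed signed-integer edge case itself is not reproduced here:

```sql
-- sumIf(1, cond) counts the rows where cond holds, so the optimizer may
-- rewrite it as countIf(cond); both queries return 5 over numbers 0..9.
SELECT sumIf(1, number % 2 = 0) FROM numbers(10);
SELECT countIf(number % 2 = 0) FROM numbers(10);
```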
docs/changelogs/v24.6.4.42-stable.md (new file, 33 lines)

---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.4.42-stable (c534bb4b4dd) FIXME as compared to v24.6.3.95-stable (8325c920d11)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68066](https://github.com/ClickHouse/ClickHouse/issues/68066): Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68566](https://github.com/ClickHouse/ClickHouse/issues/68566): Fix an indexHint function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68159](https://github.com/ClickHouse/ClickHouse/issues/68159): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68116](https://github.com/ClickHouse/ClickHouse/issues/68116): Fix possible PARAMETER_OUT_OF_BOUND error when reading a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67887](https://github.com/ClickHouse/ClickHouse/issues/67887): Correctly parse a file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68611](https://github.com/ClickHouse/ClickHouse/issues/68611): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68275](https://github.com/ClickHouse/ClickHouse/issues/68275): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67993](https://github.com/ClickHouse/ClickHouse/issues/67993): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68208](https://github.com/ClickHouse/ClickHouse/issues/68208): Fix wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68093](https://github.com/ClickHouse/ClickHouse/issues/68093): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68124](https://github.com/ClickHouse/ClickHouse/issues/68124): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68221](https://github.com/ClickHouse/ClickHouse/issues/68221): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68173](https://github.com/ClickHouse/ClickHouse/issues/68173): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68339](https://github.com/ClickHouse/ClickHouse/issues/68339): Try to fix a PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68396](https://github.com/ClickHouse/ClickHouse/issues/68396): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA` (see the SQL sketch after this changelog). [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68668](https://github.com/ClickHouse/ClickHouse/issues/68668): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Update version after release. [#67909](https://github.com/ClickHouse/ClickHouse/pull/67909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68079](https://github.com/ClickHouse/ClickHouse/issues/68079): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Backported in [#68758](https://github.com/ClickHouse/ClickHouse/issues/68758): To make a patch release possible from every commit on a release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
55
docs/changelogs/v24.7.3.47-stable.md
Normal file
@ -0,0 +1,55 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.3.47-stable (2e50fe27a14) FIXME as compared to v24.7.2.13-stable (6e41f601b2f)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#68232](https://github.com/ClickHouse/ClickHouse/issues/68232): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#67969](https://github.com/ClickHouse/ClickHouse/issues/67969): Fixed reading of subcolumns after an `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68068](https://github.com/ClickHouse/ClickHouse/issues/68068): Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#67637](https://github.com/ClickHouse/ClickHouse/issues/67637): Fix an occasional deadlock in `Context::getDDLWorker`. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67820](https://github.com/ClickHouse/ClickHouse/issues/67820): Fix a possible deadlock on query cancellation with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67818](https://github.com/ClickHouse/ClickHouse/issues/67818): Only relevant to the experimental Variant data type. Fix a crash with the Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67766](https://github.com/ClickHouse/ClickHouse/issues/67766): Fix a crash of `uniq` and `uniqTheta` with a `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
* Backported in [#67881](https://github.com/ClickHouse/ClickHouse/issues/67881): Correctly parse a file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68613](https://github.com/ClickHouse/ClickHouse/issues/68613): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid traversal of unresolved table function arguments in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67854](https://github.com/ClickHouse/ClickHouse/issues/67854): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid traversal of unresolved table function arguments in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68278](https://github.com/ClickHouse/ClickHouse/issues/68278): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68040](https://github.com/ClickHouse/ClickHouse/issues/68040): Fix creation of a view with a recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#68038](https://github.com/ClickHouse/ClickHouse/issues/68038): Fix a crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is taken into account, and window functions without a window frame definition in SQL can now be placed into different `WindowTransfomer`s properly. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#67713](https://github.com/ClickHouse/ClickHouse/issues/67713): Fix reloading SQL UDFs with `UNION`. Previously, restarting the server could make the UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67840](https://github.com/ClickHouse/ClickHouse/issues/67840): Fix a potential stack overflow in the `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is kept for compatibility. Improved diagnostics of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67995](https://github.com/ClickHouse/ClickHouse/issues/67995): Validate experimental/suspicious data types in `ALTER ADD/MODIFY COLUMN`. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68224](https://github.com/ClickHouse/ClickHouse/issues/68224): Fix a wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68095](https://github.com/ClickHouse/ClickHouse/issues/68095): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68126](https://github.com/ClickHouse/ClickHouse/issues/68126): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68223](https://github.com/ClickHouse/ClickHouse/issues/68223): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via `hopEnd`, `hopStart`, `tumbleEnd`, and `tumbleStart`. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68175](https://github.com/ClickHouse/ClickHouse/issues/68175): Removes an incorrect optimization that removed sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68341](https://github.com/ClickHouse/ClickHouse/issues/68341): Fix a possible PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68398](https://github.com/ClickHouse/ClickHouse/issues/68398): Fix the missing sync replica mode in the `SYSTEM SYNC REPLICA` query. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68669](https://github.com/ClickHouse/ClickHouse/issues/68669): Fix `LOGICAL_ERROR`s when the functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#67518](https://github.com/ClickHouse/ClickHouse/issues/67518): Split the slow test 03036_dynamic_read_subcolumns. [#66954](https://github.com/ClickHouse/ClickHouse/pull/66954) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67516](https://github.com/ClickHouse/ClickHouse/issues/67516): Split 01508_partition_pruning_long. [#66983](https://github.com/ClickHouse/ClickHouse/pull/66983) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67529](https://github.com/ClickHouse/ClickHouse/issues/67529): Reduce the max time of 00763_long_lock_buffer_alter_destination_table. [#67185](https://github.com/ClickHouse/ClickHouse/pull/67185) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67803](https://github.com/ClickHouse/ClickHouse/issues/67803): Disable some Dynamic tests under sanitizers; rewrite 03202_dynamic_null_map_subcolumn to SQL. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67643](https://github.com/ClickHouse/ClickHouse/issues/67643): [Green CI] Fix the potentially flaky test_mask_sensitive_info integration test. [#67506](https://github.com/ClickHouse/ClickHouse/pull/67506) ([Alexey Katsman](https://github.com/alexkats)).
* Backported in [#67609](https://github.com/ClickHouse/ClickHouse/issues/67609): Fix test_zookeeper_config_load_balancing after adding the xdist worker name to the instance. [#67590](https://github.com/ClickHouse/ClickHouse/pull/67590) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67871](https://github.com/ClickHouse/ClickHouse/issues/67871): Fix 02434_cancel_insert_when_client_dies. [#67600](https://github.com/ClickHouse/ClickHouse/pull/67600) ([vdimir](https://github.com/vdimir)).
* Backported in [#67704](https://github.com/ClickHouse/ClickHouse/issues/67704): Fix 02910_bad_logs_level_in_local in fast tests. [#67603](https://github.com/ClickHouse/ClickHouse/pull/67603) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67689](https://github.com/ClickHouse/ClickHouse/issues/67689): Fix 01605_adaptive_granularity_block_borders. [#67605](https://github.com/ClickHouse/ClickHouse/pull/67605) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67827](https://github.com/ClickHouse/ClickHouse/issues/67827): Try to fix 03143_asof_join_ddb_long. [#67620](https://github.com/ClickHouse/ClickHouse/pull/67620) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67892](https://github.com/ClickHouse/ClickHouse/issues/67892): Revert "Merge pull request [#66510](https://github.com/ClickHouse/ClickHouse/issues/66510) from canhld94/fix_trivial_count_non_deterministic_func". [#67800](https://github.com/ClickHouse/ClickHouse/pull/67800) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68081](https://github.com/ClickHouse/ClickHouse/issues/68081): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Update version after release. [#68044](https://github.com/ClickHouse/ClickHouse/pull/68044) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68269](https://github.com/ClickHouse/ClickHouse/issues/68269): [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#68432](https://github.com/ClickHouse/ClickHouse/issues/68432): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68538](https://github.com/ClickHouse/ClickHouse/issues/68538): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
* Backported in [#68555](https://github.com/ClickHouse/ClickHouse/issues/68555): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).
36
docs/changelogs/v24.7.4.51-stable.md
Normal file
@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.4.51-stable (70fe2f6fa52) FIXME as compared to v24.7.3.42-stable (63730bc4293)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#68232](https://github.com/ClickHouse/ClickHouse/issues/68232): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68068](https://github.com/ClickHouse/ClickHouse/issues/68068): Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68613](https://github.com/ClickHouse/ClickHouse/issues/68613): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid traversal of unresolved table function arguments in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68278](https://github.com/ClickHouse/ClickHouse/issues/68278): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68040](https://github.com/ClickHouse/ClickHouse/issues/68040): Fix creation of a view with a recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#68038](https://github.com/ClickHouse/ClickHouse/issues/68038): Fix a crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is taken into account, and window functions without a window frame definition in SQL can now be placed into different `WindowTransfomer`s properly. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#68224](https://github.com/ClickHouse/ClickHouse/issues/68224): Fix a wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68095](https://github.com/ClickHouse/ClickHouse/issues/68095): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68126](https://github.com/ClickHouse/ClickHouse/issues/68126): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68223](https://github.com/ClickHouse/ClickHouse/issues/68223): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via `hopEnd`, `hopStart`, `tumbleEnd`, and `tumbleStart`. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68175](https://github.com/ClickHouse/ClickHouse/issues/68175): Removes an incorrect optimization that removed sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68341](https://github.com/ClickHouse/ClickHouse/issues/68341): Fix a possible PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68398](https://github.com/ClickHouse/ClickHouse/issues/68398): Fix the missing sync replica mode in the `SYSTEM SYNC REPLICA` query. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68669](https://github.com/ClickHouse/ClickHouse/issues/68669): Fix `LOGICAL_ERROR`s when the functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#67803](https://github.com/ClickHouse/ClickHouse/issues/67803): Disable some Dynamic tests under sanitizers; rewrite 03202_dynamic_null_map_subcolumn to SQL. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68081](https://github.com/ClickHouse/ClickHouse/issues/68081): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Update version after release. [#68044](https://github.com/ClickHouse/ClickHouse/pull/68044) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68269](https://github.com/ClickHouse/ClickHouse/issues/68269): [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#68432](https://github.com/ClickHouse/ClickHouse/issues/68432): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68538](https://github.com/ClickHouse/ClickHouse/issues/68538): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
* Backported in [#68555](https://github.com/ClickHouse/ClickHouse/issues/68555): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).
* Backported in [#68760](https://github.com/ClickHouse/ClickHouse/issues/68760): To make patch releases possible from every commit on a release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).
12
docs/changelogs/v24.8.2.3-lts.md
Normal file
@ -0,0 +1,12 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.2.3-lts (b54f79ed323) FIXME as compared to v24.8.1.2684-lts (161c62fd295)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Backported in [#68670](https://github.com/ClickHouse/ClickHouse/issues/68670): Fix `LOGICAL_ERROR`s when the functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
@ -80,7 +80,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
`PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional.

Specifying a sorting key (using `ORDER BY` clause) implicitly specifies a primary key.
-It is usually not necessary to specify the primary key in addition to the primary key.
+It is usually not necessary to specify the primary key in addition to the sorting key.

#### SAMPLE BY
@ -70,7 +70,7 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json
└────────────────────────────────────────────────┘
```

-CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
+CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.

## Reading JSON paths as subcolumns
@ -22,18 +22,26 @@ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not su

### From deb packages {#install-from-deb-packages}

-Yandex recommends using the official compiled `deb` packages for Debian or Ubuntu. To install the packages, run:
+It is recommended to use the official compiled `deb` packages for Debian or Ubuntu. To install the packages, run:

``` bash
-sudo apt-get install -y apt-transport-https ca-certificates dirmngr
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
+sudo apt-get install -y apt-transport-https ca-certificates curl gnupg
+curl -fsSL 'https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key' | sudo gpg --dearmor -o /usr/share/keyrings/clickhouse-keyring.gpg

-echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \
+echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \
    /etc/apt/sources.list.d/clickhouse.list
sudo apt-get update
```

#### Installing ClickHouse server and client

```bash
sudo apt-get install -y clickhouse-server clickhouse-client
```

#### Starting ClickHouse server

```bash
sudo service clickhouse-server start
clickhouse-client # or "clickhouse-client --password" if you've set up a password.
```
@ -55,7 +63,7 @@ clickhouse-client # or "clickhouse-client --password" if you've set up a passwor
:::
### From rpm packages {#from-rpm-packages}

-The ClickHouse team at Yandex recommends using the official precompiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.
+The ClickHouse team recommends using the official precompiled `rpm` packages for CentOS, RedHat, and all other rpm-based Linux distributions.

#### Setting up the official repository
@ -102,7 +110,7 @@ sudo yum install clickhouse-server clickhouse-client

### From tgz archives {#from-tgz-archives}

-The ClickHouse team at Yandex recommends using the precompiled binaries from `tgz` archives for all distributions where installing the `deb` and `rpm` packages is not possible.
+The ClickHouse team recommends using the precompiled binaries from `tgz` archives for all distributions where installing the `deb` and `rpm` packages is not possible.

The required version of the archives can be downloaded manually with `curl` or `wget` from the repository https://packages.clickhouse.com/tgz/.
After that, the archives need to be unpacked and the installation scripts run. Example of installing the latest version:
@ -120,7 +120,7 @@ void RoleCache::collectEnabledRoles(EnabledRoles & enabled_roles, SubscriptionsO
    SubscriptionsOnRoles new_subscriptions_on_roles;
    new_subscriptions_on_roles.reserve(subscriptions_on_roles.size());

-   auto get_role_function = [this, &subscriptions_on_roles](const UUID & id) TSA_NO_THREAD_SAFETY_ANALYSIS { return getRole(id, subscriptions_on_roles); };
+   auto get_role_function = [this, &new_subscriptions_on_roles](const UUID & id) TSA_NO_THREAD_SAFETY_ANALYSIS { return getRole(id, new_subscriptions_on_roles); };

    for (const auto & current_role : enabled_roles.params.current_roles)
        collectRoles(*new_info, skip_ids, get_role_function, current_role, true, false);
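The one-line fix above is a classic lambda-capture slip: the helper must write into the container being rebuilt, not the one about to be discarded. A minimal standalone sketch of the same bug pattern; `Subscriptions` and `subscribe` are illustrative stand-ins, not the real RoleCache types:

```cpp
#include <iostream>
#include <map>
#include <string>

using Subscriptions = std::map<std::string, int>;

/// Pretend this registers a subscription in `subs` and returns a handle.
int subscribe(const std::string & id, Subscriptions & subs)
{
    return subs.emplace(id, static_cast<int>(subs.size())).first->second;
}

int main()
{
    Subscriptions old_subscriptions = {{"admin", 0}};
    Subscriptions new_subscriptions;

    /// Buggy: captures the container that is about to be thrown away,
    /// so freshly created subscriptions are silently lost.
    auto get_role_buggy = [&old_subscriptions](const std::string & id) { return subscribe(id, old_subscriptions); };

    /// Fixed (mirrors the one-line change above): capture the container being rebuilt.
    auto get_role_fixed = [&new_subscriptions](const std::string & id) { return subscribe(id, new_subscriptions); };

    get_role_buggy("reader");
    get_role_fixed("reader");

    std::cout << new_subscriptions.size() << '\n'; /// 1 — only the fixed call landed here
}
```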
@ -692,7 +692,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage(
        result_column_node = it->second;
    }
    /// Check if it's a dynamic subcolumn
-   else
+   else if (table_expression_data.supports_subcolumns)
    {
        auto [column_name, dynamic_subcolumn_name] = Nested::splitName(identifier_full_name);
        auto jt = table_expression_data.column_name_to_column_node.find(column_name);
@ -4379,7 +4379,10 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table

    auto get_column_options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects().withVirtuals();
    if (storage_snapshot->storage.supportsSubcolumns())
+   {
        get_column_options.withSubcolumns();
+       table_expression_data.supports_subcolumns = true;
+   }

    auto column_names_and_types = storage_snapshot->getColumns(get_column_options);
    table_expression_data.column_names_and_types = NamesAndTypes(column_names_and_types.begin(), column_names_and_types.end());
@ -36,6 +36,7 @@ struct AnalysisTableExpressionData
    std::string database_name;
    std::string table_name;
    bool should_qualify_columns = true;
+   bool supports_subcolumns = false;
    NamesAndTypes column_names_and_types;
    ColumnNameToColumnNodeMap column_name_to_column_node;
    std::unordered_set<std::string> subcolumn_names; /// Subset of columns that are subcolumns of other columns
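The three hunks above thread a single capability flag from the storage down to identifier resolution: record once whether the storage supports subcolumns, and only then try to resolve `a.b` as a subcolumn of `a`. A minimal sketch of the idea under assumed, simplified types (`TableData` and `resolveIdentifier` are hypothetical stand-ins for the analyzer's structures):

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

struct TableData
{
    bool supports_subcolumns = false;
    std::unordered_map<std::string, std::string> columns; /// name -> type
};

std::optional<std::string> resolveIdentifier(const TableData & table, const std::string & name)
{
    if (auto it = table.columns.find(name); it != table.columns.end())
        return it->second;

    /// Only storages that declared subcolumn support may resolve "column.sub" paths.
    if (table.supports_subcolumns)
    {
        auto dot = name.find('.');
        if (dot != std::string::npos && table.columns.contains(name.substr(0, dot)))
            return "SubcolumnOf(" + name.substr(0, dot) + ")";
    }
    return std::nullopt;
}

int main()
{
    TableData merge_tree{.supports_subcolumns = true, .columns = {{"json", "JSON"}}};
    TableData external{.supports_subcolumns = false, .columns = {{"json", "String"}}};

    std::cout << resolveIdentifier(merge_tree, "json.path").value_or("<unresolved>") << '\n'; /// SubcolumnOf(json)
    std::cout << resolveIdentifier(external, "json.path").value_or("<unresolved>") << '\n';   /// <unresolved>
}
```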
@ -1181,13 +1181,14 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source
    /// Check if the number of all dynamic types exceeds the limit.
    if (!canAddNewVariants(0, all_variants.size()))
    {
-       /// Create list of variants with their sizes and sort it.
-       std::vector<std::pair<size_t, DataTypePtr>> variants_with_sizes;
+       /// Create a list of variants with their sizes and names and then sort it.
+       std::vector<std::tuple<size_t, String, DataTypePtr>> variants_with_sizes;
        variants_with_sizes.reserve(all_variants.size());
        for (const auto & variant : all_variants)
        {
-           if (variant->getName() != getSharedVariantTypeName())
-               variants_with_sizes.emplace_back(total_sizes[variant->getName()], variant);
+           auto variant_name = variant->getName();
+           if (variant_name != getSharedVariantTypeName())
+               variants_with_sizes.emplace_back(total_sizes[variant_name], variant_name, variant);
        }
        std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater());
@ -1196,14 +1197,14 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source
    result_variants.reserve(max_dynamic_types + 1); /// +1 for shared variant.
    /// Add shared variant.
    result_variants.push_back(getSharedVariantDataType());
-   for (const auto & [size, variant] : variants_with_sizes)
+   for (const auto & [size, variant_name, variant_type] : variants_with_sizes)
    {
        /// Add variant to the resulting variants list until we reach max_dynamic_types.
        if (canAddNewVariant(result_variants.size()))
-           result_variants.push_back(variant);
+           result_variants.push_back(variant_type);
        /// Add all remaining variants into shared_variants_statistics until we reach its max size.
        else if (new_statistics.shared_variants_statistics.size() < Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE)
-           new_statistics.shared_variants_statistics[variant->getName()] = size;
+           new_statistics.shared_variants_statistics[variant_name] = size;
        else
            break;
    }
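The switch from `std::pair` to `std::tuple` above lets the same `std::greater()` comparator break size ties by variant name, making the selection deterministic. A small self-contained illustration of that sorting trick:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

int main()
{
    std::vector<std::tuple<size_t, std::string, std::string>> variants_with_sizes = {
        {10, "String", "type-String"},
        {42, "Int64", "type-Int64"},
        {10, "Float64", "type-Float64"},
    };

    /// std::greater on tuples compares lexicographically: size first (descending),
    /// then name, so equal-sized variants always land in the same order.
    std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater());

    for (const auto & [size, name, type] : variants_with_sizes)
        std::cout << size << ' ' << name << '\n';
    /// 42 Int64
    /// 10 String
    /// 10 Float64
}
```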
@ -127,7 +127,7 @@ std::string ColumnObject::getName() const
{
    WriteBufferFromOwnString ss;
    ss << "Object(";
-   ss << "max_dynamic_paths=" << max_dynamic_paths;
+   ss << "max_dynamic_paths=" << global_max_dynamic_paths;
    ss << ", max_dynamic_types=" << max_dynamic_types;
    std::vector<String> sorted_typed_paths;
    sorted_typed_paths.reserve(typed_paths.size());
@ -1045,9 +1045,9 @@ void ColumnObject::forEachSubcolumnRecursively(DB::IColumn::RecursiveMutableColu

bool ColumnObject::structureEquals(const IColumn & rhs) const
{
-   /// 2 Object columns have equal structure if they have the same typed paths and max_dynamic_paths/max_dynamic_types.
+   /// 2 Object columns have equal structure if they have the same typed paths and global_max_dynamic_paths/max_dynamic_types.
    const auto * rhs_object = typeid_cast<const ColumnObject *>(&rhs);
-   if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() || max_dynamic_paths != rhs_object->max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types)
+   if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() || global_max_dynamic_paths != rhs_object->global_max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types)
        return false;

    for (const auto & [path, column] : typed_paths)
@ -953,7 +953,7 @@ ColumnPtr ColumnVariant::index(const IColumn & indexes, size_t limit) const
{
    /// If we have only NULLs, index will take no effect, just return resized column.
    if (hasOnlyNulls())
-       return cloneResized(limit);
+       return cloneResized(limit == 0 ? indexes.size() : limit);

    /// Optimization when we have only one non empty variant and no NULLs.
    /// In this case local_discriminators column is filled with identical values and offsets column
@ -1009,8 +1009,16 @@ ColumnPtr ColumnVariant::indexImpl(const PaddedPODArray<Type> & indexes, size_t
    new_variants.reserve(num_variants);
    for (size_t i = 0; i != num_variants; ++i)
    {
-       size_t nested_limit = nested_perms[i].size() == variants[i]->size() ? 0 : nested_perms[i].size();
-       new_variants.emplace_back(variants[i]->permute(nested_perms[i], nested_limit));
+       /// Check if no values from this variant were selected.
+       if (nested_perms[i].empty())
+       {
+           new_variants.emplace_back(variants[i]->cloneEmpty());
+       }
+       else
+       {
+           size_t nested_limit = nested_perms[i].size() == variants[i]->size() ? 0 : nested_perms[i].size();
+           new_variants.emplace_back(variants[i]->permute(nested_perms[i], nested_limit));
+       }
    }

    /// We cannot use new_offsets column as an offset column, because it became invalid after variants permutation.
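Both hunks rely on a convention worth spelling out: in this column API a `limit` (or a nested permutation limit) of 0 means "no limit", so the all-NULL shortcut must translate it to `indexes.size()` before resizing. A tiny sketch of the convention; the function is hypothetical, not the real `IColumn` interface:

```cpp
#include <cassert>
#include <vector>

/// Mimics the fixed shortcut: build a result of the right length for an
/// all-NULL column, honoring the "limit == 0 means take all indexes" rule.
std::vector<int> indexAllNulls(const std::vector<size_t> & indexes, size_t limit)
{
    size_t result_size = limit == 0 ? indexes.size() : limit; /// assumed convention
    return std::vector<int>(result_size, /*null placeholder*/ 0);
}

int main()
{
    std::vector<size_t> indexes = {2, 0, 1};
    assert(indexAllNulls(indexes, 0).size() == 3); /// was 0 before the fix
    assert(indexAllNulls(indexes, 2).size() == 2);
}
```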
@ -273,6 +273,25 @@ void SystemLogBase<LogElement>::startup()
    saving_thread = std::make_unique<ThreadFromGlobalPool>([this] { savingThreadFunction(); });
}

+template <typename LogElement>
+void SystemLogBase<LogElement>::stopFlushThread()
+{
+    {
+        std::lock_guard lock(thread_mutex);
+
+        if (!saving_thread || !saving_thread->joinable())
+            return;
+
+        if (is_shutdown)
+            return;
+
+        is_shutdown = true;
+        queue->shutdown();
+    }
+
+    saving_thread->join();
+}
+
template <typename LogElement>
void SystemLogBase<LogElement>::add(LogElement element)
{
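The `stopFlushThread()` hoisted into the base class above follows a common shutdown pattern: check and flip the shutdown state under the mutex, but `join()` outside of it so the worker can still acquire the lock while draining. A minimal standalone sketch with `std::thread` (the real code uses `ThreadFromGlobalPool` and a log queue):

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct Worker
{
    std::mutex thread_mutex;
    std::condition_variable wake;
    bool is_shutdown = false;
    std::thread saving_thread;

    void start()
    {
        saving_thread = std::thread([this]
        {
            std::unique_lock lock(thread_mutex);
            wake.wait(lock, [this] { return is_shutdown; }); /// stand-in for the flush loop
        });
    }

    void stopFlushThread()
    {
        {
            std::lock_guard lock(thread_mutex);
            if (!saving_thread.joinable() || is_shutdown)
                return; /// idempotent: safe to call twice
            is_shutdown = true;
        }
        wake.notify_all();
        saving_thread.join(); /// outside the lock: the worker may need it to finish
    }
};

int main()
{
    Worker w;
    w.start();
    w.stopFlushThread();
    std::cout << "stopped\n";
}
```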
@ -216,6 +216,8 @@ public:
    static consteval bool shouldTurnOffLogger() { return false; }

+protected:
+    void stopFlushThread() final;

    std::shared_ptr<SystemLogQueue<LogElement>> queue;
};
}
@ -72,11 +72,13 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"24.9",
    {
        {"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
        {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
        {"create_if_not_exists", false, false, "New setting."},
        {"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
    }
},
{"24.8",
    {
        {"create_if_not_exists", false, false, "New setting."},
        {"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
        {"restore_replace_external_table_functions_to_null", false, false, "New setting."},
        {"restore_replace_external_engines_to_null", false, false, "New setting."},
@ -85,7 +87,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
        {"use_hive_partitioning", false, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines."},
        {"allow_experimental_kafka_offsets_storage_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"},
        {"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."},
-       {"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
        {"query_cache_tag", "", "", "New setting for labeling query cache settings."},
        {"allow_experimental_time_series_table", false, false, "Added new setting to allow the TimeSeries table engine"},
        {"enable_analyzer", 1, 1, "Added an alias to a setting `allow_experimental_analyzer`."},
@ -93,7 +94,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
        {"allow_experimental_json_type", false, false, "Add new experimental JSON type"},
        {"use_json_alias_for_old_object_type", true, false, "Use JSON type alias to create new JSON type"},
        {"type_json_skip_duplicated_paths", false, false, "Allow to skip duplicated paths during JSON parsing"},
-       {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
        {"allow_experimental_vector_similarity_index", false, false, "Added new setting to allow experimental vector similarity indexes"},
        {"input_format_try_infer_datetimes_only_datetime64", true, false, "Allow to infer DateTime instead of DateTime64 in data formats"}
    }
@ -93,9 +93,9 @@ namespace impl
    if (is_const)
        i = 0;
    assert(key0->size() == key1->size());
-   if (offsets != nullptr)
+   if (offsets != nullptr && i > 0)
    {
-       const auto * const begin = offsets->begin();
+       const auto * const begin = std::upper_bound(offsets->begin(), offsets->end(), i - 1);
        const auto * upper = std::upper_bound(begin, offsets->end(), i);
        if (upper != offsets->end())
            i = upper - begin;
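The fix above adjusts a lookup over ClickHouse-style cumulative array offsets. The underlying pattern, independent of the hashing code, is mapping a flat element index to its owning row with `std::upper_bound`; a small sketch:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

/// offsets[r] is the index one past the last element of row r, so the owning
/// row is the first offset strictly greater than the flat element index.
size_t rowOfElement(const std::vector<size_t> & offsets, size_t flat_index)
{
    return std::upper_bound(offsets.begin(), offsets.end(), flat_index) - offsets.begin();
}

int main()
{
    /// Three rows: [a b], [], [c d e]  ->  cumulative end offsets {2, 2, 5}.
    std::vector<size_t> offsets = {2, 2, 5};
    std::cout << rowOfElement(offsets, 0) << '\n'; /// 0
    std::cout << rowOfElement(offsets, 2) << '\n'; /// 2 (the empty row 1 is skipped)
    std::cout << rowOfElement(offsets, 4) << '\n'; /// 2
}
```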
@ -701,7 +701,6 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
        col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_deflate_qpl_codec, enable_zstd_qat_codec);
    }

-   column.statistics.column_name = column.name; /// We assign column name here for better exception error message.
    if (col_decl.statistics_desc)
    {
        if (!skip_checks && !context_->getSettingsRef().allow_experimental_statistics)
@ -10,7 +10,7 @@ void PeriodicLog<LogElement>::startCollect(size_t collect_interval_milliseconds_
{
    collect_interval_milliseconds = collect_interval_milliseconds_;
    is_shutdown_metric_thread = false;
-   flush_thread = std::make_unique<ThreadFromGlobalPool>([this] { threadFunction(); });
+   collecting_thread = std::make_unique<ThreadFromGlobalPool>([this] { threadFunction(); });
}

template <typename LogElement>
@ -19,15 +19,15 @@ void PeriodicLog<LogElement>::stopCollect()
    bool old_val = false;
    if (!is_shutdown_metric_thread.compare_exchange_strong(old_val, true))
        return;
-   if (flush_thread)
-       flush_thread->join();
+   if (collecting_thread)
+       collecting_thread->join();
}

template <typename LogElement>
void PeriodicLog<LogElement>::shutdown()
{
    stopCollect();
    this->stopFlushThread();
    Base::shutdown();
}

template <typename LogElement>
@ -17,6 +17,7 @@ template <typename LogElement>
class PeriodicLog : public SystemLog<LogElement>
{
    using SystemLog<LogElement>::SystemLog;
+   using Base = SystemLog<LogElement>;

public:
    using TimePoint = std::chrono::system_clock::time_point;
@ -24,18 +25,18 @@
    /// Launches a background thread to collect metrics with interval
    void startCollect(size_t collect_interval_milliseconds_);

-   /// Stop background thread
-   void stopCollect();
-
    void shutdown() final;

protected:
+   /// Stop background thread
+   void stopCollect();
+
    virtual void stepFunction(TimePoint current_time) = 0;

private:
    void threadFunction();

-   std::unique_ptr<ThreadFromGlobalPool> flush_thread;
+   std::unique_ptr<ThreadFromGlobalPool> collecting_thread;
    size_t collect_interval_milliseconds;
    std::atomic<bool> is_shutdown_metric_thread{false};
};
@ -402,32 +402,13 @@ SystemLog<LogElement>::SystemLog(
template <typename LogElement>
void SystemLog<LogElement>::shutdown()
{
-   stopFlushThread();
+   Base::stopFlushThread();

    auto table = DatabaseCatalog::instance().tryGetTable(table_id, getContext());
    if (table)
        table->flushAndShutdown();
}

-template <typename LogElement>
-void SystemLog<LogElement>::stopFlushThread()
-{
-    {
-        std::lock_guard lock(thread_mutex);
-
-        if (!saving_thread || !saving_thread->joinable())
-            return;
-
-        if (is_shutdown)
-            return;
-
-        is_shutdown = true;
-        queue->shutdown();
-    }
-
-    saving_thread->join();
-}
-
template <typename LogElement>
void SystemLog<LogElement>::savingThreadFunction()
@ -125,8 +125,6 @@ public:

    void shutdown() override;

-   void stopFlushThread() override;
-
    /** Creates new table if it does not exist.
      * Renames old table if its structure is not suitable.
      * This cannot be done in constructor to avoid deadlock while renaming a table under locked Context when SystemLog object is created.
@ -136,9 +134,6 @@
protected:
    LoggerPtr log;

-   using ISystemLog::is_shutdown;
-   using ISystemLog::saving_thread;
-   using ISystemLog::thread_mutex;
    using Base::queue;

    StoragePtr getStorage() const;
@ -706,9 +706,9 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
    }

    auto stats_vec = ColumnStatisticsDescription::fromAST(statistics_decl, metadata.columns);
-   for (const auto & stats : stats_vec)
+   for (const auto & [stats_column_name, stats] : stats_vec)
    {
-       metadata.columns.modify(stats.column_name,
+       metadata.columns.modify(stats_column_name,
            [&](ColumnDescription & column) { column.statistics.merge(stats, column.name, column.type, if_not_exists); });
    }
}
@ -735,14 +735,14 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
    {
        if (!metadata.columns.has(statistics_column_name))
        {
-           throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Cannot add statistics for column {}: this column is not found", statistics_column_name);
+           throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Cannot modify statistics for column {}: this column is not found", statistics_column_name);
        }
    }

    auto stats_vec = ColumnStatisticsDescription::fromAST(statistics_decl, metadata.columns);
-   for (const auto & stats : stats_vec)
+   for (const auto & [stats_column_name, stats] : stats_vec)
    {
-       metadata.columns.modify(stats.column_name,
+       metadata.columns.modify(stats_column_name,
            [&](ColumnDescription & column) { column.statistics.assign(stats); });
    }
}
@ -867,8 +867,6 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
            rename_visitor.visit(column_to_modify.default_desc.expression);
        if (column_to_modify.ttl)
            rename_visitor.visit(column_to_modify.ttl);
-       if (column_to_modify.name == column_name && !column_to_modify.statistics.empty())
-           column_to_modify.statistics.column_name = rename_to;
    });
}
if (metadata.table_ttl.definition_ast)
@ -113,7 +113,15 @@ bool ColumnDescription::operator==(const ColumnDescription & other) const
    && ast_to_str(ttl) == ast_to_str(other.ttl);
}

-void ColumnDescription::writeText(WriteBuffer & buf) const
+String formatASTStateAware(IAST & ast, IAST::FormatState & state)
+{
+    WriteBufferFromOwnString buf;
+    IAST::FormatSettings settings(buf, true, false);
+    ast.formatImpl(settings, state, IAST::FormatStateStacked());
+    return buf.str();
+}
+
+void ColumnDescription::writeText(WriteBuffer & buf, IAST::FormatState & state, bool include_comment) const
{
    /// NOTE: Serialization format is insane.
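The new `formatASTStateAware` threads one `IAST::FormatState` through every AST serialized for a `ColumnsDescription`, so the whole listing is produced as a single, self-consistent formatting pass. The following is an illustrative sketch of why shared state matters, with a made-up `FormatState` rather than the real IAST API:

```cpp
#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

struct FormatState
{
    std::set<std::string> printed_aliases; /// assumption: stand-in for IAST::FormatState
};

/// First occurrence prints the full definition; repeats just reference the alias.
std::string formatExpression(const std::string & expr, const std::string & alias, FormatState & state)
{
    if (state.printed_aliases.insert(alias).second)
        return expr + " AS " + alias;
    return alias;
}

int main()
{
    FormatState state;
    std::vector<std::pair<std::string, std::string>> defaults = {
        {"rand() % 10", "bucket"},
        {"rand() % 10", "bucket"}, /// the same expression reused by another column
    };
    for (const auto & [expr, alias] : defaults)
        std::cout << formatExpression(expr, alias, state) << '\n';
    /// rand() % 10 AS bucket
    /// bucket
}
```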
@ -126,20 +134,21 @@ void ColumnDescription::writeText(WriteBuffer & buf) const
        writeChar('\t', buf);
        DB::writeText(DB::toString(default_desc.kind), buf);
        writeChar('\t', buf);
-       writeEscapedString(queryToString(default_desc.expression), buf);
+       writeEscapedString(formatASTStateAware(*default_desc.expression, state), buf);
    }

-   if (!comment.empty())
+   if (!comment.empty() && include_comment)
    {
        writeChar('\t', buf);
        DB::writeText("COMMENT ", buf);
-       writeEscapedString(queryToString(ASTLiteral(Field(comment))), buf);
+       auto ast = ASTLiteral(Field(comment));
+       writeEscapedString(formatASTStateAware(ast, state), buf);
    }

    if (codec)
    {
        writeChar('\t', buf);
-       writeEscapedString(queryToString(codec), buf);
+       writeEscapedString(formatASTStateAware(*codec, state), buf);
    }

    if (!settings.empty())
|
||||
ASTSetQuery ast;
|
||||
ast.is_standalone = false;
|
||||
ast.changes = settings;
|
||||
writeEscapedString(queryToString(ast), buf);
|
||||
writeEscapedString(formatASTStateAware(ast, state), buf);
|
||||
DB::writeText(")", buf);
|
||||
}
|
||||
|
||||
if (!statistics.empty())
|
||||
{
|
||||
writeChar('\t', buf);
|
||||
writeEscapedString(queryToString(statistics.getAST()), buf);
|
||||
writeEscapedString(formatASTStateAware(*statistics.getAST(), state), buf);
|
||||
}
|
||||
|
||||
if (ttl)
|
||||
{
|
||||
writeChar('\t', buf);
|
||||
DB::writeText("TTL ", buf);
|
||||
writeEscapedString(queryToString(ttl), buf);
|
||||
writeEscapedString(formatASTStateAware(*ttl, state), buf);
|
||||
}
|
||||
|
||||
writeChar('\n', buf);
|
||||
@ -209,11 +218,7 @@ void ColumnDescription::readText(ReadBuffer & buf)
|
||||
settings = col_ast->settings->as<ASTSetQuery &>().changes;
|
||||
|
||||
if (col_ast->statistics_desc)
|
||||
{
|
||||
statistics = ColumnStatisticsDescription::fromColumnDeclaration(*col_ast, type);
|
||||
/// every column has name `x` here, so we have to set the name manually.
|
||||
statistics.column_name = name;
|
||||
}
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse column description");
|
||||
@ -895,16 +900,17 @@ void ColumnsDescription::resetColumnTTLs()
}

-String ColumnsDescription::toString() const
+String ColumnsDescription::toString(bool include_comments) const
{
    WriteBufferFromOwnString buf;
+   IAST::FormatState ast_format_state;

    writeCString("columns format version: 1\n", buf);
    DB::writeText(columns.size(), buf);
    writeCString(" columns:\n", buf);

    for (const ColumnDescription & column : columns)
-       column.writeText(buf);
+       column.writeText(buf, ast_format_state, include_comments);

    return buf.str();
}
@ -104,7 +104,7 @@ struct ColumnDescription
    bool operator==(const ColumnDescription & other) const;
    bool operator!=(const ColumnDescription & other) const { return !(*this == other); }

-   void writeText(WriteBuffer & buf) const;
+   void writeText(WriteBuffer & buf, IAST::FormatState & state, bool include_comment) const;
    void readText(ReadBuffer & buf);
};

@ -137,7 +137,7 @@ public:
    /// NOTE Must correspond with Nested::flatten function.
    void flattenNested(); /// TODO: remove, insert already flattened Nested columns.

-   bool operator==(const ColumnsDescription & other) const { return columns == other.columns; }
+   bool operator==(const ColumnsDescription & other) const { return toString(false) == other.toString(false); }
    bool operator!=(const ColumnsDescription & other) const { return !(*this == other); }

    auto begin() const { return columns.begin(); }
@ -221,7 +221,7 @@ public:
    /// Does column has non default specified compression codec
    bool hasCompressionCodec(const String & column_name) const;

-   String toString() const;
+   String toString(bool include_comments = true) const;
    static ColumnsDescription parse(const String & str);

    size_t size() const
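With `toString(bool include_comments)` available, `operator==` above is redefined as "equal after serializing without comments". A compact sketch of that comparison strategy with simplified types:

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Column
{
    std::string name;
    std::string type;
    std::string comment;
};

struct Columns
{
    std::vector<Column> columns;

    /// Canonical serialization; comments are included only on request.
    std::string toString(bool include_comments) const
    {
        std::string out;
        for (const auto & c : columns)
        {
            out += c.name + ' ' + c.type;
            if (include_comments && !c.comment.empty())
                out += " COMMENT " + c.comment;
            out += '\n';
        }
        return out;
    }

    /// Equality ignores comments by comparing the comment-free serialization.
    bool operator==(const Columns & other) const { return toString(false) == other.toString(false); }
};

int main()
{
    Columns a{{{"id", "UInt64", "primary id"}}};
    Columns b{{{"id", "UInt64", "renamed comment"}}};
    std::cout << (a == b) << '\n'; /// 1: comments do not affect equality
}
```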
@ -444,8 +444,8 @@ StorageHive::StorageHive(
    storage_metadata.setComment(comment_);
    storage_metadata.partition_key = KeyDescription::getKeyFromAST(partition_by_ast, storage_metadata.columns, getContext());

-   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, getContext()));
    setInMemoryMetadata(storage_metadata);
+   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), getContext()));
}

void StorageHive::lazyInitialize()
@ -75,7 +75,7 @@ static ColumnsStatistics getStatisticsForColumns(
        const auto * desc = all_columns.tryGet(column.name);
        if (desc && !desc->statistics.empty())
        {
-           auto statistics = MergeTreeStatisticsFactory::instance().get(desc->statistics);
+           auto statistics = MergeTreeStatisticsFactory::instance().get(*desc);
            all_statistics.push_back(std::move(statistics));
        }
    }
@ -552,7 +552,7 @@ static std::set<ColumnStatisticsPtr> getStatisticsToRecalculate(const StorageMet
    {
        if (!col_desc.statistics.empty() && materialized_stats.contains(col_desc.name))
        {
-           stats_to_recalc.insert(stats_factory.get(col_desc.statistics));
+           stats_to_recalc.insert(stats_factory.get(col_desc));
        }
    }
    return stats_to_recalc;
@ -1557,7 +1557,7 @@ private:

        if (ctx->materialized_statistics.contains(col.name))
        {
-           stats_to_rewrite.push_back(MergeTreeStatisticsFactory::instance().get(col.statistics));
+           stats_to_rewrite.push_back(MergeTreeStatisticsFactory::instance().get(col));
        }
        else
        {
@ -538,6 +538,9 @@ static StoragePtr create(const StorageFactory::Arguments & args)

    if (replica_name.empty())
        throw Exception(ErrorCodes::NO_REPLICA_NAME_GIVEN, "No replica name in config{}", verbose_help_message);
+   // '\t' and '\n' will interrupt parsing 'source replica' in ReplicatedMergeTreeLogEntryData::readText
+   if (replica_name.find('\t') != String::npos || replica_name.find('\n') != String::npos)
+       throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must not contain '\\t' or '\\n'");

    arg_cnt = engine_args.size(); /// Update `arg_cnt` here because extractZooKeeperPathAndReplicaNameFromEngineArgs() could add arguments.
    arg_num = 2; /// zookeeper_path and replica_name together are always two arguments.
@ -94,7 +94,7 @@ StorageObjectStorage::StorageObjectStorage(
    if (sample_path.empty() && context->getSettingsRef().use_hive_partitioning)
        sample_path = getPathSample(metadata, context);

-   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context, sample_path, format_settings));
+   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.columns, context, sample_path, format_settings));
    setInMemoryMetadata(metadata);
}
@ -68,7 +68,7 @@ StorageObjectStorageCluster::StorageObjectStorageCluster(
    if (sample_path.empty() && context_->getSettingsRef().use_hive_partitioning)
        sample_path = getPathSample(metadata, context_);

-   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_, sample_path));
+   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.columns, context_, sample_path));
    setInMemoryMetadata(metadata);
}
@ -208,7 +208,7 @@ Chunk StorageObjectStorageSource::generate()
            .filename = &filename,
            .last_modified = object_info->metadata->last_modified,
            .etag = &(object_info->metadata->etag)
-       }, getContext(), read_from_format_info.columns_description);
+       }, getContext());

        const auto & partition_columns = configuration->getPartitionColumns();
        if (!partition_columns.empty() && chunk_size && chunk.hasColumns())
@ -280,7 +280,7 @@ StorageObjectStorageSource::ReaderHolder StorageObjectStorageSource::createReade
    const std::shared_ptr<IIterator> & file_iterator,
    const ConfigurationPtr & configuration,
    const ObjectStoragePtr & object_storage,
-   const ReadFromFormatInfo & read_from_format_info,
+   ReadFromFormatInfo & read_from_format_info,
    const std::optional<FormatSettings> & format_settings,
    const std::shared_ptr<const KeyCondition> & key_condition_,
    const ContextPtr & context_,
@ -74,7 +74,7 @@ protected:
    const UInt64 max_block_size;
    const bool need_only_count;
    const size_t max_parsing_threads;
-   const ReadFromFormatInfo read_from_format_info;
+   ReadFromFormatInfo read_from_format_info;
    const std::shared_ptr<ThreadPool> create_reader_pool;

    std::shared_ptr<IIterator> file_iterator;
@ -122,7 +122,7 @@ protected:
    const std::shared_ptr<IIterator> & file_iterator,
    const ConfigurationPtr & configuration,
    const ObjectStoragePtr & object_storage,
-   const ReadFromFormatInfo & read_from_format_info,
+   ReadFromFormatInfo & read_from_format_info,
    const std::optional<FormatSettings> & format_settings,
    const std::shared_ptr<const KeyCondition> & key_condition_,
    const ContextPtr & context_,
@ -524,7 +524,7 @@ Chunk ObjectStorageQueueSource::generateImpl()
            {
                .path = path,
                .size = reader.getObjectInfo()->metadata->size_bytes
-           }, getContext(), read_from_format_info.columns_description);
+           }, getContext());

            return chunk;
        }
@ -128,7 +128,7 @@ private:
    const std::shared_ptr<FileIterator> file_iterator;
    const ConfigurationPtr configuration;
    const ObjectStoragePtr object_storage;
-   const ReadFromFormatInfo read_from_format_info;
+   ReadFromFormatInfo read_from_format_info;
    const std::optional<FormatSettings> format_settings;
    const ObjectStorageQueueSettings queue_settings;
    const std::shared_ptr<ObjectStorageQueueMetadata> files_metadata;
@ -169,7 +169,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue(
    storage_metadata.setColumns(columns);
    storage_metadata.setConstraints(constraints_);
    storage_metadata.setComment(comment);
-   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_));
+   setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context_));
    setInMemoryMetadata(storage_metadata);

    LOG_INFO(log, "Using zookeeper path: {}", zk_path.string());
@ -58,8 +58,8 @@ IStatistics::IStatistics(const SingleStatisticsDescription & stat_)
{
}

-ColumnStatistics::ColumnStatistics(const ColumnStatisticsDescription & stats_desc_)
-   : stats_desc(stats_desc_)
+ColumnStatistics::ColumnStatistics(const ColumnStatisticsDescription & stats_desc_, const String & column_name_)
+   : stats_desc(stats_desc_), column_name(column_name_)
{
}
@ -176,7 +176,7 @@ String ColumnStatistics::getFileName() const

const String & ColumnStatistics::columnName() const
{
-   return stats_desc.column_name;
+   return column_name;
}

UInt64 ColumnStatistics::rowCount() const
@ -227,15 +227,15 @@ void MergeTreeStatisticsFactory::validate(const ColumnStatisticsDescription & st
|
||||
}
|
||||
}
|
||||
|
||||
ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnStatisticsDescription & stats) const
|
||||
ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnDescription & column_desc) const
|
||||
{
|
||||
ColumnStatisticsPtr column_stat = std::make_shared<ColumnStatistics>(stats);
|
||||
for (const auto & [type, desc] : stats.types_to_desc)
|
||||
ColumnStatisticsPtr column_stat = std::make_shared<ColumnStatistics>(column_desc.statistics, column_desc.name);
|
||||
for (const auto & [type, desc] : column_desc.statistics.types_to_desc)
|
||||
{
|
||||
auto it = creators.find(type);
|
||||
if (it == creators.end())
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'tdigest' 'uniq' and 'count_min'", type);
|
||||
auto stat_ptr = (it->second)(desc, stats.data_type);
|
||||
auto stat_ptr = (it->second)(desc, column_desc.type);
|
||||
column_stat->stats[type] = stat_ptr;
|
||||
}
|
||||
return column_stat;
|
||||
@ -246,7 +246,7 @@ ColumnsStatistics MergeTreeStatisticsFactory::getMany(const ColumnsDescription &
|
||||
ColumnsStatistics result;
|
||||
for (const auto & col : columns)
|
||||
if (!col.statistics.empty())
|
||||
result.push_back(get(col.statistics));
|
||||
result.push_back(get(col));
|
||||
return result;
|
||||
}
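
A minimal Python sketch of the factory pattern the statistics hunks above move to: statistics objects are now created from the whole column description, so the column's name and type travel together with its statistics settings instead of being duplicated inside the statistics description. Illustrative only; `ColumnDescription`, the `creators` map and the tdigest creator below stand in for the C++ types and are not ClickHouse code.

    # Illustrative sketch, not ClickHouse code: per-column statistics creation
    # keyed by the full column description, mirroring MergeTreeStatisticsFactory::get().
    from dataclasses import dataclass, field

    @dataclass
    class ColumnDescription:          # stands in for DB::ColumnDescription
        name: str
        type: str
        statistics: dict = field(default_factory=dict)  # statistics type -> settings

    class StatisticsFactory:
        def __init__(self):
            # one creator per statistics type, as in the C++ `creators` map
            self.creators = {"tdigest": lambda desc, data_type: ("tdigest", data_type)}

        def get(self, column_desc):
            stats = {}
            for stat_type, desc in column_desc.statistics.items():
                if stat_type not in self.creators:
                    raise ValueError(f"Unknown statistic type '{stat_type}'")
                stats[stat_type] = self.creators[stat_type](desc, column_desc.type)
            # name and type come from the same description the statistics came from
            return column_desc.name, stats

    factory = StatisticsFactory()
    print(factory.get(ColumnDescription("a", "Int64", {"tdigest": {}})))
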

@@ -54,7 +54,7 @@ using StatisticsPtr = std::shared_ptr<IStatistics>;
 class ColumnStatistics
 {
 public:
-    explicit ColumnStatistics(const ColumnStatisticsDescription & stats_desc_);
+    explicit ColumnStatistics(const ColumnStatisticsDescription & stats_desc_, const String & column_name_);

     void serialize(WriteBuffer & buf);
     void deserialize(ReadBuffer & buf);

@@ -73,10 +73,12 @@ public:
 private:
     friend class MergeTreeStatisticsFactory;
     ColumnStatisticsDescription stats_desc;
+    String column_name;
     std::map<StatisticsType, StatisticsPtr> stats;
     UInt64 rows = 0; /// the number of rows in the column
 };

+struct ColumnDescription;
 class ColumnsDescription;
 using ColumnStatisticsPtr = std::shared_ptr<ColumnStatistics>;
 using ColumnsStatistics = std::vector<ColumnStatisticsPtr>;

@@ -91,7 +93,7 @@ public:
     using Validator = std::function<void(const SingleStatisticsDescription & stats, const DataTypePtr & data_type)>;
     using Creator = std::function<StatisticsPtr(const SingleStatisticsDescription & stats, const DataTypePtr & data_type)>;

-    ColumnStatisticsPtr get(const ColumnStatisticsDescription & stats) const;
+    ColumnStatisticsPtr get(const ColumnDescription & column_desc) const;
     ColumnsStatistics getMany(const ColumnsDescription & columns) const;

     void registerValidator(StatisticsType type, Validator validator);

@@ -6,7 +6,6 @@
 #include <Parsers/ASTStatisticsDeclaration.h>
 #include <Parsers/queryToString.h>
 #include <Parsers/ParserCreateQuery.h>
 #include <Poco/Logger.h>
 #include <Storages/ColumnsDescription.h>

@@ -97,16 +96,13 @@ void ColumnStatisticsDescription::merge(const ColumnStatisticsDescription & othe
 {
     chassert(merging_column_type);

-    if (column_name.empty())
-        column_name = merging_column_name;
-
     data_type = merging_column_type;

     for (const auto & [stats_type, stats_desc]: other.types_to_desc)
     {
         if (!if_not_exists && types_to_desc.contains(stats_type))
         {
-            throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics type name {} has existed in column {}", stats_type, column_name);
+            throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics type name {} has existed in column {}", stats_type, merging_column_name);
         }
         else if (!types_to_desc.contains(stats_type))
             types_to_desc.emplace(stats_type, stats_desc);

@@ -115,9 +111,6 @@ void ColumnStatisticsDescription::merge(const ColumnStatisticsDescription & othe

 void ColumnStatisticsDescription::assign(const ColumnStatisticsDescription & other)
 {
-    if (other.column_name != column_name)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot assign statistics from column {} to {}", column_name, other.column_name);
-
     types_to_desc = other.types_to_desc;
     data_type = other.data_type;
 }

@@ -127,7 +120,7 @@ void ColumnStatisticsDescription::clear()
     types_to_desc.clear();
 }

-std::vector<ColumnStatisticsDescription> ColumnStatisticsDescription::fromAST(const ASTPtr & definition_ast, const ColumnsDescription & columns)
+std::vector<std::pair<String, ColumnStatisticsDescription>> ColumnStatisticsDescription::fromAST(const ASTPtr & definition_ast, const ColumnsDescription & columns)
 {
     const auto * stat_definition_ast = definition_ast->as<ASTStatisticsDeclaration>();
     if (!stat_definition_ast)

@@ -145,7 +138,7 @@ std::vector<ColumnStatisticsDescription> ColumnStatisticsDescription::fromAST(co
         statistics_types.emplace(stat.type, stat);
     }

-    std::vector<ColumnStatisticsDescription> result;
+    std::vector<std::pair<String, ColumnStatisticsDescription>> result;
     result.reserve(stat_definition_ast->columns->children.size());

     for (const auto & column_ast : stat_definition_ast->columns->children)

@@ -157,10 +150,9 @@ std::vector<ColumnStatisticsDescription> ColumnStatisticsDescription::fromAST(co
             throw Exception(ErrorCodes::INCORRECT_QUERY, "Incorrect column name {}", physical_column_name);

         const auto & column = columns.getPhysical(physical_column_name);
-        stats.column_name = column.name;
         stats.data_type = column.type;
         stats.types_to_desc = statistics_types;
-        result.push_back(stats);
+        result.emplace_back(physical_column_name, stats);
     }

     if (result.empty())

@@ -175,14 +167,13 @@ ColumnStatisticsDescription ColumnStatisticsDescription::fromColumnDeclaration(c
     if (stat_type_list_ast->children.empty())
         throw Exception(ErrorCodes::INCORRECT_QUERY, "We expect at least one statistics type for column {}", queryToString(column));
     ColumnStatisticsDescription stats;
-    stats.column_name = column.name;
     for (const auto & ast : stat_type_list_ast->children)
     {
         const auto & stat_type = ast->as<const ASTFunction &>().name;

         SingleStatisticsDescription stat(stringToStatisticsType(Poco::toLower(stat_type)), ast->clone());
         if (stats.types_to_desc.contains(stat.type))
-            throw Exception(ErrorCodes::INCORRECT_QUERY, "Column {} already contains statistics type {}", stats.column_name, stat_type);
+            throw Exception(ErrorCodes::INCORRECT_QUERY, "Column {} already contains statistics type {}", column.name, stat_type);
         stats.types_to_desc.emplace(stat.type, std::move(stat));
     }
     stats.data_type = data_type;

@@ -55,12 +55,12 @@ struct ColumnStatisticsDescription

     ASTPtr getAST() const;

-    static std::vector<ColumnStatisticsDescription> fromAST(const ASTPtr & definition_ast, const ColumnsDescription & columns);
+    /// get a vector of <column name, statistics desc> pair
+    static std::vector<std::pair<String, ColumnStatisticsDescription>> fromAST(const ASTPtr & definition_ast, const ColumnsDescription & columns);
     static ColumnStatisticsDescription fromColumnDeclaration(const ASTColumnDeclaration & column, DataTypePtr data_type);

     using StatisticsTypeDescMap = std::map<StatisticsType, SingleStatisticsDescription>;
     StatisticsTypeDescMap types_to_desc;
-    String column_name;
     DataTypePtr data_type;
 };

@@ -290,6 +290,10 @@ VirtualColumnsDescription StorageDistributed::createVirtuals()

     desc.addEphemeral("_shard_num", std::make_shared<DataTypeUInt32>(), "Deprecated. Use function shardNum instead");

+    /// Add virtual columns from table with Merge engine.
+    desc.addEphemeral("_database", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "The name of database which the row comes from");
+    desc.addEphemeral("_table", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "The name of table which the row comes from");
+
     return desc;
 }

@@ -1112,9 +1112,9 @@ void StorageFile::setStorageMetadata(CommonArguments args)

     storage_metadata.setConstraints(args.constraints);
     storage_metadata.setComment(args.comment);
-    setInMemoryMetadata(storage_metadata);

-    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), paths.empty() ? "" : paths[0], format_settings));
+    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, args.getContext(), paths.empty() ? "" : paths[0], format_settings));
+    setInMemoryMetadata(storage_metadata);
 }

@@ -1468,7 +1468,7 @@ Chunk StorageFileSource::generate()
                     .size = current_file_size,
                     .filename = (filename_override.has_value() ? &filename_override.value() : nullptr),
                     .last_modified = current_file_last_modified
-                }, getContext(), columns_description);
+                }, getContext());

             return chunk;
         }

@@ -60,8 +60,8 @@ StorageFileCluster::StorageFileCluster(
     }

     storage_metadata.setConstraints(constraints_);
-    setInMemoryMetadata(storage_metadata);
-    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, paths.empty() ? "" : paths[0]));
+    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context, paths.empty() ? "" : paths[0]));
+    setInMemoryMetadata(storage_metadata);
 }

 void StorageFileCluster::updateQueryToSendIfNeeded(DB::ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const DB::ContextPtr & context)

@@ -642,10 +642,6 @@ std::vector<ReadFromMerge::ChildPlan> ReadFromMerge::createChildrenPlans(SelectQ
                 column_names_as_aliases.push_back(ExpressionActions::getSmallestColumn(storage_metadata_snapshot->getColumns().getAllPhysical()).name);
             }
         }
-        else
-        {
-
-        }

         auto child = createPlanForTable(
             nested_storage_snaphsot,

@@ -657,6 +653,7 @@ std::vector<ReadFromMerge::ChildPlan> ReadFromMerge::createChildrenPlans(SelectQ
             row_policy_data_opt,
             modified_context,
             current_streams);
+        child.plan.addInterpreterContext(modified_context);

         if (child.plan.isInitialized())

@@ -914,12 +911,14 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextMutablePtr & mo
     modified_query_info.table_expression = replacement_table_expression;
     modified_query_info.planner_context->getOrCreateTableExpressionData(replacement_table_expression);

-    auto get_column_options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects().withVirtuals();
-    if (storage_snapshot_->storage.supportsSubcolumns())
-        get_column_options.withSubcolumns();
+    auto get_column_options = GetColumnsOptions(GetColumnsOptions::All)
+                                  .withExtendedObjects()
+                                  .withSubcolumns(storage_snapshot_->storage.supportsSubcolumns());

     std::unordered_map<std::string, QueryTreeNodePtr> column_name_to_node;

+    /// Consider only non-virtual columns of storage while checking for _table and _database columns.
+    /// I.e. always override virtual columns with these names from underlying table (if any).
     if (!storage_snapshot_->tryGetColumn(get_column_options, "_table"))
     {
         auto table_name_node = std::make_shared<ConstantNode>(current_storage_id.table_name);

@@ -946,6 +945,7 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextMutablePtr & mo
         column_name_to_node.emplace("_database", function_node);
     }

+    get_column_options.withVirtuals();
     auto storage_columns = storage_snapshot_->metadata->getColumns();

     bool with_aliases = /* common_processed_stage == QueryProcessingStage::FetchColumns && */ !storage_columns.getAliases().empty();

@@ -6340,7 +6340,7 @@ void StorageReplicatedMergeTree::alter(
                 "Metadata on replica is not up to date with common metadata in Zookeeper. "
                 "It means that this replica still not applied some of previous alters."
                 " Probably too many alters executing concurrently (highly not recommended). "
-                "You can retry the query");
+                "You can retry this error");

         /// Cannot retry automatically, because some zookeeper ops were lost on the first attempt. Will retry on DDLWorker-level.
         if (query_context->getZooKeeperMetadataTransaction())

@@ -165,9 +165,9 @@ IStorageURLBase::IStorageURLBase(

     storage_metadata.setConstraints(constraints_);
     storage_metadata.setComment(comment);
-    setInMemoryMetadata(storage_metadata);

-    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_, getSampleURI(uri, context_), format_settings));
+    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context_, getSampleURI(uri, context_), format_settings));
+    setInMemoryMetadata(storage_metadata);
 }

@@ -435,7 +435,7 @@ Chunk StorageURLSource::generate()
                 {
                     .path = curr_uri.getPath(),
                     .size = current_file_size,
-                }, getContext(), columns_description);
+                }, getContext());
             return chunk;
         }

@@ -75,8 +75,8 @@ StorageURLCluster::StorageURLCluster(
     }

     storage_metadata.setConstraints(constraints_);
-    setInMemoryMetadata(storage_metadata);
-    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, getSampleURI(uri, context)));
+    setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context, getSampleURI(uri, context)));
+    setInMemoryMetadata(storage_metadata);
 }

 void StorageURLCluster::updateQueryToSendIfNeeded(ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const ContextPtr & context)

@@ -129,36 +129,45 @@ NameSet getVirtualNamesForFileLikeStorage()
     return {"_path", "_file", "_size", "_time", "_etag"};
 }

-std::unordered_map<std::string, std::string> parseHivePartitioningKeysAndValues(const String & path, const ColumnsDescription & storage_columns)
+std::unordered_map<std::string, std::string> parseHivePartitioningKeysAndValues(const String & path)
 {
     std::string pattern = "([^/]+)=([^/]+)/";
     re2::StringPiece input_piece(path);

     std::unordered_map<std::string, std::string> key_values;
     std::string key, value;
-    std::unordered_set<String> used_keys;
+    std::unordered_map<std::string, std::string> used_keys;
     while (RE2::FindAndConsume(&input_piece, pattern, &key, &value))
     {
-        if (used_keys.contains(key))
-            throw Exception(ErrorCodes::INCORRECT_DATA, "Path '{}' to file with enabled hive-style partitioning contains duplicated partition key {}, only unique keys are allowed", path, key);
-        used_keys.insert(key);
+        auto it = used_keys.find(key);
+        if (it != used_keys.end() && it->second != value)
+            throw Exception(ErrorCodes::INCORRECT_DATA, "Path '{}' to file with enabled hive-style partitioning contains duplicated partition key {} with different values, only unique keys are allowed", path, key);
+        used_keys.insert({key, value});

-        auto col_name = "_" + key;
-        while (storage_columns.has(col_name))
-            col_name = "_" + col_name;
+        auto col_name = key;
         key_values[col_name] = value;
     }
     return key_values;
 }

-VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, const ContextPtr & context, const std::string & path, std::optional<FormatSettings> format_settings_)
+VirtualColumnsDescription getVirtualsForFileLikeStorage(ColumnsDescription & storage_columns, const ContextPtr & context, const std::string & path, std::optional<FormatSettings> format_settings_)
 {
     VirtualColumnsDescription desc;

     auto add_virtual = [&](const auto & name, const auto & type)
     {
         if (storage_columns.has(name))
+        {
+            if (!context->getSettingsRef().use_hive_partitioning)
                 return;
+
+            if (storage_columns.size() == 1)
+                throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot use hive partitioning for file {}: it contains only partition columns. Disable use_hive_partitioning setting to read this file", path);
+            auto local_type = storage_columns.get(name).type;
+            storage_columns.remove(name);
+            desc.addEphemeral(name, local_type, "");
+            return;
+        }

         desc.addEphemeral(name, type, "");
     };
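
The two hunks above change hive-style path handling in two ways: partition keys are no longer given a "_" prefix to dodge clashes with storage columns (instead the matching storage column is removed and re-added as an ephemeral virtual), and a repeated key is now an error only when its occurrences disagree on the value. A small runnable Python sketch of the new parsing rule, with `re` standing in for RE2 (illustrative only, not ClickHouse code):

    # Illustrative sketch of the parsing rule above, not ClickHouse code.
    import re

    def parse_hive_partition_keys(path: str) -> dict:
        key_values = {}
        for key, value in re.findall(r"([^/]+)=([^/]+)/", path):
            # a duplicated key is only rejected when it carries a different value
            if key in key_values and key_values[key] != value:
                raise ValueError(
                    f"Path '{path}' contains duplicated partition key {key} with different values"
                )
            key_values[key] = value
        return key_values

    print(parse_hive_partition_keys("a/column1=Elizabeth/column2=Gordon/sample.csv"))
    # {'column1': 'Elizabeth', 'column2': 'Gordon'}
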

@@ -171,7 +180,7 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription

     if (context->getSettingsRef().use_hive_partitioning)
     {
-        auto map = parseHivePartitioningKeysAndValues(path, storage_columns);
+        auto map = parseHivePartitioningKeysAndValues(path);
         auto format_settings = format_settings_ ? *format_settings_ : getFormatSettings(context);
         for (auto & item : map)
         {

@@ -244,11 +253,11 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector<String> & paths, const

 void addRequestedFileLikeStorageVirtualsToChunk(
     Chunk & chunk, const NamesAndTypesList & requested_virtual_columns,
-    VirtualsForFileLikeStorage virtual_values, ContextPtr context, const ColumnsDescription & columns)
+    VirtualsForFileLikeStorage virtual_values, ContextPtr context)
 {
     std::unordered_map<std::string, std::string> hive_map;
     if (context->getSettingsRef().use_hive_partitioning)
-        hive_map = parseHivePartitioningKeysAndValues(virtual_values.path, columns);
+        hive_map = parseHivePartitioningKeysAndValues(virtual_values.path);

     for (const auto & virtual_column : requested_virtual_columns)
     {

@@ -70,7 +70,7 @@ auto extractSingleValueFromBlock(const Block & block, const String & name)

 NameSet getVirtualNamesForFileLikeStorage();
 VirtualColumnsDescription getVirtualsForFileLikeStorage(
-    const ColumnsDescription & storage_columns,
+    ColumnsDescription & storage_columns,
     const ContextPtr & context,
     const std::string & sample_path = "",
     std::optional<FormatSettings> format_settings_ = std::nullopt);

@@ -105,7 +105,7 @@ struct VirtualsForFileLikeStorage

 void addRequestedFileLikeStorageVirtualsToChunk(
     Chunk & chunk, const NamesAndTypesList & requested_virtual_columns,
-    VirtualsForFileLikeStorage virtual_values, ContextPtr context, const ColumnsDescription & columns);
+    VirtualsForFileLikeStorage virtual_values, ContextPtr context);
 }

 }

@@ -333,7 +333,10 @@ def _pre_action(s3, job_name, batch, indata, pr_info):
         CI.JobNames.BUILD_CHECK,
     ):  # we might want to rerun build report job
         rerun_helper = RerunHelper(commit, _get_ext_check_name(job_name))
-        if rerun_helper.is_already_finished_by_status():
+        if (
+            rerun_helper.is_already_finished_by_status()
+            and not Utils.is_job_triggered_manually()
+        ):
            print("WARNING: Rerunning job with GH status ")
            status = rerun_helper.get_finished_status()
            assert status

@@ -344,7 +347,7 @@ def _pre_action(s3, job_name, batch, indata, pr_info):
             skip_status = status.state

     # ci cache check
-    if not to_be_skipped and not no_cache:
+    if not to_be_skipped and not no_cache and not Utils.is_job_triggered_manually():
         ci_cache = CiCache(s3, indata["jobs_data"]["digests"]).update()
         job_config = CI.get_job_config(job_name)
         if ci_cache.is_successful(

@@ -501,9 +501,10 @@ class CI:
             JobNames.SQLANCER_DEBUG: CommonJobConfigs.SQLLANCER_TEST.with_properties(
                 required_builds=[BuildNames.PACKAGE_DEBUG],
             ),
-            JobNames.SQL_LOGIC_TEST: CommonJobConfigs.SQLLOGIC_TEST.with_properties(
-                required_builds=[BuildNames.PACKAGE_RELEASE],
-            ),
+            # TODO: job does not work at all, uncomment and fix
+            # JobNames.SQL_LOGIC_TEST: CommonJobConfigs.SQLLOGIC_TEST.with_properties(
+            #     required_builds=[BuildNames.PACKAGE_RELEASE],
+            # ),
             JobNames.SQLTEST: CommonJobConfigs.SQL_TEST.with_properties(
                 required_builds=[BuildNames.PACKAGE_RELEASE],
             ),

@@ -204,7 +204,7 @@ class JobNames(metaclass=WithIter):
     PERFORMANCE_TEST_AMD64 = "Performance Comparison (release)"
     PERFORMANCE_TEST_ARM64 = "Performance Comparison (aarch64)"

-    SQL_LOGIC_TEST = "Sqllogic test (release)"
+    # SQL_LOGIC_TEST = "Sqllogic test (release)"

     SQLANCER = "SQLancer (release)"
     SQLANCER_DEBUG = "SQLancer (debug)"

@@ -18,6 +18,7 @@ class Envs:
     )
     S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")
     GITHUB_WORKFLOW = os.getenv("GITHUB_WORKFLOW", "")
+    GITHUB_ACTOR = os.getenv("GITHUB_ACTOR", "")


 class WithIter(type):

@@ -282,3 +283,7 @@ class Utils:
         ):
             res = res.replace(*r)
         return res
+
+    @staticmethod
+    def is_job_triggered_manually():
+        return "robot" not in Envs.GITHUB_ACTOR

@@ -31,7 +31,7 @@ IMAGE_NAME = "clickhouse/sqllogic-test"

 def get_run_command(
     builds_path: Path,
-    repo_tests_path: Path,
+    repo_path: Path,
     result_path: Path,
     server_log_path: Path,
     image: DockerImage,

@@ -39,11 +39,11 @@ def get_run_command(
     return (
         f"docker run "
         f"--volume={builds_path}:/package_folder "
-        f"--volume={repo_tests_path}:/clickhouse-tests "
+        f"--volume={repo_path}:/repo "
         f"--volume={result_path}:/test_output "
         f"--volume={server_log_path}:/var/log/clickhouse-server "
         "--security-opt seccomp=unconfined "  # required to issue io_uring sys-calls
-        f"--cap-add=SYS_PTRACE {image}"
+        f"--cap-add=SYS_PTRACE {image} /repo/tests/docker_scripts/sqllogic_runner.sh"
     )

@@ -94,8 +94,6 @@ def main():

     docker_image = pull_image(get_docker_image(IMAGE_NAME))

-    repo_tests_path = repo_path / "tests"
-
     packages_path = temp_path / "packages"
     packages_path.mkdir(parents=True, exist_ok=True)

@@ -111,7 +109,7 @@ def main():

     run_command = get_run_command(  # run script inside docker
         packages_path,
-        repo_tests_path,
+        repo_path,
         result_path,
         server_log_path,
         docker_image,

@@ -60,7 +60,6 @@ MESSAGES_TO_RETRY = [
     "is already started to be removing by another replica right now",
     # This is from LSan, and it indicates its own internal problem:
     "Unable to get registers from thread",
-    "You can retry",
 ]

 MAX_RETRIES = 3

@@ -3567,7 +3566,7 @@ if __name__ == "__main__":
                 f"Cannot access the specified directory with queries ({args.queries})",
                 file=sys.stderr,
             )
-            sys.exit(1)
+        assert False, "No --queries provided"

     CAPTURE_CLIENT_STACKTRACE = args.capture_client_stacktrace

@@ -15,10 +15,10 @@ echo "Files in current directory"
 ls -la ./
 echo "Files in root directory"
 ls -la /
-echo "Files in /clickhouse-tests directory"
-ls -la /clickhouse-tests
-echo "Files in /clickhouse-tests/sqllogic directory"
-ls -la /clickhouse-tests/sqllogic
+echo "Files in /repo/tests directory"
+ls -la /repo/tests
+echo "Files in /repo/tests/sqllogic directory"
+ls -la /repo/tests/sqllogic
 echo "Files in /package_folder directory"
 ls -la /package_folder
 echo "Files in /test_output"

@@ -45,13 +45,13 @@ function run_tests()

     cd /test_output

-    /clickhouse-tests/sqllogic/runner.py --help 2>&1 \
+    /repo/tests/sqllogic/runner.py --help 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S'

     mkdir -p /test_output/self-test
-    /clickhouse-tests/sqllogic/runner.py --log-file /test_output/runner-self-test.log \
+    /repo/tests/sqllogic/runner.py --log-file /test_output/runner-self-test.log \
         self-test \
-        --self-test-dir /clickhouse-tests/sqllogic/self-test \
+        --self-test-dir /repo/tests/sqllogic/self-test \
         --out-dir /test_output/self-test \
         2>&1 \
         | ts '%Y-%m-%d %H:%M:%S'

@@ -63,7 +63,7 @@ function run_tests()
     if [ -d /sqllogictest ]
     then
         mkdir -p /test_output/statements-test
-        /clickhouse-tests/sqllogic/runner.py \
+        /repo/tests/sqllogic/runner.py \
             --log-file /test_output/runner-statements-test.log \
             --log-level info \
             statements-test \

@@ -77,7 +77,7 @@ function run_tests()
         tar -zcvf statements-check.tar.gz statements-test 1>/dev/null

         mkdir -p /test_output/complete-test
-        /clickhouse-tests/sqllogic/runner.py \
+        /repo/tests/sqllogic/runner.py \
             --log-file /test_output/runner-complete-test.log \
             --log-level info \
             complete-test \

@@ -339,7 +339,7 @@ export -f run_tests
 if [ "$NUM_TRIES" -gt "1" ]; then
     # We don't run tests with Ordinary database in PRs, only in master.
     # So run new/changed tests with Ordinary at least once in flaky check.
-    NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests \
+    NUM_TRIES=1 USE_DATABASE_ORDINARY=1 run_tests \
        | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' ||:
 fi

@@ -10,8 +10,7 @@ dmesg --clear
 # shellcheck disable=SC1091
 source /setup_export_logs.sh

-ln -s /repo/tests/clickhouse-test/ci/stress.py /usr/bin/stress
-ln -s /repo/tests/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
+ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test

 # Stress tests and upgrade check uses similar code that was placed
 # in a separate bash library. See tests/ci/stress_tests.lib

@@ -266,6 +265,7 @@ fi

 start_server

+cd /repo/tests/ || exit 1 # clickhouse-test can find queries dir from there
 python3 /repo/tests/ci/stress.py --hung-check --drop-databases --output-folder /test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
     && echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
     || echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv

@@ -0,0 +1,26 @@
+<clickhouse>
+    <keeper_server>
+        <tcp_port>2181</tcp_port>
+        <server_id>1</server_id>
+        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
+        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+        <coordination_settings>
+            <session_timeout_ms>20000</session_timeout_ms>
+        </coordination_settings>
+        <raft_configuration>
+            <server>
+                <id>1</id>
+                <hostname>localhost</hostname>
+                <port>9444</port>
+            </server>
+        </raft_configuration>
+    </keeper_server>
+
+    <zookeeper>
+        <node index="1">
+            <host>localhost</host>
+            <port>2181</port>
+        </node>
+        <session_timeout_ms>20000</session_timeout_ms>
+    </zookeeper>
+</clickhouse>

@@ -0,0 +1,8 @@
+<clickhouse>
+    <users>
+        <default>
+            <profile>default</profile>
+            <no_password></no_password>
+        </default>
+    </users>
+</clickhouse>

@@ -0,0 +1,71 @@
+import pytest
+import random
+import string
+
+from helpers.cluster import ClickHouseCluster
+
+
+cluster = ClickHouseCluster(__file__)
+node = cluster.add_instance(
+    "node",
+    main_configs=[
+        "config/enable_keeper.xml",
+        "config/users.xml",
+    ],
+    stay_alive=True,
+    with_minio=True,
+    macros={"shard": 1, "replica": 1},
+)
+
+
+@pytest.fixture(scope="module")
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def randomize_table_name(table_name, random_suffix_length=10):
+    letters = string.ascii_letters + string.digits
+    return f"{table_name}_{''.join(random.choice(letters) for _ in range(random_suffix_length))}"
+
+
+@pytest.mark.parametrize("engine", ["ReplicatedMergeTree"])
+def test_aliases_in_default_expr_not_break_table_structure(start_cluster, engine):
+    """
+    Making sure that using aliases in columns' default expressions does not lead to having different columns metadata in ZooKeeper and on disk.
+    Issue: https://github.com/ClickHouse/clickhouse-private/issues/5150
+    """
+
+    data = '{"event": {"col1-key": "col1-val", "col2-key": "col2-val"}}'
+
+    table_name = randomize_table_name("t")
+
+    node.query(
+        f"""
+        DROP TABLE IF EXISTS {table_name};
+        CREATE TABLE {table_name}
+        (
+            `data` String,
+            `col1` String DEFAULT JSONExtractString(JSONExtractString(data, 'event') AS event, 'col1-key'),
+            `col2` String MATERIALIZED JSONExtractString(JSONExtractString(data, 'event') AS event, 'col2-key')
+        )
+        ENGINE = {engine}('/test/{table_name}', '{{replica}}')
+        ORDER BY col1
+        """
+    )
+
+    node.restart_clickhouse()
+
+    node.query(
+        f"""
+        INSERT INTO {table_name} (data) VALUES ('{data}');
+        """
+    )
+    assert node.query(f"SELECT data FROM {table_name}").strip() == data
+    assert node.query(f"SELECT col1 FROM {table_name}").strip() == "col1-val"
+    assert node.query(f"SELECT col2 FROM {table_name}").strip() == "col2-val"
+
+    node.query(f"DROP TABLE {table_name}")

@@ -6,11 +6,17 @@ from helpers.cluster import ClickHouseCluster
 cluster = ClickHouseCluster(__file__)

 node1 = cluster.add_instance(
-    "node1", user_configs=["config/config.xml"], with_zookeeper=True
+    "node1",
+    user_configs=["config/config.xml"],
+    with_zookeeper=True,
+    macros={"replica": "a", "shard": "shard1"},
 )

 node2 = cluster.add_instance(
-    "node2", user_configs=["config/config.xml"], with_zookeeper=True
+    "node2",
+    user_configs=["config/config.xml"],
+    with_zookeeper=True,
+    macros={"replica": "b", "shard": "shard1"},
 )


@@ -129,8 +135,8 @@ def test_single_node_normal(started_cluster):


 def test_replicated_table_ddl(started_cluster):
-    node1.query("DROP TABLE IF EXISTS test_stat")
-    node2.query("DROP TABLE IF EXISTS test_stat")
+    node1.query("DROP TABLE IF EXISTS test_stat SYNC")
+    node2.query("DROP TABLE IF EXISTS test_stat SYNC")

     node1.query(
         """

@@ -183,3 +189,19 @@ def test_replicated_table_ddl(started_cluster):
     )
     check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_3", "a", True)
     check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_3", "b", True)
+
+
+def test_replicated_db(started_cluster):
+    node1.query("DROP DATABASE IF EXISTS test SYNC")
+    node2.query("DROP DATABASE IF EXISTS test SYNC")
+    node1.query(
+        "CREATE DATABASE test ENGINE = Replicated('/test/shared_stats', '{shard}', '{replica}')"
+    )
+    node2.query(
+        "CREATE DATABASE test ENGINE = Replicated('/test/shared_stats', '{shard}', '{replica}')"
+    )
+    node1.query(
+        "CREATE TABLE test.test_stats (a Int64, b Int64) ENGINE = ReplicatedMergeTree() ORDER BY()"
+    )
+    node2.query("ALTER TABLE test.test_stats MODIFY COLUMN b Float64")
+    node2.query("ALTER TABLE test.test_stats MODIFY STATISTICS b TYPE tdigest")

@@ -1,5 +1,6 @@
 import time
 import pytest
+import random
 from helpers.client import QueryRuntimeException
 from helpers.cluster import ClickHouseCluster
 from helpers.test_tools import TSV

@@ -418,72 +419,215 @@ def test_function_current_roles():
     )


-def test_role_expiration():
-    instance.query("CREATE USER ure")
+@pytest.mark.parametrize("with_extra_role", [False, True])
+def test_role_expiration(with_extra_role):
     instance.query("CREATE ROLE rre")
-    instance.query("GRANT rre TO ure")
+    instance.query("CREATE USER ure DEFAULT ROLE rre")

-    instance.query("CREATE TABLE IF NOT EXISTS tre (id Int) Engine=Log")
-    instance.query("INSERT INTO tre VALUES (0)")
+    instance.query("CREATE TABLE table1 (id Int) Engine=Log")
+    instance.query("CREATE TABLE table2 (id Int) Engine=Log")
+    instance.query("INSERT INTO table1 VALUES (1)")
+    instance.query("INSERT INTO table2 VALUES (2)")

+    instance.query("GRANT SELECT ON table1 TO rre")
+
+    assert instance.query("SELECT * FROM table1", user="ure") == "1\n"
     assert "Not enough privileges" in instance.query_and_get_error(
-        "SELECT * FROM tre", user="ure"
+        "SELECT * FROM table2", user="ure"
     )

-    instance.query("GRANT SELECT ON tre TO rre")
-
-    assert instance.query("SELECT * FROM tre", user="ure") == "0\n"
-
     # access_control_improvements/role_cache_expiration_time_seconds value is 2 for the test
     # so we wait >2 seconds until the role is expired
     time.sleep(5)

-    instance.query("CREATE TABLE IF NOT EXISTS tre1 (id Int) Engine=Log")
-    instance.query("INSERT INTO tre1 VALUES (0)")
-    instance.query("GRANT SELECT ON tre1 TO rre")
+    if with_extra_role:
+        # Expiration of role "rre" from the role cache can be caused by another role being used.
+        instance.query("CREATE ROLE extra_role")
+        instance.query("CREATE USER extra_user DEFAULT ROLE extra_role")
+        instance.query("GRANT SELECT ON table1 TO extra_role")
+        assert instance.query("SELECT * FROM table1", user="extra_user") == "1\n"

-    assert instance.query("SELECT * from tre1", user="ure") == "0\n"
+    instance.query("GRANT SELECT ON table2 TO rre")
+    assert instance.query("SELECT * FROM table1", user="ure") == "1\n"
+    assert instance.query("SELECT * FROM table2", user="ure") == "2\n"

-    instance.query("DROP USER ure")
     instance.query("DROP ROLE rre")
-    instance.query("DROP TABLE tre")
-    instance.query("DROP TABLE tre1")
+    instance.query("DROP USER ure")
+    instance.query("DROP TABLE table1")
+    instance.query("DROP TABLE table2")
+
+    if with_extra_role:
+        instance.query("DROP ROLE extra_role")
+        instance.query("DROP USER extra_user")


-def test_two_roles_expiration():
-    instance.query("CREATE USER ure")
-    instance.query("CREATE ROLE rre")
-    instance.query("GRANT rre TO ure")
+def test_roles_cache():
+    # This test takes 20 seconds.
+    test_time = 20

-    instance.query("CREATE ROLE rre_second")
-
-    instance.query("CREATE TABLE IF NOT EXISTS tre (id Int) Engine=Log")
-    instance.query("INSERT INTO tre VALUES (0)")
-
-    assert "Not enough privileges" in instance.query_and_get_error(
-        "SELECT * FROM tre", user="ure"
-    )
-
-    instance.query("GRANT SELECT ON tre TO rre")
-
-    assert instance.query("SELECT * FROM tre", user="ure") == "0\n"
-
-    # access_control_improvements/role_cache_expiration_time_seconds value is 2 for the test
-    # so we wait >2 seconds until the roles are expired
-    time.sleep(5)
+    # Three users A, B, C.
+    users = ["A", "B", "C"]
+    instance.query("CREATE USER " + ", ".join(users))

-    instance.query(
-        "GRANT SELECT ON tre1 TO rre_second"
-    )  # we expect that both rre and rre_second are gone from cache upon this operation
+    # Table "tbl" has 10 columns. Each of the users has access to a different set of columns.
+    num_columns = 10
+    columns = [f"x{i}" for i in range(1, num_columns + 1)]
+    columns_with_types = [column + " Int64" for column in columns]
+    columns_with_types_comma_separated = ", ".join(columns_with_types)
+    values = list(range(1, num_columns + 1))
+    values_comma_separated = ", ".join([str(value) for value in values])
+    instance.query(
+        f"CREATE TABLE tbl ({columns_with_types_comma_separated}) ENGINE=MergeTree ORDER BY tuple()"
+    )
+    instance.query(f"INSERT INTO tbl VALUES ({values_comma_separated})")
+    columns_to_values = dict([(f"x{i}", i) for i in range(1, num_columns + 1)])

-    instance.query("CREATE TABLE IF NOT EXISTS tre1 (id Int) Engine=Log")
-    instance.query("INSERT INTO tre1 VALUES (0)")
-    instance.query("GRANT SELECT ON tre1 TO rre")
+    # In this test we create and modify roles multiple times along with updating the following variables.
+    # Then we check that each of the users has access to the expected set of columns.
+    roles = []
+    users_to_roles = dict([(user, []) for user in users])
+    roles_to_columns = {}

-    assert instance.query("SELECT * from tre1", user="ure") == "0\n"
+    # Checks that each of the users can access the expected set of columns and can't access other columns.
+    def check():
+        for user in random.sample(users, len(users)):
+            expected_roles = users_to_roles[user]
+            expected_columns = list(
+                set(sum([roles_to_columns[role] for role in expected_roles], []))
+            )
+            expected_result = sorted(
+                [columns_to_values[column] for column in expected_columns]
+            )
+            query = " UNION ALL ".join(
+                [
+                    f"SELECT * FROM viewIfPermitted(SELECT {column} AS c FROM tbl ELSE null('c Int64'))"
+                    for column in columns
+                ]
+            )
+            result = instance.query(query, user=user).splitlines()
+            result = sorted([int(value) for value in result])
+            ok = result == expected_result
+            if not ok:
+                print(f"Show grants for {user}:")
+                print(
+                    instance.query(
+                        "SHOW GRANTS FOR " + ", ".join([user] + expected_roles)
+                    )
+                )
+                print(f"Expected result: {expected_result}")
+                print(f"Got unexpected result: {result}")
+            assert ok

-    instance.query("DROP USER ure")
-    instance.query("DROP ROLE rre")
-    instance.query("DROP ROLE rre_second")
-    instance.query("DROP TABLE tre")
-    instance.query("DROP TABLE tre1")
+    # Grants one of our roles a permission to access one of the columns.
+    def grant_column():
+        columns_used_in_roles = sum(roles_to_columns.values(), [])
+        columns_to_choose = [
+            column for column in columns if column not in columns_used_in_roles
+        ]
+        if not columns_to_choose or not roles:
+            return False
+        column = random.choice(columns_to_choose)
+        role = random.choice(roles)
+        instance.query(f"GRANT SELECT({column}) ON tbl TO {role}")
+        roles_to_columns[role].append(column)
+        return True
+
+    # Revokes a permission to access one of the granted column from all our roles.
+    def revoke_column():
+        columns_used_in_roles = sum(roles_to_columns.values(), [])
+        columns_to_choose = list(set(columns_used_in_roles))
+        if not columns_to_choose or not roles:
+            return False
+        column = random.choice(columns_to_choose)
+        roles_str = ", ".join(roles)
+        instance.query(f"REVOKE SELECT({column}) ON tbl FROM {roles_str}")
+        for role in roles_to_columns:
+            if column in roles_to_columns[role]:
+                roles_to_columns[role].remove(column)
+        return True
+
+    # Creates a role and grants it to one of the users.
+    def create_role():
+        for role in ["R1", "R2", "R3"]:
+            if role not in roles:
+                instance.query(f"CREATE ROLE {role}")
+                roles.append(role)
+            if role not in roles_to_columns:
+                roles_to_columns[role] = []
+        if "R1" not in users_to_roles["A"]:
+            instance.query("GRANT R1 TO A")
+            users_to_roles["A"].append("R1")
+        elif "R2" not in users_to_roles["B"]:
+            instance.query("GRANT R2 TO B")
+            users_to_roles["B"].append("R2")
+        elif "R3" not in users_to_roles["B"]:
+            instance.query("GRANT R3 TO R2")
+            users_to_roles["B"].append("R3")
+        elif "R3" not in users_to_roles["C"]:
+            instance.query("GRANT R3 TO C")
+            users_to_roles["C"].append("R3")
+        else:
+            return False
+        return True
+
+    # Drops one of our roles.
+    def drop_role():
+        if not roles:
+            return False
+        role = random.choice(roles)
+        instance.query(f"DROP ROLE {role}")
+        roles.remove(role)
+        for u in users_to_roles:
+            if role in users_to_roles[u]:
+                users_to_roles[u].remove(role)
+        del roles_to_columns[role]
+        if (role == "R2") and ("R3" in users_to_roles["B"]):
+            users_to_roles["B"].remove("R3")
+        return True
+
+    # Modifies some grants or roles randomly.
+    def modify():
+        while True:
+            rnd = random.random()
+            if rnd < 0.4:
+                if grant_column():
+                    break
+            elif rnd < 0.5:
+                if revoke_column():
+                    break
+            elif rnd < 0.9:
+                if create_role():
+                    break
+            else:
+                if drop_role():
+                    break
+
+    def maybe_modify():
+        if random.random() < 0.9:
+            modify()
+            modify()
+
+    # Sleeping is necessary in this test because the role cache in ClickHouse has expiration timeout.
+    def maybe_sleep():
+        if random.random() < 0.1:
+            # "role_cache_expiration_time_seconds" is set to 2 seconds in the test configuration.
+            # We need a sleep longer than that in this test sometimes.
+            seconds = random.random() * 5
+            print(f"Sleeping {seconds} seconds")
+            time.sleep(seconds)
+
+    # Main part of the test.
+    start_time = time.time()
+    end_time = start_time + test_time
+
+    while time.time() < end_time:
+        check()
+        maybe_sleep()
+        maybe_modify()
+        maybe_sleep()
+
+    check()
+
+    instance.query("DROP USER " + ", ".join(users))
+    instance.query("DROP ROLE " + ", ".join(roles))
+    instance.query("DROP TABLE tbl")
||||
|
@ -1513,19 +1513,19 @@ def test_hive_partitioning_with_one_parameter(cluster):
|
||||
azure_query(
|
||||
node,
|
||||
f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}',"
|
||||
f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}",
|
||||
f" container='cont', blob_path='{path}', format='CSVWithNames', compression='auto', structure='{table_format}') VALUES {values}",
|
||||
settings={"azure_truncate_on_insert": 1},
|
||||
)
|
||||
|
||||
query = (
|
||||
f"SELECT column1, column2, _file, _path, _column1 FROM azureBlobStorage(azure_conf2, "
|
||||
f"SELECT column2, _file, _path, column1 FROM azureBlobStorage(azure_conf2, "
|
||||
f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', "
|
||||
f"blob_path='{path}', format='CSV', structure='{table_format}')"
|
||||
f"blob_path='{path}', format='CSVWithNames', structure='{table_format}')"
|
||||
)
|
||||
assert azure_query(
|
||||
node, query, settings={"use_hive_partitioning": 1}
|
||||
).splitlines() == [
|
||||
"Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format(
|
||||
"Gordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format(
|
||||
bucket="cont", max_path=path
|
||||
)
|
||||
]
|
||||
@ -1533,14 +1533,14 @@ def test_hive_partitioning_with_one_parameter(cluster):
|
||||
query = (
|
||||
f"SELECT column2 FROM azureBlobStorage(azure_conf2, "
|
||||
f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', "
|
||||
f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;"
|
||||
f"blob_path='{path}', format='CSVWithNames', structure='{table_format}');"
|
||||
)
|
||||
assert azure_query(
|
||||
node, query, settings={"use_hive_partitioning": 1}
|
||||
).splitlines() == ["Gordon"]
|
||||
|
||||
|
||||
def test_hive_partitioning_with_two_parameters(cluster):
|
||||
def test_hive_partitioning_with_all_parameters(cluster):
|
||||
# type: (ClickHouseCluster) -> None
|
||||
node = cluster.instances["node"] # type: ClickHouseInstance
|
||||
table_format = "column1 String, column2 String"
|
||||
@ -1551,40 +1551,19 @@ def test_hive_partitioning_with_two_parameters(cluster):
|
||||
azure_query(
|
||||
node,
|
||||
f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}',"
|
||||
f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}",
|
||||
f" container='cont', blob_path='{path}', format='CSVWithNames', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}",
|
||||
settings={"azure_truncate_on_insert": 1},
|
||||
)
|
||||
|
||||
query = (
|
||||
f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, "
|
||||
f"SELECT column1, column2, _file, _path FROM azureBlobStorage(azure_conf2, "
|
||||
f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', "
|
||||
f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;"
|
||||
f"blob_path='{path}', format='CSVWithNames', structure='{table_format}');"
|
||||
)
|
||||
assert azure_query(
|
||||
node, query, settings={"use_hive_partitioning": 1}
|
||||
).splitlines() == [
|
||||
"Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format(
|
||||
bucket="cont", max_path=path
|
||||
)
|
||||
]
|
||||
pattern = r"DB::Exception: Cannot use hive partitioning for file"
|
||||
|
||||
query = (
|
||||
f"SELECT column1 FROM azureBlobStorage(azure_conf2, "
|
||||
f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', "
|
||||
f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;"
|
||||
)
|
||||
assert azure_query(
|
||||
node, query, settings={"use_hive_partitioning": 1}
|
||||
).splitlines() == ["Elizabeth"]
|
||||
|
||||
query = (
|
||||
f"SELECT column1 FROM azureBlobStorage(azure_conf2, "
|
||||
f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', "
|
||||
f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND column1=_column1;"
|
||||
)
|
||||
assert azure_query(
|
||||
node, query, settings={"use_hive_partitioning": 1}
|
||||
).splitlines() == ["Elizabeth"]
|
||||
with pytest.raises(Exception, match=pattern):
|
||||
azure_query(node, query, settings={"use_hive_partitioning": 1})
|
||||
|
||||
|
||||
def test_hive_partitioning_without_setting(cluster):
|
||||
@ -1593,19 +1572,19 @@ def test_hive_partitioning_without_setting(cluster):
|
||||
table_format = "column1 String, column2 String"
|
||||
values_1 = f"('Elizabeth', 'Gordon')"
|
||||
values_2 = f"('Emilia', 'Gregor')"
|
||||
path = "a/column1=Elizabeth/column2=Gordon/sample.csv"
|
||||
path = "a/column1=Elizabeth/column2=Gordon/column3=Gordon/sample.csv"
|
||||
|
||||
azure_query(
|
||||
node,
|
||||
f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}',"
|
||||
f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}",
|
||||
f" container='cont', blob_path='{path}', format='CSVWithNames', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}",
|
||||
settings={"azure_truncate_on_insert": 1},
|
||||
)
|
||||
|
||||
query = (
|
||||
f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, "
|
||||
f"SELECT column1, column2, _file, _path, column3 FROM azureBlobStorage(azure_conf2, "
|
||||
f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', "
|
||||
f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;"
|
||||
f"blob_path='{path}', format='CSVWithNames', structure='{table_format}');"
|
||||
)
|
||||
pattern = re.compile(
|
||||
r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL
|
||||
|
@ -1259,33 +1259,21 @@ def test_respect_object_existence_on_partitioned_write(started_cluster):
|
||||
|
||||
def test_hive_partitioning_with_one_parameter(started_cluster):
|
||||
hdfs_api = started_cluster.hdfs_api
|
||||
hdfs_api.write_data(f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n")
|
||||
assert hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") == f"Elizabeth\tGordon\n"
|
||||
hdfs_api.write_data(
|
||||
f"/column0=Elizabeth/file_1", f"column0,column1\nElizabeth,Gordon\n"
|
||||
)
|
||||
assert (
|
||||
hdfs_api.read_data(f"/column0=Elizabeth/file_1")
|
||||
== f"column0,column1\nElizabeth,Gordon\n"
|
||||
)
|
||||
|
||||
r = node1.query(
|
||||
"SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')",
|
||||
"SELECT column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/file_1', 'CSVWithNames')",
|
||||
settings={"use_hive_partitioning": 1},
|
||||
)
|
||||
assert r == f"Elizabeth\n"
|
||||
|
||||
|
||||
def test_hive_partitioning_with_two_parameters(started_cluster):
|
||||
hdfs_api = started_cluster.hdfs_api
|
||||
hdfs_api.write_data(
|
||||
f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n"
|
||||
)
|
||||
assert (
|
||||
hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2")
|
||||
== f"Elizabeth\tGordon\n"
|
||||
)
|
||||
|
||||
r = node1.query(
|
||||
"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');",
|
||||
settings={"use_hive_partitioning": 1},
|
||||
)
|
||||
assert r == f"Gordon\n"
|
||||
|
||||
|
||||
def test_hive_partitioning_without_setting(started_cluster):
|
||||
hdfs_api = started_cluster.hdfs_api
|
||||
hdfs_api.write_data(
|
||||
@ -1301,7 +1289,7 @@ def test_hive_partitioning_without_setting(started_cluster):
|
||||
|
||||
with pytest.raises(QueryRuntimeException, match=pattern):
|
||||
node1.query(
|
||||
f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');",
|
||||
f"SELECT column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');",
|
||||
settings={"use_hive_partitioning": 0},
|
||||
)
|
||||
|
||||
|
@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
$CLICKHOUSE_CLIENT -n --query="
|
||||
$CLICKHOUSE_CLIENT --query="
|
||||
DROP TABLE IF EXISTS users;
|
||||
CREATE TABLE users (UserID UInt64) ENGINE = Log;
|
||||
INSERT INTO users VALUES (1468013291393583084);
|
||||
|
@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
$CLICKHOUSE_CLIENT -n --ignore-error --query="
|
||||
$CLICKHOUSE_CLIENT --ignore-error --query="
|
||||
DROP TABLE IF EXISTS test1_00550;
|
||||
DROP TABLE IF EXISTS test2_00550;
|
||||
DROP TABLE IF EXISTS test3_00550;
|
||||
|
@ -1,3 +1,5 @@
|
||||
-- Tags: no-random-settings, no-random-merge-tree-settings
|
||||
-- small number of insert threads can make insert terribly slow, especially with some build like msan
|
||||
DROP TABLE IF EXISTS mt;
|
||||
|
||||
CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS parts_to_delay_insert = 100000, parts_to_throw_insert = 100000;
|
||||
|
@ -26,6 +26,10 @@ while [[ $($CLICKHOUSE_CLIENT --query "KILL MUTATION WHERE mutation_id='00000000
|
||||
sleep 1
|
||||
done
|
||||
|
||||
while [[ $($CLICKHOUSE_CLIENT --query "SELECT * FROM system.replication_queue WHERE type='ALTER_METADATA' AND database = '$CLICKHOUSE_DATABASE'" 2>&1) ]]; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_bad_alters;" # Type changed, but we can revert back
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_bad_alters VALUES(2, 2, 7)"
|
||||
|
@ -13,7 +13,8 @@ opts=(
|
||||
|
||||
DATABASE_ORDINARY="${CLICKHOUSE_DATABASE}_ordinary"
|
||||
|
||||
$CLICKHOUSE_CLIENT "${opts[@]}" --allow_deprecated_database_ordinary=1 --multiquery "
|
||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query "
|
||||
SET allow_deprecated_database_ordinary = 1;
|
||||
SET allow_experimental_window_view = 1;
|
||||
SET window_view_clean_interval = 1;
|
||||
|
||||
@ -28,8 +29,7 @@ $CLICKHOUSE_CLIENT "${opts[@]}" --allow_deprecated_database_ordinary=1 --multiqu
|
||||
INSERT INTO ${DATABASE_ORDINARY}.mt VALUES (1, 2, toDateTime('1990/01/01 12:00:01', 'US/Samoa'));
|
||||
INSERT INTO ${DATABASE_ORDINARY}.mt VALUES (1, 3, toDateTime('1990/01/01 12:00:02', 'US/Samoa'));
|
||||
INSERT INTO ${DATABASE_ORDINARY}.mt VALUES (1, 4, toDateTime('1990/01/01 12:00:05', 'US/Samoa'));
|
||||
INSERT INTO ${DATABASE_ORDINARY}.mt VALUES (1, 5, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));
|
||||
"
|
||||
INSERT INTO ${DATABASE_ORDINARY}.mt VALUES (1, 5, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));"
|
||||
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM ${DATABASE_ORDINARY}.\`.inner.wv\`" | grep -q "5" && break || sleep .5 ||:
|
||||
|
@ -13,7 +13,7 @@ REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')")
|
||||
|
||||
SCALE=1000
|
||||
|
||||
$CLICKHOUSE_CLIENT -n --query "
|
||||
$CLICKHOUSE_CLIENT --query "
|
||||
DROP TABLE IF EXISTS r1;
|
||||
DROP TABLE IF EXISTS r2;
|
||||
CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '1{replica}') ORDER BY x
|
||||
@ -23,11 +23,11 @@ $CLICKHOUSE_CLIENT -n --query "
|
||||
DETACH TABLE r2;
|
||||
"
|
||||
|
||||
$CLICKHOUSE_CLIENT --max_block_size 1 --min_insert_block_size_rows 1 --min_insert_block_size_bytes 1 --max_insert_threads 16 --query "INSERT INTO r1 SELECT * FROM numbers_mt(${SCALE})"
|
||||
# insert_keeper_fault_injection_probability=0 -- can slowdown insert a lot (produce a lot of parts)
|
||||
$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 --max_block_size 1 --min_insert_block_size_rows 1 --min_insert_block_size_bytes 1 --max_insert_threads 16 --query "INSERT INTO r1 SELECT * FROM numbers_mt(${SCALE})"
|
||||
|
||||
|
||||
# Now wait for cleanup thread
|
||||
|
||||
for _ in {1..60}; do
|
||||
$CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS"
|
||||
[[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 10)) ]] && break;
|
||||
@ -46,7 +46,7 @@ $CLICKHOUSE_CLIENT --receive_timeout 600 --query "SYSTEM SYNC REPLICA r2" # Need
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/2$REPLICA' AND name = 'is_lost'";
|
||||
|
||||
$CLICKHOUSE_CLIENT -n --query "
|
||||
$CLICKHOUSE_CLIENT --query "
|
||||
DROP TABLE IF EXISTS r1;
|
||||
DROP TABLE IF EXISTS r2;
|
||||
"
|
||||
|
@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
|
||||
function check_log
|
||||
{
|
||||
${CLICKHOUSE_CLIENT} --format=JSONEachRow -nq "
|
||||
${CLICKHOUSE_CLIENT} --format=JSONEachRow -q "
|
||||
set enable_analyzer = 1;
|
||||
system flush logs;
|
||||
|
||||
|
@ -1,2 +1,2 @@
|
||||
9fd46251e5574c633cbfbb9293671888 -
|
||||
9fd46251e5574c633cbfbb9293671888 -
|
||||
48fad37bc89fc3bcc29c4750897b6709 -
|
||||
48fad37bc89fc3bcc29c4750897b6709 -
|
||||
|
@@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 DROP TABLE IF EXISTS t;
 CREATE TABLE t (a LowCardinality(Nullable(String))) ENGINE = Memory;
 "
@@ -12,7 +12,7 @@ CREATE TABLE t (a LowCardinality(Nullable(String))) ENGINE = Memory;
 ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT RawBLOB" < ${BASH_SOURCE[0]}
 
 cat ${BASH_SOURCE[0]} | md5sum
-${CLICKHOUSE_CLIENT} -n --query "SELECT * FROM t FORMAT RawBLOB" | md5sum
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM t FORMAT RawBLOB" | md5sum
 
 ${CLICKHOUSE_CLIENT} --query "
 DROP TABLE t;
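This test feeds its own source file through a RawBLOB INSERT and reads it back with RawBLOB, so the two md5 sums it prints must match; the reference-file change two hunks earlier follows mechanically, since editing the script changes its own checksum. A round-trip sketch with illustrative names:

clickhouse-client --query "CREATE TABLE raw_rt (a String) ENGINE = Memory"
printf 'hello' | clickhouse-client --query "INSERT INTO raw_rt FORMAT RawBLOB"
printf 'hello' | md5sum
clickhouse-client --query "SELECT * FROM raw_rt FORMAT RawBLOB" | md5sum   # should print the same sum
clickhouse-client --query "DROP TABLE raw_rt"
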
@@ -24,7 +24,7 @@ expect_after {
     -i $any_spawn_id timeout { exit 1 }
 }
 
-spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion -mn --history_file=$history_file --highlight 0"
+spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion -m --history_file=$history_file --highlight 0"
 expect "\n:) "
 
 send -- "DROP TABLE IF EXISTS t01565;\r"
@@ -17,7 +17,7 @@ function wait_with_limit()
     done
 }
 
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table if exists data_01811;
 drop table if exists buffer_01811;
 
@@ -39,9 +39,9 @@ $CLICKHOUSE_CLIENT -nm -q "
 # wait for background buffer flush
 wait_with_limit 30 '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01811") -gt 0 ]]'
 
-$CLICKHOUSE_CLIENT -nm -q "select count() from data_01811"
+$CLICKHOUSE_CLIENT -m -q "select count() from data_01811"
 
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
 drop table buffer_01811;
 drop table data_01811;
 "
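The poll is needed because a Buffer table only forwards rows to its destination table when one of its time/row/byte thresholds trips in the background. A minimal sketch of the same pattern (names and thresholds illustrative):

clickhouse-client -q "CREATE TABLE dst (x UInt64) ENGINE = MergeTree ORDER BY x"
clickhouse-client -q "CREATE TABLE buf (x UInt64) ENGINE = Buffer(currentDatabase(), dst, 1, 1, 1, 1, 1, 1, 1)"
clickhouse-client -q "INSERT INTO buf VALUES (1)"
# retry until the background flush moves the row, instead of sleeping a fixed time
for _ in {1..30}; do
    [[ $(clickhouse-client -q "SELECT count() FROM dst") -gt 0 ]] && break
    sleep 0.5
done
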
@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 sql="toUInt16OrNull(arrayFirst((v, k) -> (k = '4Id'), arr[2], arr[1]))"
 
 # Create the table and fill it
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
 CREATE TABLE test_extract(str String, arr Array(Array(String)) ALIAS extractAllGroupsHorizontal(str, '\\W(\\w+)=(\"[^\"]*?\"|[^\",}]*)')) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY tuple();
 INSERT INTO test_extract (str) WITH range(8) as range_arr, arrayMap(x-> concat(toString(x),'Id'), range_arr) as key, arrayMap(x -> rand() % 8, range_arr) as val, arrayStringConcat(arrayMap((x,y) -> concat(x,'=',toString(y)), key, val),',') as str SELECT str FROM numbers(500000);
 ALTER TABLE test_extract ADD COLUMN 15Id Nullable(UInt16) DEFAULT $sql;"
@@ -24,7 +24,7 @@ function test()
     $CLICKHOUSE_CLIENT --query="SELECT uniq(15Id) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_1
     uuid_2=$(cat /proc/sys/kernel/random/uuid)
     $CLICKHOUSE_CLIENT --query="SELECT uniq($sql) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_2
-    $CLICKHOUSE_CLIENT -n --query="
+    $CLICKHOUSE_CLIENT --query="
     SYSTEM FLUSH LOGS;
     WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1),
          memory_2 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2)
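This test compares the memory cost of reading the precomputed column against evaluating the expression inline, by tagging each query with a query_id and looking both up in system.query_log afterwards. A sketch of the bookkeeping step (query ids illustrative):

clickhouse-client --query "SYSTEM FLUSH LOGS"
clickhouse-client --query "
    SELECT query_id, memory_usage
    FROM system.query_log
    WHERE current_database = currentDatabase()
      AND type = 'QueryFinish'
      AND query_id IN ('uuid_1', 'uuid_2')"
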
@@ -16,7 +16,7 @@ function test_table_comments()
     local ENGINE_NAME="$1"
     echo "engine : ${ENGINE_NAME}"
 
-    $CLICKHOUSE_CLIENT -nm <<EOF
+    $CLICKHOUSE_CLIENT -m <<EOF
 DROP TABLE IF EXISTS comment_test_table;
 
 CREATE TABLE comment_test_table
@@ -20,9 +20,9 @@ function explain_sorting {
 
 function explain_sortmode {
     echo "-- QUERY: "$1
-    $CLICKHOUSE_CLIENT --enable_analyzer=0 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTMODE
+    $CLICKHOUSE_CLIENT --enable_analyzer=0 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -q "$1" | eval $FIND_SORTMODE
     echo "-- QUERY (analyzer): "$1
-    $CLICKHOUSE_CLIENT --enable_analyzer=1 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTMODE
+    $CLICKHOUSE_CLIENT --enable_analyzer=1 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -q "$1" | eval $FIND_SORTMODE
 }
 
 $CLICKHOUSE_CLIENT -q "drop table if exists optimize_sorting sync"
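explain_sortmode runs the same statement under both analyzer settings and filters the plan output for sorting information. A sketch of the underlying check ($FIND_SORTMODE is defined elsewhere in the test; the grep here is an illustrative stand-in):

clickhouse-client -q "EXPLAIN PLAN actions = 1 SELECT number FROM numbers(10) ORDER BY number" | grep -i sort
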
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT -mn -q """
+$CLICKHOUSE_CLIENT -m -q """
 DROP TABLE IF EXISTS t1;
 DROP TABLE IF EXISTS t2;
 
@@ -39,7 +39,7 @@ function check_span()
         extra_condition=""
     fi
 
-    ret=$(${CLICKHOUSE_CLIENT} -nq "
+    ret=$(${CLICKHOUSE_CLIENT} -q "
     SYSTEM FLUSH LOGS;
 
     SELECT count()
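check_span flushes the logs first because spans reach the log tables asynchronously. A sketch of that kind of probe (the WHERE clause is illustrative; the real condition is assembled from the function's arguments):

clickhouse-client -q "SYSTEM FLUSH LOGS"
clickhouse-client -q "SELECT count() FROM system.opentelemetry_span_log WHERE operation_name LIKE 'query%'"
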
@@ -239,10 +239,15 @@ Check bug found fuzzing
 Test arrays and maps
 608E1FF030C9E206185B112C2A25F1A7
 ABB65AE97711A2E053E324ED88B1D08B
-Test emtpy arrays and maps
+Test empty arrays and maps
 4761183170873013810
 0AD04BFD000000000000000000000000
+4761183170873013810
+0AD04BFD000000000000000000000000
 Test maps with arrays as keys
 16734549324845627102
+D675BB3D687973A238AB891DD99C7047
 1D03941D808D04810D2363A6C107D622
 16734549324845627102
+16734549324845627102
+1D03941D808D04810D2363A6C107D622
@@ -346,10 +346,13 @@ INSERT INTO sipHashKeyed_keys FORMAT VALUES ({'a':'b', 'c':'d'}), ({'e':'f', 'g'
 SELECT hex(sipHash128ReferenceKeyed((0::UInt64, materialize(0::UInt64)), a)) FROM sipHashKeyed_keys ORDER BY a;
 DROP TABLE sipHashKeyed_keys;
 
-SELECT 'Test emtpy arrays and maps';
+SELECT 'Test empty arrays and maps';
 SELECT sipHash64Keyed((1::UInt64, 2::UInt64), []);
 SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), []));
+SELECT sipHash64Keyed((1::UInt64, 2::UInt64), mapFromArrays([], []));
+SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), mapFromArrays([], [])));
 SELECT 'Test maps with arrays as keys';
 SELECT sipHash64Keyed((1::UInt64, 2::UInt64), map([0], 1, [2], 3));
+SELECT hex(sipHash128Keyed((0::UInt64, 0::UInt64), map([0], 1, [2], 3)));
 SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), map([0], 1, [2], 3)));
 SELECT sipHash64Keyed((materialize(1::UInt64), 2::UInt64), map([0], 1, [2], 3)) FROM numbers(2);
 SELECT hex(sipHash128Keyed((materialize(1::UInt64), 2::UInt64), map([0], 1, [2], 3))) FROM numbers(2);
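The added statements pin down two previously untested shapes: empty containers (an empty array and an empty map should hash alike, as the duplicated reference values above suggest) and a differing key tuple for a map with array keys. Note that the key is always passed as a (UInt64, UInt64) tuple. Illustrative calls mirroring the new cases:

clickhouse-client -q "SELECT sipHash64Keyed((1::UInt64, 2::UInt64), mapFromArrays([], []))"
clickhouse-client -q "SELECT hex(sipHash128Keyed((0::UInt64, 0::UInt64), map([0], 1, [2], 3)))"
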
@@ -10,7 +10,7 @@ for check_query in "SELECT value FROM system.settings WHERE name = 'alter_sync';
     echo "Checking setting value with '$check_query'"
 
     echo 'Using SET'
-    $CLICKHOUSE_CLIENT -mn -q """
+    $CLICKHOUSE_CLIENT -m -q """
     SET replication_alter_partitions_sync = 0;
     $check_query
 
@@ -28,7 +28,7 @@ for check_query in "SELECT value FROM system.settings WHERE name = 'alter_sync';
 done
 
 
-$CLICKHOUSE_CLIENT -mn -q """
+$CLICKHOUSE_CLIENT -m -q """
 DROP VIEW IF EXISTS 02539_settings_alias_view;
 CREATE VIEW 02539_settings_alias_view AS SELECT 1 SETTINGS replication_alter_partitions_sync = 2;
 SHOW CREATE TABLE 02539_settings_alias_view;
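This test exercises alter_sync as an alias of replication_alter_partitions_sync: setting either name must be visible when reading either name from system.settings. A minimal sketch:

clickhouse-client -m -q "
    SET replication_alter_partitions_sync = 0;
    SELECT name, value FROM system.settings WHERE name IN ('alter_sync', 'replication_alter_partitions_sync');
"
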
@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 QUERY_ID="${CLICKHOUSE_DATABASE}_read_with_cancel"
 
-$CLICKHOUSE_CLIENT --max_rows_to_read 0 -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" &
+$CLICKHOUSE_CLIENT --max_rows_to_read 0 --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" &
 pid=$!
 
 for _ in {0..60}
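The query is intentionally enormous (a sum over ten billion rows, with the read limit lifted) so the test has time to cancel it; with partial_result_on_first_cancel=true, the first cancellation returns a partial result instead of aborting outright. A sketch of one way to cancel by query id (the test itself signals the client process; KILL QUERY is an illustrative alternative):

clickhouse-client -q "KILL QUERY WHERE query_id = '${QUERY_ID}' SYNC"
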
@@ -3,7 +3,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT --multiquery "
+$CLICKHOUSE_CLIENT "
 SELECT 'Policy for table \`*\` does not affect other tables in the database';
 CREATE ROW POLICY 02703_asterisk_${CLICKHOUSE_DATABASE}_policy ON ${CLICKHOUSE_DATABASE}.\`*\` USING x=1 AS permissive TO ALL;
 CREATE TABLE ${CLICKHOUSE_DATABASE}.\`*\` (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x AS SELECT 100, 20;
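The point of this test: a backtick-quoted \`*\` is an ordinary table identifier, so a row policy on db.\`*\` binds to that single oddly-named table rather than acting as a wildcard. A sketch (database name illustrative):

clickhouse-client -q "CREATE ROW POLICY p_demo ON mydb.\`*\` USING x = 1 AS permissive TO ALL"
clickhouse-client -q "DROP ROW POLICY p_demo ON mydb.\`*\`"
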
@@ -3,7 +3,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT --multiquery "
+$CLICKHOUSE_CLIENT "
 
 DROP TABLE IF EXISTS 02703_rptable;
 DROP TABLE IF EXISTS 02703_rptable_another;
@@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 CLICKHOUSE_USER="user_$CLICKHOUSE_DATABASE"
 
-$CLICKHOUSE_CLIENT --multiquery "
+$CLICKHOUSE_CLIENT "
 
 DROP USER IF EXISTS ${CLICKHOUSE_USER};
 CREATE USER ${CLICKHOUSE_USER};
@@ -28,7 +28,7 @@ DROP POLICY ${CLICKHOUSE_DATABASE}_tb_policy ON ${CLICKHOUSE_DATABASE}.table;
 
 $CLICKHOUSE_CLIENT --query "CREATE ROW POLICY any_02703 ON *.some_table USING 1 AS PERMISSIVE TO ALL;" 2>&1 | grep -q "SYNTAX_ERROR"
 
-$CLICKHOUSE_CLIENT --multiquery "
+$CLICKHOUSE_CLIENT "
 CREATE TABLE 02703_rqtable_default (x UInt8) ENGINE = MergeTree ORDER BY x;
 
 CREATE ROW POLICY ${CLICKHOUSE_DATABASE}_filter_11_db_policy ON * USING x=1 AS permissive TO ALL;
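By contrast, an unquoted ON * is the real database-wide wildcard, while the *.table form is rejected outright, which is exactly what the grep for SYNTAX_ERROR above asserts. A sketch:

clickhouse-client -q "CREATE ROW POLICY db_wide ON * USING x = 1 AS permissive TO ALL"
clickhouse-client -q "CREATE ROW POLICY bad ON *.some_table USING 1 AS permissive TO ALL" 2>&1 | grep -q SYNTAX_ERROR && echo rejected
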
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=./mergetree_mutations.lib
 . "$CURDIR"/mergetree_mutations.lib
 
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 DROP TABLE IF EXISTS t_delay_mutations SYNC;
 
 CREATE TABLE t_delay_mutations (id UInt64, v UInt64)
@@ -36,14 +36,14 @@ SELECT count() FROM system.mutations WHERE database = currentDatabase() AND tabl
 ${CLICKHOUSE_CLIENT} --query "SYSTEM START MERGES t_delay_mutations"
 wait_for_mutation "t_delay_mutations" "mutation_5.txt"
 
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 SELECT * FROM t_delay_mutations ORDER BY id;
 SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_delay_mutations' AND NOT is_done;
 
 DROP TABLE IF EXISTS t_delay_mutations SYNC;
 "
 
-${CLICKHOUSE_CLIENT} -n --query "
+${CLICKHOUSE_CLIENT} --query "
 SYSTEM FLUSH LOGS;
 
 SELECT
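The test stops merges so the mutation stays queued, then releases them and waits; wait_for_mutation (from mergetree_mutations.lib) polls system.mutations in roughly the manner of this sketch (table name illustrative):

clickhouse-client -q "SYSTEM STOP MERGES t_demo"
clickhouse-client -q "ALTER TABLE t_demo UPDATE v = v + 1 WHERE 1"
clickhouse-client -q "SYSTEM START MERGES t_demo"
# poll until the mutation is applied
while [[ $(clickhouse-client -q "SELECT count() FROM system.mutations WHERE table = 't_demo' AND NOT is_done") -gt 0 ]]; do
    sleep 0.5
done
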