Merge remote-tracking branch 'origin/master' into pr-local-plan

Igor Nikonov 2024-08-01 14:22:26 +00:00
commit 7082a9797e
229 changed files with 1083 additions and 628 deletions

View File

@ -22,7 +22,6 @@
#### New Feature
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).

View File

@ -57,7 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat
if (WITH_COVERAGE)
message (STATUS "Enabled instrumentation for code coverage")
set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
endif()
option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)

@ -1 +1 @@
Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf
Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d

View File

@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.7.1.2915"
ARG VERSION="24.7.2.13"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.7.1.2915"
ARG VERSION="24.7.2.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.7.1.2915"
ARG VERSION="24.7.2.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off

View File

@ -0,0 +1,40 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.5.46-lts (fe54cead6b6) FIXME as compared to v24.3.4.147-lts (31a7bdc346d)
#### Improvement
* Backported in [#65463](https://github.com/ClickHouse/ClickHouse/issues/65463): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
* Backported in [#65882](https://github.com/ClickHouse/ClickHouse/issues/65882): Always start Keeper with a sufficient number of threads in the global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65302](https://github.com/ClickHouse/ClickHouse/issues/65302): Restored the previous behaviour of how ClickHouse interprets Tuples in the CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#65892](https://github.com/ClickHouse/ClickHouse/issues/65892): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#65283](https://github.com/ClickHouse/ClickHouse/issues/65283): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65370](https://github.com/ClickHouse/ClickHouse/issues/65370): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#65446](https://github.com/ClickHouse/ClickHouse/issues/65446): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65708](https://github.com/ClickHouse/ClickHouse/issues/65708): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65352](https://github.com/ClickHouse/ClickHouse/issues/65352): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65327](https://github.com/ClickHouse/ClickHouse/issues/65327): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
* Backported in [#65538](https://github.com/ClickHouse/ClickHouse/issues/65538): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Backported in [#65576](https://github.com/ClickHouse/ClickHouse/issues/65576): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#65159](https://github.com/ClickHouse/ClickHouse/issues/65159): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65615](https://github.com/ClickHouse/ClickHouse/issues/65615): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65728](https://github.com/ClickHouse/ClickHouse/issues/65728): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65261](https://github.com/ClickHouse/ClickHouse/issues/65261): Fix a bug in the Hashed and Hashed_Array dictionary short-circuit evaluation, which could read an uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#65667](https://github.com/ClickHouse/ClickHouse/issues/65667): Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled, since this combination could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` until this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65784](https://github.com/ClickHouse/ClickHouse/issues/65784): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65929](https://github.com/ClickHouse/ClickHouse/issues/65929): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#65824](https://github.com/ClickHouse/ClickHouse/issues/65824): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#65223](https://github.com/ClickHouse/ClickHouse/issues/65223): Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#65901](https://github.com/ClickHouse/ClickHouse/issues/65901): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).

View File

@ -0,0 +1,70 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.4.4.107-stable (af0ed6b197e) FIXME as compared to v24.4.3.25-stable (a915dd4eda4)
#### Improvement
* Backported in [#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with a sufficient number of threads in the global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Restored the previous behaviour of how ClickHouse interprets Tuples in the CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable the `non-intersecting-parts` optimization for queries with `FINAL` when the `read-in-order` optimization is enabled, since this combination could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` until this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add the missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; this is now fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).

View File

@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.7.2.13-stable (6e41f601b2f) FIXME as compared to v24.7.1.2915-stable (a37d2d43da7)
#### Improvement
* Backported in [#67531](https://github.com/ClickHouse/ClickHouse/issues/67531): In https://github.com/ClickHouse/ClickHouse/pull/66025 we introduced the setting `input_format_orc_read_use_writer_time_zone` so that, when reading an ORC file, the reader uses the writer's time zone instead of always using `GMT`. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#67505](https://github.com/ClickHouse/ClickHouse/issues/67505): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67580](https://github.com/ClickHouse/ClickHouse/issues/67580): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#67551](https://github.com/ClickHouse/ClickHouse/issues/67551): [Green CI] Fix test test_storage_s3_queue/test.py::test_max_set_age. [#67035](https://github.com/ClickHouse/ClickHouse/pull/67035) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67514](https://github.com/ClickHouse/ClickHouse/issues/67514): Split test 02967_parallel_replicas_join_algo_and_analyzer. [#67211](https://github.com/ClickHouse/ClickHouse/pull/67211) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67545](https://github.com/ClickHouse/ClickHouse/issues/67545): [Green CI] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown. [#67474](https://github.com/ClickHouse/ClickHouse/pull/67474) ([Alexey Katsman](https://github.com/alexkats)).

View File

@ -5608,3 +5608,9 @@ Default value: `10000000`.
Minimal size of a block to compress in CROSS JOIN. A zero value disables this threshold. A block is compressed when either of the two thresholds (by rows or by bytes) is reached.
Default value: `1GiB`.
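As a rough illustration (not part of this commit), such thresholds can be adjusted per query. In the sketch below, the setting names `cross_join_min_bytes_to_compress` and `cross_join_min_rows_to_compress`, as well as the tables `t1`/`t2`, are assumptions inferred from the surrounding documentation:

```sql
-- Sketch only: t1/t2 are placeholder tables; the setting names are assumed
-- from the documentation text and defaults shown above.
SELECT count()
FROM t1 CROSS JOIN t2
SETTINGS
    cross_join_min_bytes_to_compress = 0,        -- 0 disables the size-based threshold
    cross_join_min_rows_to_compress = 10000000;  -- keep the default row-based threshold
```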
## disable_insertion_and_mutation
Disable all INSERT and mutation queries (ALTER TABLE UPDATE / ALTER TABLE DELETE / ALTER TABLE DROP PARTITION). When set to true, the node handles only read queries.
Default value: `false`.
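A minimal sketch (not from this commit) of what this server-level setting looks like from the SQL side, assuming it has been enabled in the server configuration and that `my_table` is an ordinary placeholder table:

```sql
-- Sketch: with disable_insertion_and_mutation = true in the server configuration,
-- write queries are rejected while reads keep working. `my_table` is a placeholder.
SELECT name, value
FROM system.server_settings
WHERE name = 'disable_insertion_and_mutation';

INSERT INTO my_table VALUES (1);             -- rejected with QUERY_IS_PROHIBITED
ALTER TABLE my_table DELETE WHERE id = 1;    -- rejected with QUERY_IS_PROHIBITED
SELECT count() FROM my_table;                -- still allowed
```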

View File

@ -1,2 +1,2 @@
clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS})
target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)

View File

@ -12,38 +12,36 @@
#include <Interpreters/Context.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <base/scope_guard.h>
using namespace DB;
ContextMutablePtr context;
extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;
SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
MainThreadStatus::getInstance();
registerAggregateFunctions();
return 0;
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
using namespace DB;
static SharedContextHolder shared_context;
static ContextMutablePtr context;
auto initialize = [&]() mutable
{
if (context)
return true;
shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);
MainThreadStatus::getInstance();
registerAggregateFunctions();
return true;
};
static bool initialized = initialize();
(void) initialized;
total_memory_tracker.resetCounters();
total_memory_tracker.setHardLimit(1_GiB);
CurrentThread::get().memory_tracker.resetCounters();

View File

@ -2,6 +2,7 @@
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
@ -42,7 +43,7 @@ public:
if (lower_name.ends_with("if"))
return;
auto & function_arguments_nodes = function_node->getArguments().getNodes();
const auto & function_arguments_nodes = function_node->getArguments().getNodes();
if (function_arguments_nodes.size() != 1)
return;
@ -50,6 +51,8 @@ public:
if (!if_node || if_node->getFunctionName() != "if")
return;
FunctionNodePtr replaced_node;
auto if_arguments_nodes = if_node->getArguments().getNodes();
auto * first_const_node = if_arguments_nodes[1]->as<ConstantNode>();
auto * second_const_node = if_arguments_nodes[2]->as<ConstantNode>();
@ -75,8 +78,11 @@ public:
new_arguments[0] = std::move(if_arguments_nodes[1]);
new_arguments[1] = std::move(if_arguments_nodes[0]);
function_arguments_nodes = std::move(new_arguments);
resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
replaced_node = std::make_shared<FunctionNode>(function_node->getFunctionName() + "If");
replaced_node->getArguments().getNodes() = std::move(new_arguments);
replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes();
resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName());
}
}
else if (first_const_node)
@ -104,10 +110,26 @@ public:
FunctionFactory::instance().get("not", getContext())->build(not_function->getArgumentColumns()));
new_arguments[1] = std::move(not_function);
function_arguments_nodes = std::move(new_arguments);
resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
replaced_node = std::make_shared<FunctionNode>(function_node->getFunctionName() + "If");
replaced_node->getArguments().getNodes() = std::move(new_arguments);
replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes();
resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName());
}
}
if (!replaced_node)
return;
auto prev_type = function_node->getResultType();
auto curr_type = replaced_node->getResultType();
if (!prev_type->equals(*curr_type))
return;
/// Just in case, CAST compatible aggregate function states.
if (WhichDataType(prev_type).isAggregateFunction() && !DataTypeAggregateFunction::strictEquals(prev_type, curr_type))
node = createCastFunction(std::move(replaced_node), prev_type, getContext());
else
node = std::move(replaced_node);
}
};

View File

@ -296,11 +296,22 @@ ColumnWithTypeAndName ColumnFunction::reduce() const
function->getName(), toString(args), toString(captured));
ColumnsWithTypeAndName columns = captured_columns;
IFunction::ShortCircuitSettings settings;
/// Arguments of lazy executed function can also be lazy executed.
/// But we shouldn't execute arguments if this function is short circuit,
if (is_short_circuit_argument)
{
IFunction::ShortCircuitSettings settings;
/// We shouldn't execute all arguments if this function is short circuit,
/// because it will handle lazy executed arguments by itself.
if (is_short_circuit_argument && !function->isShortCircuit(settings, args))
/// Execute only arguments with disabled lazy execution.
if (function->isShortCircuit(settings, args))
{
for (size_t i : settings.arguments_with_disabled_lazy_execution)
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(columns[i].column))
columns[i] = arg->reduce();
}
}
else
{
for (auto & col : columns)
{
@ -308,6 +319,7 @@ ColumnWithTypeAndName ColumnFunction::reduce() const
col = arg->reduce();
}
}
}
ColumnWithTypeAndName res{nullptr, function->getResultType(), ""};

View File

@ -86,7 +86,10 @@ inline std::string_view toDescription(OvercommitResult result)
bool shouldTrackAllocation(Float64 probability, void * ptr)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
return intHash64(uintptr_t(ptr)) < std::numeric_limits<uint64_t>::max() * probability;
#pragma clang diagnostic pop
}
}

View File

@ -166,6 +166,7 @@ namespace DB
M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \
M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \
M(Bool, disable_insertion_and_mutation, false, "Disable all INSERT/ALTER/DELETE queries. Enable this setting if you need read-only nodes where insertions and mutations must not affect read performance.", 0)
/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp

View File

@ -1074,7 +1074,7 @@ class IColumn;
M(Bool, input_format_orc_allow_missing_columns, true, "Allow missing columns while reading ORC input formats", 0) \
M(Bool, input_format_orc_use_fast_decoder, true, "Use a faster ORC decoder implementation.", 0) \
M(Bool, input_format_orc_filter_push_down, true, "When reading ORC files, skip whole stripes or row groups based on the WHERE/PREWHERE expressions, min/max statistics or bloom filter in the ORC metadata.", 0) \
M(Bool, input_format_orc_read_use_writer_time_zone, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \
M(String, input_format_orc_reader_time_zone_name, "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \
M(Bool, input_format_parquet_allow_missing_columns, true, "Allow missing columns while reading Parquet input formats", 0) \
M(UInt64, input_format_parquet_local_file_min_bytes_for_seek, 8192, "Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format", 0) \
M(Bool, input_format_arrow_allow_missing_columns, true, "Allow missing columns while reading Arrow input formats", 0) \

View File

@ -70,7 +70,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."},
{"collect_hash_table_stats_during_joins", false, true, "New setting."},
{"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."},
{"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."},
{"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."},
{"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."},
{"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
{"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"},

View File

@ -271,9 +271,12 @@ namespace
if (d != 0.0 && !std::isnormal(d))
throw Exception(
ErrorCodes::CANNOT_PARSE_NUMBER, "A setting's value in seconds must be a normal floating point number or zero. Got {}", d);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
if (d * 1000000 > std::numeric_limits<Poco::Timespan::TimeDiff>::max() || d * 1000000 < std::numeric_limits<Poco::Timespan::TimeDiff>::min())
throw Exception(
ErrorCodes::BAD_ARGUMENTS, "Cannot convert seconds to microseconds: the setting's value in seconds is too big: {}", d);
#pragma clang diagnostic pop
return static_cast<Poco::Timespan::TimeDiff>(d * 1000000);
}

View File

@ -1,2 +1,2 @@
clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp)
target_link_libraries (names_and_types_fuzzer PRIVATE dbms)
target_link_libraries (names_and_types_fuzzer PRIVATE dbms clickhouse_functions)

View File

@ -1,2 +1,2 @@
clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS})
target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)

View File

@ -12,35 +12,30 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
using namespace DB;
ContextMutablePtr context;
extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;
SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
MainThreadStatus::getInstance();
registerAggregateFunctions();
return 0;
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
using namespace DB;
static SharedContextHolder shared_context;
static ContextMutablePtr context;
auto initialize = [&]() mutable
{
if (context)
return true;
shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);
MainThreadStatus::getInstance();
registerAggregateFunctions();
return true;
};
static bool initialized = initialize();
(void) initialized;
total_memory_tracker.resetCounters();
total_memory_tracker.setHardLimit(1_GiB);
CurrentThread::get().memory_tracker.resetCounters();

View File

@ -243,7 +243,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
format_settings.orc.output_row_index_stride = settings.output_format_orc_row_index_stride;
format_settings.orc.use_fast_decoder = settings.input_format_orc_use_fast_decoder;
format_settings.orc.filter_push_down = settings.input_format_orc_filter_push_down;
format_settings.orc.read_use_writer_time_zone = settings.input_format_orc_read_use_writer_time_zone;
format_settings.orc.reader_time_zone_name = settings.input_format_orc_reader_time_zone_name;
format_settings.defaults_for_omitted_fields = settings.input_format_defaults_for_omitted_fields;
format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode;
format_settings.capn_proto.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference;

View File

@ -409,7 +409,7 @@ struct FormatSettings
bool use_fast_decoder = true;
bool filter_push_down = true;
UInt64 output_row_index_stride = 10'000;
bool read_use_writer_time_zone = false;
String reader_time_zone_name = "GMT";
} orc{};
/// For capnProto format we should determine how to

View File

@ -1,2 +1,2 @@
clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS})
target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)

View File

@ -20,37 +20,32 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
using namespace DB;
static SharedContextHolder shared_context;
static ContextMutablePtr context;
auto initialize = [&]() mutable
ContextMutablePtr context;
extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;
shared_context = Context::createShared();
SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);
MainThreadStatus::getInstance();
registerAggregateFunctions();
registerFormats();
return true;
};
static bool initialized = initialize();
(void) initialized;
return 0;
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
total_memory_tracker.resetCounters();
total_memory_tracker.setHardLimit(1_GiB);
CurrentThread::get().memory_tracker.resetCounters();

View File

@ -2146,7 +2146,10 @@ struct Transformer
if constexpr (std::is_same_v<Additions, DateTimeAccurateConvertStrategyAdditions>
|| std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
bool is_valid_input = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL;
#pragma clang diagnostic pop
if (!is_valid_input)
{
if constexpr (std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)

View File

@ -217,7 +217,10 @@ private:
}
Float64 num_bytes_with_decimals = base * iter->second;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
if (num_bytes_with_decimals > std::numeric_limits<UInt64>::max())
#pragma clang diagnostic pop
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,

View File

@ -214,10 +214,6 @@ static void setLazyExecutionInfo(
}
lazy_execution_info.short_circuit_ancestors_info[parent].insert(indexes.begin(), indexes.end());
/// After checking arguments_with_disabled_lazy_execution, if there is no relation with parent,
/// disable the current node.
if (indexes.empty())
lazy_execution_info.can_be_lazy_executed = false;
}
else
/// If lazy execution is disabled for one of parents, we should disable it for current node.

View File

@ -5,6 +5,7 @@
#include <Access/Common/AccessRightsElement.h>
#include <Common/typeid_cast.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Databases/DatabaseFactory.h>
#include <Databases/DatabaseReplicated.h>
#include <Databases/IDatabase.h>
@ -47,6 +48,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
extern const int UNKNOWN_TABLE;
extern const int UNKNOWN_DATABASE;
extern const int QUERY_IS_PROHIBITED;
}
@ -191,6 +193,12 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
"to execute ALTERs of different types (replicated and non replicated) in single query");
}
if (mutation_commands.hasNonEmptyMutationCommands() || !partition_commands.empty())
{
if (getContext()->getServerSettings().disable_insertion_and_mutation)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Mutations are prohibited");
}
if (!alter_commands.empty())
{
auto alter_lock = table->lockForAlter(getContext()->getSettingsRef().lock_acquire_timeout);

View File

@ -361,20 +361,12 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
TablesLoader loader{getContext()->getGlobalContext(), {{database_name, database}}, mode};
auto load_tasks = loader.loadTablesAsync();
auto startup_tasks = loader.startupTablesAsync();
if (getContext()->getGlobalContext()->getServerSettings().async_load_databases)
{
scheduleLoad(load_tasks);
scheduleLoad(startup_tasks);
}
else
{
/// First prioritize, schedule and wait all the load table tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), load_tasks);
/// Only then prioritize, schedule and wait all the startup tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks);
}
}
}
catch (...)
{
if (renamed)

View File

@ -3,6 +3,7 @@
#include <Access/ContextAccess.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Databases/DatabaseReplicated.h>
#include <Databases/IDatabase.h>
#include <Interpreters/Context.h>
@ -27,6 +28,7 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
extern const int BAD_ARGUMENTS;
extern const int NOT_IMPLEMENTED;
extern const int QUERY_IS_PROHIBITED;
}
@ -51,6 +53,9 @@ BlockIO InterpreterDeleteQuery::execute()
if (table->isStaticStorage())
throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");
if (getContext()->getGlobalContext()->getServerSettings().disable_insertion_and_mutation)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Delete queries are prohibited");
DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name);
if (database->shouldReplicateQuery(getContext(), query_ptr))
{

View File

@ -6,6 +6,7 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <Columns/ColumnNullable.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Processors/Transforms/buildPushingToViewsChain.h>
#include <DataTypes/DataTypeNullable.h>
#include <Interpreters/DatabaseCatalog.h>
@ -60,6 +61,7 @@ namespace ErrorCodes
extern const int NO_SUCH_COLUMN_IN_TABLE;
extern const int ILLEGAL_COLUMN;
extern const int DUPLICATE_COLUMN;
extern const int QUERY_IS_PROHIBITED;
}
InterpreterInsertQuery::InterpreterInsertQuery(
@ -732,6 +734,9 @@ BlockIO InterpreterInsertQuery::execute()
const Settings & settings = getContext()->getSettingsRef();
auto & query = query_ptr->as<ASTInsertQuery &>();
if (getContext()->getServerSettings().disable_insertion_and_mutation
&& query.table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Insert queries are prohibited");
StoragePtr table = getTable(query);
checkStorageSupportsTransactionsIfNeeded(table, getContext());

View File

@ -14,24 +14,17 @@
using namespace DB;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
std::string input = std::string(reinterpret_cast<const char*>(data), size);
static SharedContextHolder shared_context;
static ContextMutablePtr context;
ContextMutablePtr context;
auto initialize = [&]() mutable
extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;
shared_context = Context::createShared();
SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);
registerInterpreters();
registerFunctions();
@ -43,11 +36,14 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
registerDisks(/* global_skip_access_check= */ true);
registerFormats();
return true;
};
return 0;
}
static bool initialized = initialize();
(void) initialized;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
std::string input = std::string(reinterpret_cast<const char*>(data), size);
auto io = DB::executeQuery(input, context, QueryFlags{ .internal = true }, QueryProcessingStage::Complete).second;

View File

@ -27,7 +27,8 @@ DEFINE_BINARY_PROTO_FUZZER(const Sentence& main)
DB::ParserQueryWithOutput parser(input.data() + input.size());
try
{
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
DB::ASTPtr ast
= parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
DB::WriteBufferFromOStream out(std::cerr, 4096);
DB::formatAST(*ast, out);

View File

@ -14,7 +14,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
std::string input = std::string(reinterpret_cast<const char*>(data), size);
DB::ParserCreateQuery parser;
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000);
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
const UInt64 max_ast_depth = 1000;
ast->checkDepth(max_ast_depth);

View File

@ -900,11 +900,7 @@ bool NativeORCBlockInputFormat::prepareStripeReader()
orc::RowReaderOptions row_reader_options;
row_reader_options.includeTypes(include_indices);
if (format_settings.orc.read_use_writer_time_zone)
{
String writer_time_zone = current_stripe_info->getWriterTimezone();
row_reader_options.setTimezoneName(writer_time_zone);
}
row_reader_options.setTimezoneName(format_settings.orc.reader_time_zone_name);
row_reader_options.range(current_stripe_info->getOffset(), current_stripe_info->getLength());
if (format_settings.orc.filter_push_down && sarg)
{

View File

@ -5557,12 +5557,16 @@ public:
auto it = temp_part_dirs.find(part_name);
if (it == temp_part_dirs.end())
{
auto temp_part_dir = std::make_shared<TemporaryFileOnDisk>(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-"));
auto temp_dir_deleter = std::make_unique<TemporaryFileOnDisk>(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-"));
auto temp_part_dir = fs::path{temp_dir_deleter->getRelativePath()}.filename();
/// Attaching parts will rename them so it's expected for a temporary part directory not to exist anymore in the end.
temp_part_dir->setShowWarningIfRemoved(false);
it = temp_part_dirs.emplace(part_name, temp_part_dir).first;
temp_dir_deleter->setShowWarningIfRemoved(false);
/// The following holder is needed to prevent clearOldTemporaryDirectories() from clearing `temp_part_dir` before we attach the part.
auto temp_dir_holder = storage->getTemporaryPartDirectoryHolder(temp_part_dir);
it = temp_part_dirs.emplace(part_name,
std::make_pair(std::move(temp_dir_deleter), std::move(temp_dir_holder))).first;
}
return it->second->getRelativePath();
return it->second.first->getRelativePath();
}
private:
@ -5588,7 +5592,7 @@ private:
size_t num_parts = 0;
size_t num_broken_parts = 0;
MutableDataPartsVector parts;
std::map<String /* part_name*/, std::shared_ptr<TemporaryFileOnDisk>> temp_part_dirs;
std::map<String /* part_name*/, std::pair<std::unique_ptr<TemporaryFileOnDisk>, scope_guard>> temp_part_dirs;
mutable std::mutex mutex;
};

View File

@ -341,17 +341,21 @@ void MergeTreeDeduplicationLog::shutdown()
stopped = true;
if (current_writer)
{
/// If an error has occurred during finalize, we'd like to have the exception set for reset.
/// Otherwise, we'll be in a situation when a finalization didn't happen, and we didn't get
/// any error, causing logical error (see ~MemoryBuffer()).
try
{
current_writer->finalize();
current_writer.reset();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
current_writer.reset();
}
}
}
MergeTreeDeduplicationLog::~MergeTreeDeduplicationLog()
{

View File

@ -4,4 +4,4 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.c
target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms)
clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp)
target_link_libraries (columns_description_fuzzer PRIVATE dbms)
target_link_libraries (columns_description_fuzzer PRIVATE dbms clickhouse_functions)

View File

@ -1,5 +1,7 @@
#include <Storages/ColumnsDescription.h>
#include <iostream>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{

View File

@ -16,7 +16,7 @@ Don't use Docker from your system repository.
* [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install:
```
```bash
sudo -H pip install \
PyMySQL \
avro \
@ -78,7 +78,7 @@ Notes:
* Some tests may require a lot of resources (CPU, RAM, etc.). It is better not to run large tests like `test_distributed_ddl*` on your laptop.
You can run tests via the `./runner` script and pass pytest arguments as the last argument:
```
```bash
$ ./runner --binary $HOME/ClickHouse/programs/clickhouse --odbc-bridge-binary $HOME/ClickHouse/programs/clickhouse-odbc-bridge --base-configs-dir $HOME/ClickHouse/programs/server/ 'test_ssl_cert_authentication -ss'
Start tests
====================================================================================================== test session starts ======================================================================================================
@ -102,7 +102,7 @@ test_ssl_cert_authentication/test.py::test_create_user PASSED
```
Paths to the binary and configs may be specified via environment variables:
```
```bash
$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/
$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse
$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge
@ -121,6 +121,64 @@ test_odbc_interaction/test.py ...... [100%]
You can just open a shell inside a container by overriding the command:
./runner --command=bash
### Parallel test execution
On the CI, we run a number of parallel runners (5 at the time of this writing), each in its own
Docker container. These runner containers spawn more containers for each test, for the services
it needs, such as ZooKeeper, MySQL, PostgreSQL and minio, among others. This means that tests do not
share any services among them. Within each runner, tests are parallelized using
[pytest-xdist](https://pytest-xdist.readthedocs.io/en/stable/). We're using `--dist=loadfile` to
[distribute the load](https://pytest-xdist.readthedocs.io/en/stable/distribution.html). In the
documentation's words: this guarantees that all tests in a file run in the same worker. This means
that tests within the same file never run in parallel; they are executed on the same worker
one after the other.
If the test supports parallel and repeated execution, you can run a bunch of them in parallel to
look for flakiness. We use [pytest-repeat](https://pypi.org/project/pytest-repeat/) to set the
number of times we want to execute a test through the `--count` argument. Then, `-n` sets the number
of parallel workers for `pytest-xdist`.
```bash
$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/
$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse
$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge
$ ./runner 'test_storage_s3_queue/test.py::test_max_set_age -- --count 10 -n 5'
Start tests
=============================================================================== test session starts ================================================================================
platform linux -- Python 3.10.12, pytest-7.4.4, pluggy-1.5.0 -- /usr/bin/python3
cachedir: .pytest_cache
rootdir: /ClickHouse/tests/integration
configfile: pytest.ini
plugins: reportlog-0.4.0, xdist-3.5.0, random-0.2, repeat-0.9.3, order-1.0.0, timeout-2.2.0
timeout: 900.0s
timeout method: signal
timeout func_only: False
5 workers [10 items]
scheduling tests via LoadScheduling
test_storage_s3_queue/test.py::test_max_set_age[9-10]
test_storage_s3_queue/test.py::test_max_set_age[7-10]
test_storage_s3_queue/test.py::test_max_set_age[5-10]
test_storage_s3_queue/test.py::test_max_set_age[1-10]
test_storage_s3_queue/test.py::test_max_set_age[3-10]
[gw3] [ 10%] PASSED test_storage_s3_queue/test.py::test_max_set_age[7-10]
test_storage_s3_queue/test.py::test_max_set_age[8-10]
[gw4] [ 20%] PASSED test_storage_s3_queue/test.py::test_max_set_age[9-10]
test_storage_s3_queue/test.py::test_max_set_age[10-10]
[gw0] [ 30%] PASSED test_storage_s3_queue/test.py::test_max_set_age[1-10]
test_storage_s3_queue/test.py::test_max_set_age[2-10]
[gw1] [ 40%] PASSED test_storage_s3_queue/test.py::test_max_set_age[3-10]
test_storage_s3_queue/test.py::test_max_set_age[4-10]
[gw2] [ 50%] PASSED test_storage_s3_queue/test.py::test_max_set_age[5-10]
test_storage_s3_queue/test.py::test_max_set_age[6-10]
[gw3] [ 60%] PASSED test_storage_s3_queue/test.py::test_max_set_age[8-10]
[gw4] [ 70%] PASSED test_storage_s3_queue/test.py::test_max_set_age[10-10]
[gw0] [ 80%] PASSED test_storage_s3_queue/test.py::test_max_set_age[2-10]
[gw1] [ 90%] PASSED test_storage_s3_queue/test.py::test_max_set_age[4-10]
[gw2] [100%] PASSED test_storage_s3_queue/test.py::test_max_set_age[6-10]
========================================================================== 10 passed in 120.65s (0:02:00) ==========================================================================
```
### Rebuilding the docker containers
The main container used for integration tests lives in `docker/test/integration/base/Dockerfile`. Rebuild it with
@ -149,7 +207,7 @@ will automagically detect the types of variables and only the small diff of two
If tests are failing for mysterious reasons, this may help:
```
```bash
sudo service docker stop
sudo bash -c 'rm -rf /var/lib/docker/*'
sudo service docker start
@ -159,6 +217,6 @@ sudo service docker start
On Ubuntu 20.10 and later in host network mode (the default) one may encounter a problem with nested containers not seeing each other. It happens because legacy and nftables rules are out of sync. The problem can be solved by:
```
```bash
sudo iptables -P FORWARD ACCEPT
```

View File

@ -435,6 +435,11 @@ class ClickHouseCluster:
# docker-compose removes everything non-alphanumeric from project names so we do it too.
self.project_name = re.sub(r"[^a-z0-9]", "", project_name.lower())
self.instances_dir_name = get_instances_dir(self.name)
xdist_worker = os.getenv("PYTEST_XDIST_WORKER")
if xdist_worker:
self.project_name += f"_{xdist_worker}"
self.instances_dir_name += f"_{xdist_worker}"
self.instances_dir = p.join(self.base_dir, self.instances_dir_name)
self.docker_logs_path = p.join(self.instances_dir, "docker.log")
self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME)
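The intent of the added lines above can be sketched with a small, self-contained shell example (the names below are illustrative, not taken from `cluster.py`): under pytest-xdist every worker exports `PYTEST_XDIST_WORKER` (`gw0`, `gw1`, ...), and appending it keeps docker-compose project names and instance directories unique per worker.
```bash
# Illustrative sketch of the per-worker suffixing; the project and directory names are made up.
project_name="testexample"        # already lower-cased and stripped of non-alphanumerics
instances_dir_name="_instances"
if [ -n "${PYTEST_XDIST_WORKER:-}" ]; then   # set by pytest-xdist, e.g. gw0, gw1, ...
    project_name="${project_name}_${PYTEST_XDIST_WORKER}"
    instances_dir_name="${instances_dir_name}_${PYTEST_XDIST_WORKER}"
fi
echo "${project_name} ${instances_dir_name}"  # e.g. testexample_gw3 _instances_gw3
```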

View File

@ -1054,9 +1054,12 @@ def test_mutation():
backup_name = new_backup_name()
node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")
assert not has_mutation_in_backup("0000000000", backup_name, "default", "tbl")
# mutation #0000000000: "UPDATE x=x+1 WHERE 1" could already finish before starting the backup
# mutation #0000000001: "UPDATE x=x+1+sleep(3) WHERE 1"
assert has_mutation_in_backup("0000000001", backup_name, "default", "tbl")
# mutation #0000000002: "UPDATE x=x+1+sleep(3) WHERE 1"
assert has_mutation_in_backup("0000000002", backup_name, "default", "tbl")
# mutation #0000000003: not expected
assert not has_mutation_in_backup("0000000003", backup_name, "default", "tbl")
node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

View File

@ -0,0 +1,16 @@
<clickhouse>
<remote_servers>
<default>
<shard>
<replica>
<host>writing_node</host>
<port>9000</port>
</replica>
<replica>
<host>reading_node</host>
<port>9000</port>
</replica>
</shard>
</default>
</remote_servers>
</clickhouse>

View File

@ -0,0 +1,3 @@
<clickhouse>
<disable_insertion_and_mutation>true</disable_insertion_and_mutation>
</clickhouse>

View File

@ -0,0 +1,3 @@
<clickhouse>
<disable_insertion_and_mutation>false</disable_insertion_and_mutation>
</clickhouse>

View File

@ -0,0 +1,75 @@
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
import time
cluster = ClickHouseCluster(__file__)
writing_node = cluster.add_instance(
"writing_node",
main_configs=["config/writing_node.xml", "config/cluster.xml"],
with_zookeeper=True,
with_minio=True,
stay_alive=True,
macros={"shard": 1, "replica": 1},
)
reading_node = cluster.add_instance(
"reading_node",
main_configs=["config/reading_node.xml", "config/cluster.xml"],
with_zookeeper=True,
with_minio=True,
stay_alive=True,
macros={"shard": 1, "replica": 2},
)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_disable_insertion_and_mutation(started_cluster):
writing_node.query(
"""CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) """
)
assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
"INSERT INTO my_table VALUES (1, 'hello')"
)
assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
"INSERT INTO my_table SETTINGS async_insert = 1 VALUES (1, 'hello')"
)
assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
"ALTER TABLE my_table delete where 1"
)
assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
"ALTER table my_table update key = 1 where 1"
)
assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
"ALTER TABLE my_table drop partition 0"
)
reading_node.query("SELECT * from my_table")
writing_node.query("INSERT INTO my_table VALUES (1, 'hello')")
writing_node.query("ALTER TABLE my_table delete where 1")
writing_node.query("ALTER table my_table update value = 'no hello' where 1")
reading_node.query("ALTER TABLE my_table ADD COLUMN new_column UInt64")
writing_node.query("SELECT new_column from my_table")
reading_node.query("SELECT new_column from my_table")
reading_node.query("ALter Table my_table MODIFY COLUMN new_column String")
assert "new_column\tString" in reading_node.query("DESC my_table")
assert "new_column\tString" in writing_node.query("DESC my_table")

View File

@ -55,7 +55,7 @@ def test_single_file(started_cluster, cluster):
path = get_dist_path(cluster, "distr_1", 1)
query = f"select * from file('{path}/1.bin', 'Distributed')"
out = node.exec_in_container(
["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
)
assert out == "1\ta\n2\tbb\n3\tccc\n"
@ -65,7 +65,7 @@ def test_single_file(started_cluster, cluster):
select * from t;
"""
out = node.exec_in_container(
["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
)
assert out == "1\ta\n2\tbb\n3\tccc\n"
@ -106,7 +106,7 @@ def test_two_files(started_cluster, cluster):
select * from t order by x;
"""
out = node.exec_in_container(
["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
)
assert out == "0\t_\n1\ta\n2\tbb\n3\tccc\n"
@ -141,7 +141,7 @@ def test_single_file_old(started_cluster, cluster):
select * from t;
"""
out = node.exec_in_container(
["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
)
assert out == "1\ta\n2\tbb\n3\tccc\n"

View File

@ -7,6 +7,7 @@ import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
import json
from uuid import uuid4
AVAILABLE_MODES = ["unordered", "ordered"]
@ -822,11 +823,11 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode):
def test_max_set_age(started_cluster):
node = started_cluster.instances["instance"]
table_name = f"max_set_age"
table_name = "max_set_age"
dst_table_name = f"{table_name}_dst"
keeper_path = f"/clickhouse/test_{table_name}"
files_path = f"{table_name}_data"
max_age = 10
max_age = 20
files_to_generate = 10
create_table(
@ -847,11 +848,9 @@ def test_max_set_age(started_cluster):
)
create_mv(node, table_name, dst_table_name)
total_values = generate_random_files(
started_cluster, files_path, files_to_generate, row_num=1
)
_ = generate_random_files(started_cluster, files_path, files_to_generate, row_num=1)
expected_rows = 10
expected_rows = files_to_generate
node.wait_for_log_line("Checking node limits")
node.wait_for_log_line("Node limits check finished")
@ -859,25 +858,24 @@ def test_max_set_age(started_cluster):
def get_count():
return int(node.query(f"SELECT count() FROM {dst_table_name}"))
for _ in range(20):
if expected_rows == get_count():
break
time.sleep(1)
def wait_for_condition(check_function, max_wait_time=1.5 * max_age):
before = time.time()
while time.time() - before < max_wait_time:
if check_function():
return
time.sleep(0.25)
assert False
assert expected_rows == get_count()
assert 10 == int(node.query(f"SELECT uniq(_path) from {dst_table_name}"))
wait_for_condition(lambda: get_count() == expected_rows)
assert files_to_generate == int(
node.query(f"SELECT uniq(_path) from {dst_table_name}")
)
time.sleep(max_age + 5)
expected_rows = 20
for _ in range(20):
if expected_rows == get_count():
break
time.sleep(1)
assert expected_rows == get_count()
assert 10 == int(node.query(f"SELECT uniq(_path) from {dst_table_name}"))
expected_rows *= 2
wait_for_condition(lambda: get_count() == expected_rows)
assert files_to_generate == int(
node.query(f"SELECT uniq(_path) from {dst_table_name}")
)
paths_count = [
int(x)
@ -885,69 +883,52 @@ def test_max_set_age(started_cluster):
f"SELECT count() from {dst_table_name} GROUP BY _path"
).splitlines()
]
assert 10 == len(paths_count)
assert files_to_generate == len(paths_count)
for path_count in paths_count:
assert 2 == path_count
failed_count = int(
def get_object_storage_failures():
return int(
node.query(
"SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1"
)
)
failed_count = get_object_storage_failures()
values = [
["failed", 1, 1],
]
values_csv = (
"\n".join((",".join(map(str, row)) for row in values)) + "\n"
).encode()
put_s3_file_content(started_cluster, f"{files_path}/fff.csv", values_csv)
for _ in range(30):
if failed_count + 1 == int(
node.query(
"SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1"
)
):
break
time.sleep(1)
# use a different filename for each test to allow running a bunch of them sequentially with --count
file_with_error = f"max_set_age_fail_{uuid4().hex[:8]}.csv"
put_s3_file_content(started_cluster, f"{files_path}/{file_with_error}", values_csv)
assert failed_count + 1 == int(
node.query(
"SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1"
)
)
wait_for_condition(lambda: failed_count + 1 == get_object_storage_failures())
node.query("SYSTEM FLUSH LOGS")
assert "Cannot parse input" in node.query(
"SELECT exception FROM system.s3queue WHERE file_name ilike '%fff.csv'"
f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}'"
)
assert 1 == int(
node.query(
"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%fff.csv'"
)
)
assert 1 == int(
node.query(
"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%fff.csv' AND notEmpty(exception)"
f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' AND notEmpty(exception)"
)
)
time.sleep(max_age + 1)
assert failed_count + 2 == int(
node.query(
"SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles'"
)
)
wait_for_condition(lambda: failed_count + 2 == get_object_storage_failures())
node.query("SYSTEM FLUSH LOGS")
assert "Cannot parse input" in node.query(
"SELECT exception FROM system.s3queue WHERE file_name ilike '%fff.csv' ORDER BY processing_end_time DESC LIMIT 1"
f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}' ORDER BY processing_end_time DESC LIMIT 1"
)
assert 1 < int(
node.query(
"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%fff.csv' AND notEmpty(exception)"
f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' AND notEmpty(exception)"
)
)

View File

@ -71,7 +71,7 @@ def test_first_or_random(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -99,7 +99,7 @@ def test_first_or_random(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -127,7 +127,7 @@ def test_first_or_random(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -161,7 +161,7 @@ def test_in_order(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -189,7 +189,7 @@ def test_in_order(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -217,7 +217,7 @@ def test_in_order(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -251,7 +251,7 @@ def test_nearest_hostname(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -279,7 +279,7 @@ def test_nearest_hostname(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -307,7 +307,7 @@ def test_nearest_hostname(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -341,7 +341,7 @@ def test_hostname_levenshtein_distance(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -369,7 +369,7 @@ def test_hostname_levenshtein_distance(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",
@ -397,7 +397,7 @@ def test_hostname_levenshtein_distance(started_cluster):
[
"bash",
"-c",
"lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
"lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
],
privileged=True,
user="root",

View File

@ -1,3 +1,4 @@
unnamed columns in tuple
<?xml version='1.0' encoding='UTF-8' ?>
<result>
<meta>
@ -54,3 +55,43 @@
</extremes>
<rows>1</rows>
</result>
named columns in tuple
<?xml version='1.0' encoding='UTF-8' ?>
<result>
<meta>
<columns>
<column>
<name>s</name>
<type>String</type>
</column>
<column>
<name>time</name>
<type>DateTime</type>
</column>
<column>
<name>tpl</name>
<type>Tuple(String, DateTime)</type>
</column>
</columns>
</meta>
<data>
<row>
<s>Hello &amp; world</s>
<time>2001-02-03 04:05:06</time>
<tpl><tuple><elem>Hello &amp; world</elem><elem>2001-02-03 04:05:06</elem></tuple></tpl>
</row>
</data>
<extremes>
<min>
<s>Hello &amp; world</s>
<time>2001-02-03 04:05:06</time>
<tpl><tuple><elem>Hello &amp; world</elem><elem>2001-02-03 04:05:06</elem></tuple></tpl>
</min>
<max>
<s>Hello &amp; world</s>
<time>2001-02-03 04:05:06</time>
<tpl><tuple><elem>Hello &amp; world</elem><elem>2001-02-03 04:05:06</elem></tuple></tpl>
</max>
</extremes>
<rows>1</rows>
</result>

View File

@ -1,2 +1,5 @@
SET output_format_write_statistics = 0;
SELECT 'unnamed columns in tuple';
SELECT 'Hello & world' AS s, 'Hello\n<World>', toDateTime('2001-02-03 04:05:06') AS time, arrayMap(x -> toString(x), range(10)) AS arr, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML;
SELECT 'named columns in tuple';
SELECT 'Hello & world' AS s, toDateTime('2001-02-03 04:05:06') AS time, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML;

View File

@ -9,3 +9,8 @@ SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, a
SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSON;
SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSONCompact;
SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT XML;
SET enable_named_columns_in_function_tuple = 1;
SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes SETTINGS allow_experimental_analyzer=1;
SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes SETTINGS allow_experimental_analyzer=1;

View File

@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true;
$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true;

View File

@ -25,7 +25,7 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_4&se
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_5&session_timeout=60" --data-binary "SELECT 1"
echo "Sessions are local per user:"
${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;"
${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;"
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6&session_timeout=600" --data-binary "CREATE TEMPORARY TABLE t (s String)"
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "INSERT INTO t VALUES ('Hello')"
@ -37,7 +37,7 @@ ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${C
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t"
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t"
${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER test_00463";
${CLICKHOUSE_CLIENT} --query "DROP USER test_00463";
echo "And cannot be accessed for a non-existent user:"
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" | grep -c -F 'Exception'
@ -59,7 +59,7 @@ done
echo "A session successfully expire after a timeout and the session's temporary table shadows the permanent table:"
# An infinite loop is required to make the test reliable. We will check that the timeout corresponds to the observed time at least once
${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');"
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');"
while true
do
(
@ -70,7 +70,7 @@ do
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8" --data-binary "SELECT * FROM t"
) | tr -d '\n' | grep -F 'HelloWorld' && break || sleep 1
done
${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE t"
${CLICKHOUSE_CLIENT} --query "DROP TABLE t"
echo "A session cannot be used by concurrent connections:"
@ -83,5 +83,5 @@ do
done
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT 1" | grep -c -F 'SESSION_IS_LOCKED'
${CLICKHOUSE_CLIENT} --multiquery --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null";
${CLICKHOUSE_CLIENT} --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null";
wait

View File

@ -8,8 +8,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=0 | grep value
$CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=1 | grep value
$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
$CLICKHOUSE_CLIENT --readonly=1 --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
$CLICKHOUSE_CLIENT --readonly=1 --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=1" | grep value
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=0" | grep value

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS pk_in_tuple_perf;
CREATE TABLE pk_in_tuple_perf
(
@ -27,7 +27,7 @@ $CLICKHOUSE_CLIENT --query "$query FORMAT JSON" | grep "rows_read"
## Test with non-const args in tuple
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS pk_in_tuple_perf_non_const;
CREATE TABLE pk_in_tuple_perf_non_const
(

View File

@ -22,7 +22,7 @@ echo '"Hello, world"; 123; "2016-01-01"
"Hello, ""world"""; "456"; 2016-01-02;
Hello "world"; 789 ;2016-01-03
"Hello
world"; 100; 2016-01-04;' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
world"; 100; 2016-01-04;' | $CLICKHOUSE_CLIENT --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d";
$CLICKHOUSE_CLIENT --format_csv_delimiter=";" --query="SELECT * FROM csv ORDER BY d FORMAT CSV";
@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s1 String, s2 String) ENGINE = Mem
echo 'abc,def;hello;
hello; world;
"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
$CLICKHOUSE_CLIENT --query="SELECT * FROM csv";
@ -44,7 +44,7 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s1 String, s2 String) ENGINE = Mem
echo '"s1";"s2"
abc,def;hello;
hello; world;
"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSVWithNames";
"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSVWithNames";
$CLICKHOUSE_CLIENT --format_csv_delimiter=";" --query="SELECT * FROM csv FORMAT CSV";
$CLICKHOUSE_CLIENT --format_csv_delimiter="," --query="SELECT * FROM csv FORMAT CSV";

View File

@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv";
$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64, d Date) ENGINE = Memory";
echo "'single quote' not end, 123, 2016-01-01
'em good, 456, 2016-01-02" | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_allow_single_quotes=0; INSERT INTO csv FORMAT CSV";
'em good, 456, 2016-01-02" | $CLICKHOUSE_CLIENT --query="SET format_csv_allow_single_quotes=0; INSERT INTO csv FORMAT CSV";
$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d";
@ -38,7 +38,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv";
$CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64, d Date) ENGINE = Memory";
echo '"double quote" not end, 123, 2016-01-01
"em good, 456, 2016-01-02' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_allow_double_quotes=0; INSERT INTO csv FORMAT CSV";
"em good, 456, 2016-01-02' | $CLICKHOUSE_CLIENT --query="SET format_csv_allow_double_quotes=0; INSERT INTO csv FORMAT CSV";
$CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d";

View File

@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" 2>/dev/null ||:
${CLICKHOUSE_CLIENT} --ignore-error --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" 2>/dev/null ||:

View File

@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery --mutations_sync=1 << EOF
${CLICKHOUSE_CLIENT} --mutations_sync=1 << EOF
DROP TABLE IF EXISTS mutations;
DROP TABLE IF EXISTS for_subquery;

View File

@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=./mergetree_mutations.lib
. "$CURDIR"/mergetree_mutations.lib
${CLICKHOUSE_CLIENT} --allow_nondeterministic_mutations=1 --multiquery << EOF
${CLICKHOUSE_CLIENT} --allow_nondeterministic_mutations=1 << EOF
DROP TABLE IF EXISTS mutations_r1;
DROP TABLE IF EXISTS for_subquery;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS view_00699;
DROP TABLE IF EXISTS null_00699;
@ -20,14 +20,14 @@ SELECT count(), min(x), max(x) FROM view_00699;
ALTER TABLE null_00699 DELETE WHERE x % 2 = 0;" --mutations_sync=1
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
SELECT count(), min(x), max(x) FROM null_00699;
SELECT count(), min(x), max(x) FROM view_00699;
ALTER TABLE view_00699 DELETE WHERE x % 2 = 0;
" --mutations_sync=1
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
SELECT count(), min(x), max(x) FROM null_00699;
SELECT count(), min(x), max(x) FROM view_00699;
@ -35,7 +35,7 @@ ALTER TABLE null_00699 DELETE WHERE x % 2 = 1;
ALTER TABLE view_00699 DELETE WHERE x % 2 = 1;
" --mutations_sync=1
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
SELECT count(), min(x), max(x) FROM null_00699;
SELECT count(), min(x), max(x) FROM view_00699;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS memory;
CREATE TABLE memory (x UInt64) ENGINE = Memory;
@ -21,13 +21,13 @@ INSERT INTO memory SELECT * FROM numbers(1000);"
# But if the table will be dropped before query - just pass.
# It's Ok, because otherwise the test will depend on the race condition in the test itself.
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
SET max_threads = 1;
SELECT count() FROM memory WHERE NOT ignore(sleep(0.0001));" 2>&1 | grep -c -P '^1000$|^0$|Exception' &
sleep 0.05;
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
TRUNCATE TABLE memory;
DROP TABLE memory;
"

View File

@ -16,12 +16,12 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE buffer_00763_2 (s String) ENGINE = Bu
function thread1()
{
seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS mt_00763_2; CREATE TABLE mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --fsync-metadata 0 --multiquery --ignore-error ||:
seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS mt_00763_2; CREATE TABLE mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --fsync-metadata 0 --ignore-error ||:
}
function thread2()
{
seq 1 500 | sed -r -e 's/.+/SELECT count() FROM buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218|^Code: 473' | grep -v '(query: '
seq 1 500 | sed -r -e 's/.+/SELECT count() FROM buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218|^Code: 473' | grep -v '(query: '
}
thread1 &

View File

@ -16,18 +16,39 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE buffer_00763_1 (s String) ENGINE = Bu
${CLICKHOUSE_CLIENT} --query="CREATE TABLE mt_00763_1 (x UInt32, s String) ENGINE = MergeTree ORDER BY x"
${CLICKHOUSE_CLIENT} --query="INSERT INTO mt_00763_1 VALUES (1, '1'), (2, '2'), (3, '3')"
function thread1()
function thread_alter()
{
seq 1 300 | sed -r -e 's/.+/ALTER TABLE mt_00763_1 MODIFY column s UInt32; ALTER TABLE mt_00763_1 MODIFY column s String;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||:
local TIMELIMIT=$((SECONDS+$1))
local it=0
while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 300 ];
do
it=$((it+1))
$CLICKHOUSE_CLIENT --ignore-error -q "
ALTER TABLE mt_00763_1 MODIFY column s UInt32;
ALTER TABLE mt_00763_1 MODIFY column s String;
" ||:
done
}
function thread2()
function thread_query()
{
seq 1 2000 | sed -r -e 's/.+/SELECT sum(length(s)) FROM buffer_00763_1;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)'
local TIMELIMIT=$((SECONDS+$1))
local it=0
while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 2000 ];
do
it=$((it+1))
$CLICKHOUSE_CLIENT --ignore-error -q "
SELECT sum(length(s)) FROM buffer_00763_1;
" 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)'
done
}
thread1 &
thread2 &
export -f thread_alter
export -f thread_query
TIMEOUT=30
thread_alter $TIMEOUT &
thread_query $TIMEOUT &
wait

View File

@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS array_3dim_protobuf_00825;
CREATE TABLE array_3dim_protobuf_00825

View File

@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS array_of_arrays_protobuf_00825;
CREATE TABLE array_of_arrays_protobuf_00825

View File

@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS enum_mapping_protobuf_00825;
CREATE TABLE enum_mapping_protobuf_00825

View File

@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS map_protobuf_00825;
CREATE TABLE map_protobuf_00825

View File

@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS nested_in_nested_protobuf_00825;
CREATE TABLE nested_in_nested_protobuf_00825 (x Nested (y Nested (z Int64))) ENGINE = MergeTree ORDER BY tuple();

View File

@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS nested_optional_protobuf_00825;
CREATE TABLE nested_optional_protobuf_00825

View File

@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS no_length_delimiter_protobuf_00825;
DROP TABLE IF EXISTS roundtrip_no_length_delimiter_protobuf_00825;
@ -43,11 +43,11 @@ $CLICKHOUSE_CLIENT --query "SELECT * FROM roundtrip_no_length_delimiter_protobuf
rm "$BINARY_FILE_PATH"
# The ProtobufSingle format can't be used to write multiple rows because this format doesn't have any row delimiter.
$CLICKHOUSE_CLIENT --multiquery > /dev/null <<EOF
$CLICKHOUSE_CLIENT > /dev/null <<EOF
SELECT * FROM no_length_delimiter_protobuf_00825 FORMAT ProtobufSingle SETTINGS format_schema = '$SCHEMADIR/00825_protobuf_format_no_length_delimiter:Message'; -- { clientError 546 }
EOF
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE no_length_delimiter_protobuf_00825;
DROP TABLE roundtrip_no_length_delimiter_protobuf_00825;
EOF

View File

@ -17,7 +17,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS persons_00825;
DROP TABLE IF EXISTS roundtrip_persons_00825;
DROP TABLE IF EXISTS alt_persons_00825;
@ -129,7 +129,7 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO edition2023_persons_00825 SETTINGS forma
$CLICKHOUSE_CLIENT --query "SELECT * FROM edition2023_persons_00825 ORDER BY name"
rm "$BINARY_FILE_PATH"
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE persons_00825;
DROP TABLE roundtrip_persons_00825;
DROP TABLE alt_persons_00825;

View File

@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS table_skipped_column_in_nested_00825;
CREATE TABLE table_skipped_column_in_nested_00825 (

View File

@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS splitted_nested_protobuf_00825;
CREATE TABLE splitted_nested_protobuf_00825 (

View File

@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS squares_protobuf_00825;
CREATE TABLE squares_protobuf_00825 (number UInt32, square UInt64) ENGINE = MergeTree ORDER BY tuple();

View File

@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
set -eo pipefail
# Run the client.
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS table_default_protobuf_00825;
CREATE TABLE table_default_protobuf_00825

View File

@ -58,7 +58,7 @@ for NAME in $(find "$DATA_DIR"/*.parquet -print0 | xargs -0 -n 1 basename | LC_A
COLUMNS=$(cat "$COLUMNS_FILE") || continue
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
CREATE TABLE parquet_load ($COLUMNS) ENGINE = Memory;
EOF

View File

@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS maps"
${CLICKHOUSE_CLIENT} --multiquery <<EOF
${CLICKHOUSE_CLIENT} <<EOF
CREATE TABLE maps (m1 Map(UInt32, UInt32), m2 Map(String, String), m3 Map(UInt32, Tuple(UInt32, UInt32)), m4 Map(UInt32, Array(UInt32)), m5 Array(Map(UInt32, UInt32)), m6 Tuple(Map(UInt32, UInt32), Map(String, String)), m7 Array(Map(UInt32, Array(Tuple(Map(UInt32, UInt32), Tuple(UInt32)))))) ENGINE=Memory();
EOF

View File

@ -23,7 +23,7 @@ echo -e "\n"
# Test that if both format_template_row_format setting and format_template_row are provided, error is thrown
row_format_file="$CURDIR"/"${CLICKHOUSE_TEST_UNIQUE_NAME}"_template_output_format_row.tmp
echo -ne 'Question: ${question:Quoted}, Answer: ${answer:Quoted}, Number of Likes: ${likes:Raw}, Date: ${date:Raw}' > $row_format_file
$CLICKHOUSE_CLIENT --multiline --multiquery --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
$CLICKHOUSE_CLIENT --multiline --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
format_template_row = '$row_format_file', \
format_template_row_format = 'Question: \${question:Quoted}, Answer: \${answer:Quoted}, Number of Likes: \${likes:Raw}, Date: \${date:Raw}', \
format_template_rows_between_delimiter = ';\n'; --{clientError 474}"
@ -38,7 +38,7 @@ format_template_rows_between_delimiter = ';\n'";
# Test that if both format_template_result_format setting and format_template_resultset are provided, error is thrown
resultset_output_file="$CURDIR"/"$CLICKHOUSE_TEST_UNIQUE_NAME"_template_output_format_resultset.tmp
echo -ne '===== Resultset ===== \n \${data} \n ===============' > $resultset_output_file
$CLICKHOUSE_CLIENT --multiline --multiquery --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
$CLICKHOUSE_CLIENT --multiline --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
format_template_resultset = '$resultset_output_file', \
format_template_resultset_format = '===== Resultset ===== \n \${data} \n ===============', \
format_template_row_format = 'Question: \${question:Quoted}, Answer: \${answer:Quoted}, Number of Likes: \${likes:Raw}, Date: \${date:Raw}', \

View File

@ -17,7 +17,7 @@ echo 1
# normal execution
$CLICKHOUSE_CLIENT \
--query="SELECT 'find_me_TOPSECRET=TOPSECRET' FROM numbers(1) FORMAT Null" \
--log_queries=1 --ignore-error --multiquery >"$tmp_file" 2>&1
--log_queries=1 --ignore-error >"$tmp_file" 2>&1
grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 1a'
grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 1b'
@ -38,7 +38,7 @@ echo 3
# failure at before query start
$CLICKHOUSE_CLIENT \
--query="SELECT 1 FROM system.numbers WHERE credit_card_number='find_me_TOPSECRET=TOPSECRET' FORMAT Null" \
--log_queries=1 --ignore-error --multiquery |& grep -v '^(query: ' > "$tmp_file"
--log_queries=1 --ignore-error |& grep -v '^(query: ' > "$tmp_file"
grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 3a'
grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 3b'
@ -56,7 +56,7 @@ echo 4
# failure at the end of query
$CLICKHOUSE_CLIENT \
--query="SELECT 'find_me_TOPSECRET=TOPSECRET', intDiv( 100, number - 10) FROM numbers(11) FORMAT Null" \
--log_queries=1 --ignore-error --max_block_size=2 --multiquery |& grep -v '^(query: ' > "$tmp_file"
--log_queries=1 --ignore-error --max_block_size=2 |& grep -v '^(query: ' > "$tmp_file"
grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 4a'
grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 4b'
@ -67,7 +67,7 @@ rm -f "$tmp_file2" >/dev/null 2>&1
bash -c "$CLICKHOUSE_CLIENT \
--function_sleep_max_microseconds_per_block 60000000 \
--query=\"select sleepEachRow(1) from numbers(10) where ignore('find_me_TOPSECRET=TOPSECRET')=0 and ignore('fwerkh_that_magic_string_make_me_unique') = 0 FORMAT Null\" \
--log_queries=1 --ignore-error --multiquery |& grep -v '^(query: ' > $tmp_file2" &
--log_queries=1 --ignore-error |& grep -v '^(query: ' > $tmp_file2" &
rm -f "$tmp_file" >/dev/null 2>&1
# check that executing query doesn't expose secrets in processlist
@ -133,7 +133,7 @@ insert into sensitive select number as id, toDate('2019-01-01') as date, 'abcd'
insert into sensitive select number as id, toDate('2019-01-01') as date, 'find_me_TOPSECRET=TOPSECRET' as value1, rand() as valuer from numbers(10);
insert into sensitive select number as id, toDate('2019-01-01') as date, 'abcd' as value1, rand() as valuer from numbers(10000);
select * from sensitive WHERE value1 = 'find_me_TOPSECRET=TOPSECRET' FORMAT Null;
drop table sensitive;" --log_queries=1 --ignore-error --multiquery >"$tmp_file" 2>&1
drop table sensitive;" --log_queries=1 --ignore-error >"$tmp_file" 2>&1
grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 8a'
grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 8b'
@ -144,7 +144,7 @@ echo 9
$CLICKHOUSE_CLIENT \
--server_logs_file=/dev/null \
--query="SELECT if( count() > 0, 'text_log non empty', 'text_log empty') FROM system.text_log WHERE event_date >= yesterday() and message like '%find_me%';
select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error --multiquery
select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error
echo 'finish'
rm -f "$tmp_file" >/dev/null 2>&1

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS src_a;
DROP TABLE IF EXISTS src_b;

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# that test is failing on versions <= 19.11.12
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS lc_empty_part_bug;
create table lc_empty_part_bug (id UInt64, s String) Engine=MergeTree ORDER BY id SETTINGS number_of_free_entries_in_pool_to_execute_mutation=0;
insert into lc_empty_part_bug select number as id, toString(rand()) from numbers(100);
@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} --multiquery --query="
echo 'Waited for mutation to finish'
${CLICKHOUSE_CLIENT} --multiquery --query="
${CLICKHOUSE_CLIENT} --query="
alter table lc_empty_part_bug modify column s LowCardinality(String);
SELECT 'still alive';
insert into lc_empty_part_bug select number+100 as id, toString(rand()) from numbers(100);

View File

@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e -o pipefail
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
CREATE TABLE ${CLICKHOUSE_DATABASE}.table(x Int64, y Int64, insert_time DateTime) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (12, 102, now());

View File

@ -9,7 +9,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -9,7 +9,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -9,7 +9,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -9,7 +9,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

View File

@ -8,7 +8,7 @@ opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;

Some files were not shown because too many files have changed in this diff