diff --git a/.gitmodules b/.gitmodules index 55fd684fddb..5988282f616 100644 --- a/.gitmodules +++ b/.gitmodules @@ -265,6 +265,9 @@ [submodule "contrib/wyhash"] path = contrib/wyhash url = https://github.com/wangyi-fudan/wyhash.git +[submodule "contrib/eigen"] + path = contrib/eigen + url = https://github.com/eigen-mirror/eigen [submodule "contrib/hashidsxx"] path = contrib/hashidsxx - url = https://github.com/schoentoon/hashidsxx.git + url = https://github.com/schoentoon/hashidsxx.git \ No newline at end of file diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 36c91d95be9..9551e05d4f8 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -61,7 +61,7 @@ else () endif () if (ARCH_PPC64LE) - set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS") + set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -DNO_WARN_X86_INTRINSICS") endif () set (TEST_FLAG "-msse4.2") diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 943e0e0ebc1..a7f1a908474 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -153,6 +153,7 @@ endif() add_contrib (sqlite-cmake sqlite-amalgamation) add_contrib (s2geometry-cmake s2geometry) +add_contrib (eigen-cmake eigen) # Put all targets defined here and in subdirectories under "contrib/" folders in GUI-based IDEs. # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear diff --git a/contrib/eigen b/contrib/eigen new file mode 160000 index 00000000000..3147391d946 --- /dev/null +++ b/contrib/eigen @@ -0,0 +1 @@ +Subproject commit 3147391d946bb4b6c68edd901f2add6ac1f31f8c diff --git a/contrib/eigen-cmake/CMakeLists.txt b/contrib/eigen-cmake/CMakeLists.txt new file mode 100644 index 00000000000..6bdf3ab7c35 --- /dev/null +++ b/contrib/eigen-cmake/CMakeLists.txt @@ -0,0 +1,23 @@ +set(EIGEN_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/eigen") + +add_library (_eigen INTERFACE) + +option (ENABLE_MKL "Build Eigen with Intel MKL" OFF) +if (ENABLE_MKL) + set(MKL_THREADING sequential) + set(MKL_INTERFACE lp64) + find_package(MKL REQUIRED) + if (MKL_FOUND) + message("MKL INCLUDE: ${MKL_INCLUDE}") + message("MKL LIBRARIES: ${MKL_LIBRARIES}") + target_compile_definitions(_eigen INTERFACE EIGEN_USE_MKL_ALL) + target_include_directories(_eigen INTERFACE ${MKL_INCLUDE}) + target_link_libraries(_eigen INTERFACE ${MKL_LIBRARIES}) + endif() +endif() + +# Only include MPL2 code from Eigen library +target_compile_definitions(_eigen INTERFACE EIGEN_MPL2_ONLY) + +target_include_directories (_eigen SYSTEM INTERFACE ${EIGEN_LIBRARY_DIR}) +add_library(ch_contrib::eigen ALIAS _eigen) diff --git a/contrib/poco b/contrib/poco index 6c1a233744d..5d11f0aa6fd 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 6c1a233744d13414e8e8db396c75177b857b2c22 +Subproject commit 5d11f0aa6fd2faad0a7b34aa355cd50c4deb27e6 diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index c547ae03a52..8692c8c64c5 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -177,6 +177,7 @@ function clone_submodules contrib/jemalloc contrib/replxx contrib/wyhash + contrib/eigen ) git submodule sync diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index 05ef10ad020..943e41e059b 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -10,21 +10,17 @@ description: How to build ClickHouse on Mac OS X You can install pre-built 
ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). Follow **macOS (Intel)** or **macOS (Apple silicon)** installation instructions. ::: -Build should work on x86_64 (Intel) and arm64 (Apple silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang. -It is always recommended to use vanilla `clang` compiler. +The build works on x86_64 (Intel) and arm64 (Apple Silicon) based on macOS 10.15 (Catalina) or higher with Homebrew's vanilla Clang. :::note -It is possible to use XCode's `apple-clang` or `gcc`, but it's strongly discouraged. +It is also possible to compile with Apple's XCode `apple-clang` or Homebrew's `gcc`, but it's strongly discouraged. ::: ## Install Homebrew {#install-homebrew} -``` bash -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -# ...and follow the printed instructions on any additional steps required to complete the installation. -``` +First install [Homebrew](https://brew.sh/) -## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools} +## For Apple's Clang (discouraged): Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools} Install the latest [Xcode](https://apps.apple.com/am/app/xcode/id497799835?mt=12) from App Store. @@ -57,12 +53,12 @@ To build using Homebrew's vanilla Clang compiler (the only **recommended** way): ``` bash cd ClickHouse -rm -rf build mkdir build -cd build -cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_AR=$(brew --prefix llvm)/bin/llvm-ar -DCMAKE_RANLIB=$(brew --prefix llvm)/bin/llvm-ranlib -DOBJCOPY_PATH=$(brew --prefix llvm)/bin/llvm-objcopy -DCMAKE_BUILD_TYPE=RelWithDebInfo .. -cmake --build . --config RelWithDebInfo -# The resulting binary will be created at: ./programs/clickhouse +export CC=$(brew --prefix llvm)/bin/clang +export CXX=$(brew --prefix llvm)/bin/clang++ +cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . -B build +cmake --build build +# The resulting binary will be created at: build/programs/clickhouse ``` To build using Xcode's native AppleClang compiler in Xcode IDE (this option is only for development builds and workflows, and is **not recommended** unless you know what you are doing): @@ -82,12 +78,12 @@ To build using Homebrew's vanilla GCC compiler (this option is only for developm ``` bash cd ClickHouse -rm -rf build mkdir build -cd build -cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-11 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-11 -DCMAKE_AR=$(brew --prefix gcc)/bin/gcc-ar-11 -DCMAKE_RANLIB=$(brew --prefix gcc)/bin/gcc-ranlib-11 -DOBJCOPY_PATH=$(brew --prefix binutils)/bin/objcopy -DCMAKE_BUILD_TYPE=RelWithDebInfo .. -cmake --build . --config RelWithDebInfo -# The resulting binary will be created at: ./programs/clickhouse +export CC=$(brew --prefix gcc)/bin/gcc-11 +export CXX=$(brew --prefix gcc)/bin/g++-11 +cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . 
-B build +cmake --build build +# The resulting binary will be created at: build/programs/clickhouse ``` ## Caveats {#caveats} diff --git a/docs/en/getting-started/example-datasets/nyc-taxi.md b/docs/en/getting-started/example-datasets/nyc-taxi.md index 270aeb4929c..360f9eed1c8 100644 --- a/docs/en/getting-started/example-datasets/nyc-taxi.md +++ b/docs/en/getting-started/example-datasets/nyc-taxi.md @@ -1,5 +1,6 @@ --- sidebar_label: New York Taxi Data +sidebar_position: 2 description: Data for billions of taxi and for-hire vehicle (Uber, Lyft, etc.) trips originating in New York City since 2009 --- diff --git a/docs/en/getting-started/example-datasets/uk-price-paid.md b/docs/en/getting-started/example-datasets/uk-price-paid.md index e0f20639aea..eaec6e53ed4 100644 --- a/docs/en/getting-started/example-datasets/uk-price-paid.md +++ b/docs/en/getting-started/example-datasets/uk-price-paid.md @@ -1,5 +1,6 @@ --- sidebar_label: UK Property Price Paid +sidebar_position: 1 --- # UK Property Price Paid diff --git a/docs/en/operations/external-authenticators/ssl-x509.md b/docs/en/operations/external-authenticators/ssl-x509.md index dd4f35257bb..15b5990d00e 100644 --- a/docs/en/operations/external-authenticators/ssl-x509.md +++ b/docs/en/operations/external-authenticators/ssl-x509.md @@ -2,7 +2,7 @@ [SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. `Common Name` field of the certificate is used to identify connected user. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration. -To enable SSL certificate authentication, a list of `Common Name`'s for each ClickHouse user must be sspecified in the settings file `config.xml `: +To enable SSL certificate authentication, a list of `Common Name`'s for each ClickHouse user must be specified in the settings file `users.xml `: **Example** ```xml @@ -10,11 +10,11 @@ To enable SSL certificate authentication, a list of `Common Name`'s for each Cli - + host.domain.com:example_user host.domain.com:example_user_dev - + diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index f235fba84f7..fd5c2a187b5 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1745,3 +1745,13 @@ Possible values: - Positive integer. Default value: `10000`. + +## global_memory_usage_overcommit_max_wait_microseconds {#global_memory_usage_overcommit_max_wait_microseconds} + +Sets maximum waiting time for global overcommit tracker. + +Possible values: + +- Positive integer. + +Default value: `200`. diff --git a/docs/en/operations/settings/memory-overcommit.md b/docs/en/operations/settings/memory-overcommit.md new file mode 100644 index 00000000000..74cbc4dbd03 --- /dev/null +++ b/docs/en/operations/settings/memory-overcommit.md @@ -0,0 +1,37 @@ +# Memory overcommit + +Memory overcommit is an experimental technique intended to allow to set more flexible memory limits for queries. 
+ +The idea of this technique is to introduce settings that represent the guaranteed amount of memory a query can use. +When memory overcommit is enabled and the memory limit is reached, ClickHouse selects the most overcommitted query and tries to free memory by killing it. + +When the memory limit is reached, any query waits for some time while attempting to allocate new memory. +If the timeout expires and memory has been freed, the query continues execution. +Otherwise, an exception is thrown and the query is killed. + +The query to stop or kill is selected by either the global or the user overcommit tracker, depending on which memory limit was reached. +If the overcommit tracker cannot choose a query to stop, a MEMORY_LIMIT_EXCEEDED exception is thrown. + +## User overcommit tracker + +The user overcommit tracker finds the query with the biggest overcommit ratio in the user's query list. +The overcommit ratio for a query is computed as the number of allocated bytes divided by the value of the `memory_overcommit_ratio_denominator` setting. + +If `memory_overcommit_ratio_denominator` for the query equals zero, the overcommit tracker won't choose this query. + +The waiting timeout is set by the `memory_usage_overcommit_max_wait_microseconds` setting. + +**Example** + +```sql +SELECT number FROM numbers(1000) GROUP BY number SETTINGS memory_overcommit_ratio_denominator=4000, memory_usage_overcommit_max_wait_microseconds=500 +``` + +## Global overcommit tracker + +The global overcommit tracker finds the query with the biggest overcommit ratio in the list of all queries. +In this case the overcommit ratio is computed as the number of allocated bytes divided by the value of the `memory_overcommit_ratio_denominator_for_user` setting. + +If `memory_overcommit_ratio_denominator_for_user` for the query equals zero, the overcommit tracker won't choose this query. + +The waiting timeout is set by the `global_memory_usage_overcommit_max_wait_microseconds` parameter in the configuration file. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 8f2b9bc86fc..76fbc5f239d 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4263,3 +4263,29 @@ Possible values: - 1 — Enabled. Default value: 1. + +## memory_overcommit_ratio_denominator + +It represents the soft memory limit used when the hard limit is reached on the user level. +This value is used to compute the overcommit ratio for the query. +Zero means skip the query. +Read more about [memory overcommit](memory-overcommit.md). + +Default value: `1GiB`. + +## memory_usage_overcommit_max_wait_microseconds + +The maximum time a thread waits for memory to be freed in the case of memory overcommit on the user level. +If the timeout is reached and memory is not freed, an exception is thrown. +Read more about [memory overcommit](memory-overcommit.md). + +Default value: `200`. + +## memory_overcommit_ratio_denominator_for_user + +It represents the soft memory limit used when the hard limit is reached on the global level. +This value is used to compute the overcommit ratio for the query. +Zero means skip the query. +Read more about [memory overcommit](memory-overcommit.md). + +Default value: `1GiB`.
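The three settings above live in the query-level settings reference. As a minimal sketch (not taken from the patch itself), they might be combined for a single query roughly as follows, assuming all of them can be passed through a `SETTINGS` clause; the values shown are arbitrary:

```sql
-- Hypothetical values: use ~4 GB as the soft-limit denominator for both the user-level
-- and the global overcommit ratio, and wait up to 500 microseconds for memory to be
-- freed before the allocation fails.
SELECT number
FROM numbers(1000)
GROUP BY number
SETTINGS
    memory_overcommit_ratio_denominator = 4000000000,
    memory_overcommit_ratio_denominator_for_user = 4000000000,
    memory_usage_overcommit_max_wait_microseconds = 500;
```

The global waiting timeout, by contrast, is controlled by the `global_memory_usage_overcommit_max_wait_microseconds` server parameter and is set in the configuration file rather than per query.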
diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index e4b1fdd3bbb..bd05f3b4ad2 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -13,7 +13,7 @@ Simhash is a hash function, which returns close hash values for close (similar) [Interprets](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the [MD5](https://en.wikipedia.org/wiki/MD5) hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the resulting string, and interprets them as `UInt64` in big-endian byte order. -``` sql +```sql halfMD5(par1, ...) ``` @@ -30,11 +30,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** -``` sql +```sql SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type; ``` -``` text +```response ┌────────halfMD5hash─┬─type───┐ │ 186182704141653334 │ UInt64 │ └────────────────────┴────────┘ @@ -54,7 +54,7 @@ If you want to get the same result as output by the md5sum utility, use lower(he Produces a 64-bit [SipHash](https://131002.net/siphash/) hash value. -``` sql +```sql sipHash64(par1,...) ``` @@ -77,11 +77,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** -``` sql +```sql SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type; ``` -``` text +```response ┌──────────────SipHash─┬─type───┐ │ 13726873534472839665 │ UInt64 │ └──────────────────────┴────────┘ @@ -93,7 +93,7 @@ Produces a 128-bit [SipHash](https://131002.net/siphash/) hash value. Differs fr **Syntax** -``` sql +```sql sipHash128(par1,...) ``` @@ -111,13 +111,13 @@ Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md). Query: -``` sql +```sql SELECT hex(sipHash128('foo', '\x01', 3)); ``` Result: -``` text +```response ┌─hex(sipHash128('foo', '', 3))────┐ │ 9DE516A64A414D4B1B609415E4523F24 │ └──────────────────────────────────┘ @@ -127,7 +127,7 @@ Result: Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value. -``` sql +```sql cityHash64(par1,...) ``` @@ -145,11 +145,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. Call example: -``` sql +```sql SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type; ``` -``` text +```response ┌─────────────CityHash─┬─type───┐ │ 12072650598913549138 │ UInt64 │ └──────────────────────┴────────┘ @@ -157,7 +157,7 @@ SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0 The following example shows how to compute the checksum of the entire table with accuracy up to the row order: -``` sql +```sql SELECT groupBitXor(cityHash64(*)) FROM table ``` @@ -177,7 +177,7 @@ Calculates SHA-1, SHA-224, SHA-256, SHA-512 hash from a string and returns the r **Syntax** -``` sql +```sql SHA1('s') ... 
SHA512('s') @@ -203,24 +203,62 @@ Use the [hex](../functions/encoding-functions.md#hex) function to represent the Query: -``` sql +```sql SELECT hex(SHA1('abc')); ``` Result: -``` text +```response ┌─hex(SHA1('abc'))─────────────────────────┐ │ A9993E364706816ABA3E25717850C26C9CD0D89D │ └──────────────────────────────────────────┘ ``` +## BLAKE3 {#blake3} + +Calculates BLAKE3 hash string and returns the resulting set of bytes as [FixedString](../data-types/fixedstring.md). + +**Syntax** + +```sql +BLAKE3('s') +``` + +This cryptographic hash-function is integrated into ClickHouse with BLAKE3 Rust library. The function is rather fast and shows approximately two times faster performance compared to SHA-2, while generating hashes of the same length as SHA-256. + +**Arguments** + +- s - input string for BLAKE3 hash calculation. [String](../data-types/string.md). + +**Return value** + +- BLAKE3 hash as a byte array with type FixedString(32). + +Type: [FixedString](../data-types/fixedstring.md). + +**Example** + +Use function [hex](../functions/encoding-functions.md#hex) to represent the result as a hex-encoded string. + +Query: +```sql +SELECT hex(BLAKE3('ABC')) +``` + +Result: +```sql +┌─hex(BLAKE3('ABC'))───────────────────────────────────────────────┐ +│ D1717274597CF0289694F75D96D444B992A096F1AFD8E7BBFA6EBB1D360FEDFC │ +└──────────────────────────────────────────────────────────────────┘ +``` + ## URLHash(url\[, N\]) {#urlhashurl-n} A fast, decent-quality non-cryptographic hash function for a string obtained from a URL using some type of normalization. `URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`,`?` or `#` at the end, if present. `URLHash(s, N)` – Calculates a hash from a string up to the N level in the URL hierarchy, without one of the trailing symbols `/`,`?` or `#` at the end, if present. -Levels are the same as in URLHierarchy. +Levels are the same as in URLHierarchy. ## farmFingerprint64 {#farmfingerprint64} @@ -228,7 +266,7 @@ Levels are the same as in URLHierarchy. Produces a 64-bit [FarmHash](https://github.com/google/farmhash) or Fingerprint value. `farmFingerprint64` is preferred for a stable and portable value. -``` sql +```sql farmFingerprint64(par1, ...) farmHash64(par1, ...) ``` @@ -245,11 +283,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** -``` sql +```sql SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type; ``` -``` text +```response ┌─────────────FarmHash─┬─type───┐ │ 17790458267262532859 │ UInt64 │ └──────────────────────┴────────┘ @@ -261,7 +299,7 @@ Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add97 **Syntax** -``` sql +```sql SELECT javaHash('') ``` @@ -273,13 +311,13 @@ A `Int32` data type hash value. Query: -``` sql +```sql SELECT javaHash('Hello, world!'); ``` Result: -``` text +```response ┌─javaHash('Hello, world!')─┐ │ -1880044555 │ └───────────────────────────┘ @@ -291,7 +329,7 @@ Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add97 **Syntax** -``` sql +```sql javaHashUTF16LE(stringUtf16le) ``` @@ -309,13 +347,13 @@ Correct query with UTF-16LE encoded string. 
Query: -``` sql +```sql SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')); ``` Result: -``` text +```response ┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ │ 3556498 │ └──────────────────────────────────────────────────────────────┘ @@ -325,7 +363,7 @@ Result: Calculates `HiveHash` from a string. -``` sql +```sql SELECT hiveHash('') ``` @@ -341,13 +379,13 @@ Type: `hiveHash`. Query: -``` sql +```sql SELECT hiveHash('Hello, world!'); ``` Result: -``` text +```response ┌─hiveHash('Hello, world!')─┐ │ 267439093 │ └───────────────────────────┘ @@ -357,7 +395,7 @@ Result: Produces a 64-bit [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) hash value. -``` sql +```sql metroHash64(par1, ...) ``` @@ -371,11 +409,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** -``` sql +```sql SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type; ``` -``` text +```response ┌────────────MetroHash─┬─type───┐ │ 14235658766382344533 │ UInt64 │ └──────────────────────┴────────┘ @@ -391,7 +429,7 @@ For more information, see the link: [JumpConsistentHash](https://arxiv.org/pdf/1 Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value. -``` sql +```sql murmurHash2_32(par1, ...) murmurHash2_64(par1, ...) ``` @@ -407,11 +445,11 @@ Both functions take a variable number of input parameters. Arguments can be any **Example** -``` sql +```sql SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type; ``` -``` text +```response ┌──────────MurmurHash2─┬─type───┐ │ 11832096901709403633 │ UInt64 │ └──────────────────────┴────────┘ @@ -423,7 +461,7 @@ Calculates a 64-bit [MurmurHash2](https://github.com/aappleby/smhasher) hash val **Syntax** -``` sql +```sql gccMurmurHash(par1, ...) ``` @@ -441,7 +479,7 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT gccMurmurHash(1, 2, 3) AS res1, gccMurmurHash(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) AS res2 @@ -449,7 +487,7 @@ SELECT Result: -``` text +```response ┌─────────────────res1─┬────────────────res2─┐ │ 12384823029245979431 │ 1188926775431157506 │ └──────────────────────┴─────────────────────┘ @@ -459,7 +497,7 @@ Result: Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value. -``` sql +```sql murmurHash3_32(par1, ...) murmurHash3_64(par1, ...) ``` @@ -475,11 +513,11 @@ Both functions take a variable number of input parameters. Arguments can be any **Example** -``` sql +```sql SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` -``` text +```response ┌─MurmurHash3─┬─type───┐ │ 2152717 │ UInt32 │ └─────────────┴────────┘ @@ -491,7 +529,7 @@ Produces a 128-bit [MurmurHash3](https://github.com/aappleby/smhasher) hash valu **Syntax** -``` sql +```sql murmurHash3_128(expr) ``` @@ -509,13 +547,13 @@ Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md). Query: -``` sql +```sql SELECT hex(murmurHash3_128('foo', 'foo', 'foo')); ``` Result: -``` text +```response ┌─hex(murmurHash3_128('foo', 'foo', 'foo'))─┐ │ F8F7AD9B6CD4CF117A71E277E2EC2931 │ └───────────────────────────────────────────┘ @@ -525,7 +563,7 @@ Result: Calculates `xxHash` from a string. It is proposed in two flavors, 32 and 64 bits. 
-``` sql +```sql SELECT xxHash32('') OR @@ -543,13 +581,13 @@ Type: `xxHash`. Query: -``` sql +```sql SELECT xxHash32('Hello, world!'); ``` Result: -``` text +```response ┌─xxHash32('Hello, world!')─┐ │ 834093149 │ └───────────────────────────┘ @@ -567,7 +605,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql ngramSimHash(string[, ngramsize]) ``` @@ -586,13 +624,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT ngramSimHash('ClickHouse') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 1627567969 │ └────────────┘ @@ -606,7 +644,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql ngramSimHashCaseInsensitive(string[, ngramsize]) ``` @@ -625,13 +663,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash; ``` Result: -``` text +```response ┌──────Hash─┐ │ 562180645 │ └───────────┘ @@ -645,7 +683,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql ngramSimHashUTF8(string[, ngramsize]) ``` @@ -664,13 +702,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT ngramSimHashUTF8('ClickHouse') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 1628157797 │ └────────────┘ @@ -684,7 +722,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql ngramSimHashCaseInsensitiveUTF8(string[, ngramsize]) ``` @@ -703,13 +741,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 1636742693 │ └────────────┘ @@ -723,7 +761,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql wordShingleSimHash(string[, shinglesize]) ``` @@ -742,13 +780,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 2328277067 │ └────────────┘ @@ -762,7 +800,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql wordShingleSimHashCaseInsensitive(string[, shinglesize]) ``` @@ -781,13 +819,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 2194812424 │ └────────────┘ @@ -801,7 +839,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql wordShingleSimHashUTF8(string[, shinglesize]) ``` @@ -820,13 +858,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
Query: -``` sql +```sql SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 2328277067 │ └────────────┘ @@ -840,7 +878,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](.. **Syntax** -``` sql +```sql wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize]) ``` @@ -859,13 +897,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: -``` sql +```sql SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Result: -``` text +```response ┌───────Hash─┐ │ 2194812424 │ └────────────┘ @@ -879,7 +917,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql ngramMinHash(string[, ngramsize, hashnum]) ``` @@ -899,13 +937,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT ngramMinHash('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (18333312859352735453,9054248444481805918) │ └────────────────────────────────────────────┘ @@ -919,7 +957,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql ngramMinHashCaseInsensitive(string[, ngramsize, hashnum]) ``` @@ -939,13 +977,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (2106263556442004574,13203602793651726206) │ └────────────────────────────────────────────┘ @@ -959,7 +997,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql ngramMinHashUTF8(string[, ngramsize, hashnum]) ``` @@ -979,13 +1017,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT ngramMinHashUTF8('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (18333312859352735453,6742163577938632877) │ └────────────────────────────────────────────┘ @@ -999,7 +1037,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum]) ``` @@ -1019,13 +1057,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple───────────────────────────────────────┐ │ (12493625717655877135,13203602793651726206) │ └─────────────────────────────────────────────┘ @@ -1037,7 +1075,7 @@ Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-gram **Syntax** -``` sql +```sql ngramMinHashArg(string[, ngramsize, hashnum]) ``` @@ -1057,13 +1095,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT ngramMinHashArg('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │ 
└───────────────────────────────────────────────────────────────────────────────┘ @@ -1075,7 +1113,7 @@ Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-gram **Syntax** -``` sql +```sql ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum]) ``` @@ -1095,13 +1133,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1113,7 +1151,7 @@ Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram **Syntax** -``` sql +```sql ngramMinHashArgUTF8(string[, ngramsize, hashnum]) ``` @@ -1133,13 +1171,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1151,7 +1189,7 @@ Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram **Syntax** -``` sql +```sql ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum]) ``` @@ -1171,13 +1209,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1191,7 +1229,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql wordShingleMinHash(string[, shinglesize, hashnum]) ``` @@ -1211,13 +1249,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (16452112859864147620,5844417301642981317) │ └────────────────────────────────────────────┘ @@ -1231,7 +1269,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum]) ``` @@ -1251,13 +1289,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────┐ │ (3065874883688416519,1634050779997673240) │ └───────────────────────────────────────────┘ @@ -1271,7 +1309,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql 
wordShingleMinHashUTF8(string[, shinglesize, hashnum]) ``` @@ -1291,13 +1329,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (16452112859864147620,5844417301642981317) │ └────────────────────────────────────────────┘ @@ -1311,7 +1349,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance]( **Syntax** -``` sql +```sql wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum]) ``` @@ -1331,13 +1369,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere Query: -``` sql +```sql SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────┐ │ (3065874883688416519,1634050779997673240) │ └───────────────────────────────────────────┘ @@ -1349,7 +1387,7 @@ Splits a ASCII string into parts (shingles) of `shinglesize` words each and retu **Syntax** -``` sql +```sql wordShingleMinHashArg(string[, shinglesize, hashnum]) ``` @@ -1369,13 +1407,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────┐ │ (('OLAP','database','analytical'),('online','oriented','processing')) │ └───────────────────────────────────────────────────────────────────────┘ @@ -1387,7 +1425,7 @@ Splits a ASCII string into parts (shingles) of `shinglesize` words each and retu **Syntax** -``` sql +```sql wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum]) ``` @@ -1407,13 +1445,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────────────────────────────────┐ │ (('queries','database','analytical'),('oriented','processing','DBMS')) │ └────────────────────────────────────────────────────────────────────────┘ @@ -1425,7 +1463,7 @@ Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and retu **Syntax** -``` sql +```sql wordShingleMinHashArgUTF8(string[, shinglesize, hashnum]) ``` @@ -1445,13 +1483,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Result: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────┐ │ (('OLAP','database','analytical'),('online','oriented','processing')) │ └───────────────────────────────────────────────────────────────────────┘ @@ -1463,7 +1501,7 @@ Splits a UTF-8 string into parts (shingles) of 
`shinglesize` words each and retu **Syntax** -``` sql +```sql wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum]) ``` @@ -1483,13 +1521,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen Query: -``` sql +```sql SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Result: -``` text +```response ┌─Tuple──────────────────────────────────────────────────────────────────┐ │ (('queries','database','analytical'),('oriented','processing','DBMS')) │ └────────────────────────────────────────────────────────────────────────┘ diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 12091fcbc3f..0065275519b 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -13,7 +13,7 @@ Simhash – это хеш-функция, которая для близких [Интерпретирует](../../sql-reference/functions/hash-functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш [MD5](https://ru.wikipedia.org/wiki/MD5) для каждой из них. Затем объединяет хэши, берет первые 8 байт хэша результирующей строки и интерпретирует их как значение типа `UInt64` с big-endian порядком байтов. -``` sql +```sql halfMD5(par1, ...) ``` @@ -30,11 +30,11 @@ halfMD5(par1, ...) **Пример** -``` sql +```sql SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type; ``` -``` text +```response ┌────────halfMD5hash─┬─type───┐ │ 186182704141653334 │ UInt64 │ └────────────────────┴────────┘ @@ -54,7 +54,7 @@ SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00') Генерирует 64-х битное значение [SipHash](https://131002.net/siphash/). -``` sql +```sql sipHash64(par1,...) ``` @@ -77,11 +77,11 @@ sipHash64(par1,...) **Пример** -``` sql +```sql SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type; ``` -``` text +```response ┌──────────────SipHash─┬─type───┐ │ 13726873534472839665 │ UInt64 │ └──────────────────────┴────────┘ @@ -93,7 +93,7 @@ SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00 **Синтаксис** -``` sql +```sql sipHash128(par1,...) ``` @@ -111,13 +111,13 @@ sipHash128(par1,...) Запрос: -``` sql +```sql SELECT hex(sipHash128('foo', '\x01', 3)); ``` Результат: -``` text +```response ┌─hex(sipHash128('foo', '', 3))────┐ │ 9DE516A64A414D4B1B609415E4523F24 │ └──────────────────────────────────┘ @@ -127,7 +127,7 @@ SELECT hex(sipHash128('foo', '\x01', 3)); Генерирует 64-х битное значение [CityHash](https://github.com/google/cityhash). -``` sql +```sql cityHash64(par1,...) ``` @@ -145,11 +145,11 @@ cityHash64(par1,...) 
Пример вызова: -``` sql +```sql SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type; ``` -``` text +```response ┌─────────────CityHash─┬─type───┐ │ 12072650598913549138 │ UInt64 │ └──────────────────────┴────────┘ @@ -157,7 +157,7 @@ SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0 А вот так вы можете вычислить чексумму всей таблицы с точностью до порядка строк: -``` sql +```sql SELECT groupBitXor(cityHash64(*)) FROM table ``` @@ -177,7 +177,7 @@ SELECT groupBitXor(cityHash64(*)) FROM table **Синтаксис** -``` sql +```sql SHA1('s') ... SHA512('s') @@ -203,18 +203,56 @@ SHA512('s') Запрос: -``` sql +```sql SELECT hex(SHA1('abc')); ``` Результат: -``` text +```response ┌─hex(SHA1('abc'))─────────────────────────┐ │ A9993E364706816ABA3E25717850C26C9CD0D89D │ └──────────────────────────────────────────┘ ``` +## BLAKE3 {#blake3} + +Вычисляет BLAKE3 хеш строки и возвращает полученный набор байт в виде [FixedString](../data-types/fixedstring.md). + +**Синтаксис** + +```sql +BLAKE3('s') +``` + +Данная криптографическая функция интегрирована в ClickHouse из Rust-библиотеки. Функция работает сравнительно быстро, показывая в 2 раза более быстрые результаты по сравнению с SHA-2, генерируя хеши аналогичной SHA-256 длины. + +**Параметры** + +- s - входная строка для вычисления хеша BLAKE3. [String](../data-types/string.md). + +**Возвращаемое значение** + +- Хеш BLAKE3 в виде шестнадцатеричной строки, имеющей тип FixedString(32). + +Тип: [FixedString](../data-types/fixedstring.md). + +**Пример** + +Используйте функцию [hex](../functions/encoding-functions.md#hex) для представления результата в виде строки с шестнадцатеричной кодировкой. + +Запрос: +```sql +SELECT hex(BLAKE3('ABC')) +``` + +Результат: +```response +┌─hex(BLAKE3('ABC'))───────────────────────────────────────────────┐ +│ D1717274597CF0289694F75D96D444B992A096F1AFD8E7BBFA6EBB1D360FEDFC │ +└──────────────────────────────────────────────────────────────────┘ +``` + ## URLHash(url\[, N\]) {#urlhashurl-n} Быстрая не криптографическая хэш-функция неплохого качества для строки, полученной из URL путём некоторой нормализации. @@ -228,7 +266,7 @@ SELECT hex(SHA1('abc')); Создает 64-битное значение [FarmHash](https://github.com/google/farmhash), независимое от платформы (архитектуры сервера), что важно, если значения сохраняются или используются для разбиения данных на группы. -``` sql +```sql farmFingerprint64(par1, ...) farmHash64(par1, ...) ``` @@ -245,11 +283,11 @@ farmHash64(par1, ...) **Пример** -``` sql +```sql SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type; ``` -``` text +```response ┌─────────────FarmHash─┬─type───┐ │ 17790458267262532859 │ UInt64 │ └──────────────────────┴────────┘ @@ -259,7 +297,7 @@ SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0 Вычисляет [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) от строки. `JavaHash` не отличается ни скоростью, ни качеством, поэтому эту функцию следует считать устаревшей. Используйте эту функцию, если вам необходимо получить значение хэша по такому же алгоритму. 
-``` sql +```sql SELECT javaHash('') ``` @@ -273,13 +311,13 @@ SELECT javaHash('') Запрос: -``` sql +```sql SELECT javaHash('Hello, world!'); ``` Результат: -``` text +```response ┌─javaHash('Hello, world!')─┐ │ -1880044555 │ └───────────────────────────┘ @@ -291,7 +329,7 @@ SELECT javaHash('Hello, world!'); **Синтаксис** -``` sql +```sql javaHashUTF16LE(stringUtf16le) ``` @@ -311,13 +349,13 @@ javaHashUTF16LE(stringUtf16le) Запрос: -``` sql +```sql SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')); ``` Результат: -``` text +```response ┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ │ 3556498 │ └──────────────────────────────────────────────────────────────┘ @@ -327,7 +365,7 @@ SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')); Вычисляет `HiveHash` от строки. -``` sql +```sql SELECT hiveHash('') ``` @@ -343,13 +381,13 @@ SELECT hiveHash('') Запрос: -``` sql +```sql SELECT hiveHash('Hello, world!'); ``` Результат: -``` text +```response ┌─hiveHash('Hello, world!')─┐ │ 267439093 │ └───────────────────────────┘ @@ -359,7 +397,7 @@ SELECT hiveHash('Hello, world!'); Генерирует 64-х битное значение [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/). -``` sql +```sql metroHash64(par1, ...) ``` @@ -373,11 +411,11 @@ metroHash64(par1, ...) **Пример** -``` sql +```sql SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type; ``` -``` text +```response ┌────────────MetroHash─┬─type───┐ │ 14235658766382344533 │ UInt64 │ └──────────────────────┴────────┘ @@ -393,7 +431,7 @@ SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00: Генерирует значение [MurmurHash2](https://github.com/aappleby/smhasher). -``` sql +```sql murmurHash2_32(par1, ...) murmurHash2_64(par1, ...) ``` @@ -409,11 +447,11 @@ murmurHash2_64(par1, ...) **Пример** -``` sql +```sql SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type; ``` -``` text +```response ┌──────────MurmurHash2─┬─type───┐ │ 11832096901709403633 │ UInt64 │ └──────────────────────┴────────┘ @@ -425,7 +463,7 @@ SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23: **Синтаксис** -``` sql +```sql gccMurmurHash(par1, ...); ``` @@ -443,7 +481,7 @@ gccMurmurHash(par1, ...); Запрос: -``` sql +```sql SELECT gccMurmurHash(1, 2, 3) AS res1, gccMurmurHash(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) AS res2 @@ -451,7 +489,7 @@ SELECT Результат: -``` text +```response ┌─────────────────res1─┬────────────────res2─┐ │ 12384823029245979431 │ 1188926775431157506 │ └──────────────────────┴─────────────────────┘ @@ -461,7 +499,7 @@ SELECT Генерирует значение [MurmurHash3](https://github.com/aappleby/smhasher). -``` sql +```sql murmurHash3_32(par1, ...) murmurHash3_64(par1, ...) ``` @@ -477,11 +515,11 @@ murmurHash3_64(par1, ...) 
**Пример** -``` sql +```sql SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` -``` text +```response ┌─MurmurHash3─┬─type───┐ │ 2152717 │ UInt32 │ └─────────────┴────────┘ @@ -493,7 +531,7 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23: **Синтаксис** -``` sql +```sql murmurHash3_128(expr) ``` @@ -511,13 +549,13 @@ murmurHash3_128(expr) Запрос: -``` sql +```sql SELECT hex(murmurHash3_128('foo', 'foo', 'foo')); ``` Результат: -``` text +```response ┌─hex(murmurHash3_128('foo', 'foo', 'foo'))─┐ │ F8F7AD9B6CD4CF117A71E277E2EC2931 │ └───────────────────────────────────────────┘ @@ -527,7 +565,7 @@ SELECT hex(murmurHash3_128('foo', 'foo', 'foo')); Вычисляет `xxHash` от строки. Предлагается в двух вариантах: 32 и 64 бита. -``` sql +```sql SELECT xxHash32('') OR @@ -545,13 +583,13 @@ SELECT xxHash64('') Запрос: -``` sql +```sql SELECT xxHash32('Hello, world!'); ``` Результат: -``` text +```response ┌─xxHash32('Hello, world!')─┐ │ 834093149 │ └───────────────────────────┘ @@ -569,7 +607,7 @@ SELECT xxHash32('Hello, world!'); **Синтаксис** -``` sql +```sql ngramSimHash(string[, ngramsize]) ``` @@ -588,13 +626,13 @@ ngramSimHash(string[, ngramsize]) Запрос: -``` sql +```sql SELECT ngramSimHash('ClickHouse') AS Hash; ``` Результат: -``` text +```response ┌───────Hash─┐ │ 1627567969 │ └────────────┘ @@ -608,7 +646,7 @@ SELECT ngramSimHash('ClickHouse') AS Hash; **Синтаксис** -``` sql +```sql ngramSimHashCaseInsensitive(string[, ngramsize]) ``` @@ -627,13 +665,13 @@ ngramSimHashCaseInsensitive(string[, ngramsize]) Запрос: -``` sql +```sql SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash; ``` Результат: -``` text +```response ┌──────Hash─┐ │ 562180645 │ └───────────┘ @@ -647,7 +685,7 @@ SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash; **Синтаксис** -``` sql +```sql ngramSimHashUTF8(string[, ngramsize]) ``` @@ -666,13 +704,13 @@ ngramSimHashUTF8(string[, ngramsize]) Запрос: -``` sql +```sql SELECT ngramSimHashUTF8('ClickHouse') AS Hash; ``` Результат: -``` text +```response ┌───────Hash─┐ │ 1628157797 │ └────────────┘ @@ -686,7 +724,7 @@ SELECT ngramSimHashUTF8('ClickHouse') AS Hash; **Синтаксис** -``` sql +```sql ngramSimHashCaseInsensitiveUTF8(string[, ngramsize]) ``` @@ -705,13 +743,13 @@ ngramSimHashCaseInsensitiveUTF8(string[, ngramsize]) Запрос: -``` sql +```sql SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash; ``` Результат: -``` text +```response ┌───────Hash─┐ │ 1636742693 │ └────────────┘ @@ -725,7 +763,7 @@ SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash; **Синтаксис** -``` sql +```sql wordShingleSimHash(string[, shinglesize]) ``` @@ -744,13 +782,13 @@ wordShingleSimHash(string[, shinglesize]) Запрос: -``` sql +```sql SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Результат: -``` text +```response ┌───────Hash─┐ │ 2328277067 │ └────────────┘ @@ -764,7 +802,7 @@ SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management **Синтаксис** -``` sql +```sql wordShingleSimHashCaseInsensitive(string[, shinglesize]) ``` @@ -783,13 +821,13 @@ wordShingleSimHashCaseInsensitive(string[, shinglesize]) Запрос: -``` sql +```sql SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Результат: 
-``` text +```response ┌───────Hash─┐ │ 2194812424 │ └────────────┘ @@ -803,7 +841,7 @@ SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented data **Синтаксис** -``` sql +```sql wordShingleSimHashUTF8(string[, shinglesize]) ``` @@ -822,13 +860,13 @@ wordShingleSimHashUTF8(string[, shinglesize]) Запрос: -``` sql +```sql SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Результат: -``` text +```response ┌───────Hash─┐ │ 2328277067 │ └────────────┘ @@ -842,7 +880,7 @@ SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database manage **Синтаксис** -``` sql +```sql wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize]) ``` @@ -861,13 +899,13 @@ wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize]) Запрос: -``` sql +```sql SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash; ``` Результат: -``` text +```response ┌───────Hash─┐ │ 2194812424 │ └────────────┘ @@ -881,7 +919,7 @@ SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented **Синтаксис** -``` sql +```sql ngramMinHash(string[, ngramsize, hashnum]) ``` @@ -901,13 +939,13 @@ ngramMinHash(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHash('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (18333312859352735453,9054248444481805918) │ └────────────────────────────────────────────┘ @@ -921,7 +959,7 @@ SELECT ngramMinHash('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql ngramMinHashCaseInsensitive(string[, ngramsize, hashnum]) ``` @@ -941,13 +979,13 @@ ngramMinHashCaseInsensitive(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (2106263556442004574,13203602793651726206) │ └────────────────────────────────────────────┘ @@ -960,7 +998,7 @@ SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple; Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают. 
**Синтаксис** -``` sql +```sql ngramMinHashUTF8(string[, ngramsize, hashnum]) ``` @@ -980,13 +1018,13 @@ ngramMinHashUTF8(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashUTF8('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (18333312859352735453,6742163577938632877) │ └────────────────────────────────────────────┘ @@ -1000,7 +1038,7 @@ SELECT ngramMinHashUTF8('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum]) ``` @@ -1020,13 +1058,13 @@ ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple───────────────────────────────────────┐ │ (12493625717655877135,13203602793651726206) │ └─────────────────────────────────────────────┘ @@ -1038,7 +1076,7 @@ SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql ngramMinHashArg(string[, ngramsize, hashnum]) ``` @@ -1058,13 +1096,13 @@ ngramMinHashArg(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashArg('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1076,7 +1114,7 @@ SELECT ngramMinHashArg('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum]) ``` @@ -1096,13 +1134,13 @@ ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1114,7 +1152,7 @@ SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql ngramMinHashArgUTF8(string[, ngramsize, hashnum]) ``` @@ -1134,13 +1172,13 @@ ngramMinHashArgUTF8(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1152,7 +1190,7 @@ SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum]) ``` @@ -1172,13 +1210,13 @@ ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum]) Запрос: -``` sql +```sql SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────────────┐ │ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │ └───────────────────────────────────────────────────────────────────────────────┘ @@ -1192,7 +1230,7 @@ SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple; **Синтаксис** -``` sql +```sql wordShingleMinHash(string[, shinglesize, hashnum]) 
``` @@ -1212,13 +1250,13 @@ wordShingleMinHash(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (16452112859864147620,5844417301642981317) │ └────────────────────────────────────────────┘ @@ -1232,7 +1270,7 @@ SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management **Синтаксис** -``` sql +```sql wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum]) ``` @@ -1252,13 +1290,13 @@ wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────┐ │ (3065874883688416519,1634050779997673240) │ └───────────────────────────────────────────┘ @@ -1272,7 +1310,7 @@ SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented data **Синтаксис** -``` sql +```sql wordShingleMinHashUTF8(string[, shinglesize, hashnum]) ``` @@ -1292,13 +1330,13 @@ wordShingleMinHashUTF8(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────┐ │ (16452112859864147620,5844417301642981317) │ └────────────────────────────────────────────┘ @@ -1312,7 +1350,7 @@ SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database manage **Синтаксис** -``` sql +```sql wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum]) ``` @@ -1332,13 +1370,13 @@ wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────┐ │ (3065874883688416519,1634050779997673240) │ └───────────────────────────────────────────┘ @@ -1350,7 +1388,7 @@ SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented **Синтаксис** -``` sql +```sql wordShingleMinHashArg(string[, shinglesize, hashnum]) ``` @@ -1370,13 +1408,13 @@ wordShingleMinHashArg(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────────┐ │ (('OLAP','database','analytical'),('online','oriented','processing')) │ └───────────────────────────────────────────────────────────────────────┘ @@ -1388,7 +1426,7 @@ SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database managem **Синтаксис** -``` sql +```sql wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum]) ``` @@ -1408,13 +1446,13 @@ wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database 
management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────────────────────────────┐ │ (('queries','database','analytical'),('oriented','processing','DBMS')) │ └────────────────────────────────────────────────────────────────────────┘ @@ -1426,7 +1464,7 @@ SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented d **Синтаксис** -``` sql +```sql wordShingleMinHashArgUTF8(string[, shinglesize, hashnum]) ``` @@ -1446,13 +1484,13 @@ wordShingleMinHashArgUTF8(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Результат: -``` text +```response ┌─Tuple─────────────────────────────────────────────────────────────┐ │ (('OLAP','database','analytical'),('online','oriented','processing')) │ └───────────────────────────────────────────────────────────────────────┘ @@ -1464,7 +1502,7 @@ SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database man **Синтаксис** -``` sql +```sql wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum]) ``` @@ -1484,13 +1522,13 @@ wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum]) Запрос: -``` sql +```sql SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple; ``` Результат: -``` text +```response ┌─Tuple──────────────────────────────────────────────────────────────┐ │ (('queries','database','analytical'),('oriented','processing','DBMS')) │ └────────────────────────────────────────────────────────────────────────┘ diff --git a/docs/zh/sql-reference/functions/arithmetic-functions.md b/docs/zh/sql-reference/functions/arithmetic-functions.md index 7af0df084f3..15bec0d2107 100644 --- a/docs/zh/sql-reference/functions/arithmetic-functions.md +++ b/docs/zh/sql-reference/functions/arithmetic-functions.md @@ -80,4 +80,78 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 返回数值的最小公倍数。 除以零或将最小负数除以-1时抛出异常。 +## max2 {#max2} + +比较两个值并返回最大值。返回值转换为[Float64](../../sql-reference/data-types/float.md)。 + +**语法** + +```sql +max2(value1, value2) +``` + +**参数** + +- `value1` — 第一个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。 +- `value2` — 第二个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。 + +**返回值** + +- 两个值中的最大值。 + +类型: [Float](../../sql-reference/data-types/float.md)。 + +**示例** + +查询语句: + +```sql +SELECT max2(-1, 2); +``` + +结果: + +```text +┌─max2(-1, 2)─┐ +│ 2 │ +└─────────────┘ +``` + +## min2 {#min2} + +比较两个值并返回最小值。返回值类型转换为[Float64](../../sql-reference/data-types/float.md)。 + +**语法** + +```sql +min2(value1, value2) +``` + +**参数** + +- `value1` — 第一个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。 +- `value2` — 第二个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。 + +**返回值** + +- 两个值中的最小值。 + +类型: [Float](../../sql-reference/data-types/float.md)。 + +**示例** + +查询语句: + +```sql +SELECT min2(-1, 2); +``` + +结果: + +```text +┌─min2(-1, 2)─┐ +│ -1 │ +└─────────────┘ +``` + 
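+作为补充,下面再给出一个示意性示例,用于说明上文所述“返回值转换为 Float64”这一点(输出为依据该说明推测的结果):
+
+```sql
+SELECT max2(1, 2.5) AS res, toTypeName(res) AS type;
+```
+
+```text
+┌─res─┬─type────┐
+│ 2.5 │ Float64 │
+└─────┴─────────┘
+```
+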
[来源文章](https://clickhouse.com/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/zh/sql-reference/functions/array-functions.md b/docs/zh/sql-reference/functions/array-functions.md index 6ec6d8fe352..0d4f51e1ddc 100644 --- a/docs/zh/sql-reference/functions/array-functions.md +++ b/docs/zh/sql-reference/functions/array-functions.md @@ -1,16 +1,92 @@ # 数组函数 {#shu-zu-han-shu} -## empty {#empty} +## empty {#empty函数} -对于空数组返回1,对于非空数组返回0。 -结果类型是UInt8。 -该函数也适用于字符串。 +检查输入的数组是否为空。 -## notEmpty {#notempty} +**语法** -对于空数组返回0,对于非空数组返回1。 -结果类型是UInt8。 -该函数也适用于字符串。 +``` sql +empty([x]) +``` + +如果一个数组中不包含任何元素,则此数组为空数组。 + +:::注意 +可以通过启用[optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns)配置进行优化。设置`optimize_functions_to_subcolumns = 1`后,函数通过读取[size0](../../sql-reference/data-types/array.md#array-size)子列获取结果,不在读取和处理整个数组列,查询语句`SELECT empty(arr) FROM TABLE;`将转化为`SELECT arr.size0 = 0 FROM TABLE;`。 +::: + +此函数也适用于[strings](string-functions.md#empty)或[UUID](uuid-functions.md#empty)。 + +**参数** + +- `[x]` — 输入的数组,类型为[数组](../data-types/array.md)。 + +**返回值** + +- 对于空数组返回`1`,对于非空数组返回`0`。 + +类型: [UInt8](../data-types/int-uint.md)。 + +**示例** + +查询语句: + +```sql +SELECT empty([]); +``` + +结果: + +```text +┌─empty(array())─┐ +│ 1 │ +└────────────────┘ +``` + +## notEmpty {#notempty函数} + +检测输入的数组是否非空。 + +**语法** + +``` sql +notEmpty([x]) +``` + +如果一个数组至少包含一个元素,则此数组为非空数组。 + +:::注意 +可以通过启用[optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns)配置进行优化。设置`optimize_functions_to_subcolumns = 1`后,函数通过读取[size0](../../sql-reference/data-types/array.md#array-size)子列获取结果,不在读取和处理整个数组列,查询语句`SELECT notEmpty(arr) FROM TABLE;`将转化为`SELECT arr.size0 != 0 FROM TABLE;`。 +::: + +此函数也适用于[strings](string-functions.md#empty)或[UUID](uuid-functions.md#empty)。 + +**参数** + +- `[x]` — 输入的数组,类型为[数组](../data-types/array.md)。 + +**返回值** + +- 对于空数组返回`0`,对于非空数组返回`1`。 + +类型: [UInt8](../data-types/int-uint.md). 
+ +**示例** + +查询语句: + +```sql +SELECT notEmpty([1,2]); +``` + +结果: + +```text +┌─notEmpty([1, 2])─┐ +│ 1 │ +└──────────────────┘ +``` ## length {#array_functions-length} @@ -18,13 +94,15 @@ 结果类型是UInt64。 该函数也适用于字符串。 -## emptyArrayUInt8,emptyArrayUInt16,emptyArrayUInt32,emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} +可以通过启用[optimize_functions_to_subcolumns](../../operations/settings/settings.md#optimize-functions-to-subcolumns)配置进行优化。设置`optimize_functions_to_subcolumns = 1`后,函数通过读取[size0](../../sql-reference/data-types/array.md#array-size)子列获取结果,不在读取和处理整个数组列,查询语句`SELECT length(arr) FROM table`将转化为`SELECT arr.size0 FROM TABLE`。 -## emptyArrayInt8,emptyArrayInt16,emptyArrayInt32,emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} +## emptyArrayUInt8, emptyArrayUInt16, emptyArrayUInt32, emptyArrayUInt64 {#emptyarrayuint8-emptyarrayuint16-emptyarrayuint32-emptyarrayuint64} -## emptyArrayFloat32,emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} +## emptyArrayInt8, emptyArrayInt16, emptyArrayInt32, emptyArrayInt64 {#emptyarrayint8-emptyarrayint16-emptyarrayint32-emptyarrayint64} -## emptyArrayDate,emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} +## emptyArrayFloat32, emptyArrayFloat64 {#emptyarrayfloat32-emptyarrayfloat64} + +## emptyArrayDate, emptyArrayDateTime {#emptyarraydate-emptyarraydatetime} ## emptyArrayString {#emptyarraystring} @@ -34,10 +112,43 @@ 接受一个空数组并返回一个仅包含一个默认值元素的数组。 -## range(N) {#rangen} +## range(end), range(\[start, \] end \[, step\]) {#range} -返回从0到N-1的数字数组。 -以防万一,如果在数据块中创建总长度超过100,000,000个元素的数组,则抛出异常。 +返回一个以`step`作为增量步长的从`start`到`end - 1`的`UInt`类型数字数组。 + +**语法** +``` sql +range([start, ] end [, step]) +``` + +**参数** + +- `start` — 数组的第一个元素。可选项,如果设置了`step`时同样需要`start`,默认值为:0,类型为[UInt](../data-types/int-uint.md)。 +- `end` — 计数到`end`结束,但不包括`end`,必填项,类型为[UInt](../data-types/int-uint.md)。 +- `step` — 确定数组中每个元素之间的增量步长。可选项,默认值为:1,类型为[UInt](../data-types/int-uint.md)。 + +**返回值** + +- 以`step`作为增量步长的从`start`到`end - 1`的`UInt`类型数字数组。 + +**注意事项** + +- 所有参数必须是正值:`start`、`end`、`step`,类型均为`UInt`,结果数组的元素与此相同。 +- 如果查询结果的数组总长度超过[function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block)指定的元素数,将会抛出异常。 + + +**示例** + +查询语句: +``` sql +SELECT range(5), range(1, 5), range(1, 5, 2); +``` +结果: +```txt +┌─range(5)────┬─range(1, 5)─┬─range(1, 5, 2)─┐ +│ [0,1,2,3,4] │ [1,2,3,4] │ [1,3] │ +└─────────────┴─────────────┴────────────────┘ +``` ## array(x1, …), operator \[x1, …\] {#arrayx1-operator-x1} @@ -49,7 +160,9 @@ 合并参数中传递的所有数组。 - arrayConcat(arrays) +``` sql +arrayConcat(arrays) +``` **参数** @@ -62,9 +175,11 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res ``` - ┌─res───────────┐ - │ [1,2,3,4,5,6] │ - └───────────────┘ +``` text +┌─res───────────┐ +│ [1,2,3,4,5,6] │ +└───────────────┘ +``` ## arrayElement(arr,n),运算符arr\[n\] {#arrayelementarr-n-operator-arrn} @@ -156,22 +271,76 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res `SELECT hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]])` 返回 `1`. 
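+
+上面的 `hasAll` 示例以内联形式给出。作为补充,下面是一个可以直接执行的查询示例(输出为依据上述说明推测的结果):
+
+```sql
+SELECT hasAll([1, 2, 3, 4], [2, 4]) AS res;
+```
+
+```text
+┌─res─┐
+│   1 │
+└─────┘
+```
+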
+## hasSubstr {#hassubstr} + +检查 array2 的所有元素是否以相同的顺序出现在 array1 中。当且仅当 `array1 = prefix + array2 + suffix`时,该函数将返回 1。 + +``` sql +hasSubstr(array1, array2) +``` + +换句话说,函数将检查 `array2` 的所有元素是否都包含在 `array1` 中,就像 `hasAll` 函数一样。此外,它将检查元素在“array1”和“array2”中的出现顺序是否相同。 + +举例: +- `hasSubstr([1,2,3,4], [2,3])`返回`1`。然而`hasSubstr([1,2,3,4], [3,2])`将返回`0`。 +- `hasSubstr([1,2,3,4], [1,2,3])`返回`1`。然而`hasSubstr([1,2,3,4], [1,2,4])`将返回`0`。 + +**参数** + +- `array1` – 具有一组元素的任何类型数组。 +- `array2` – 具有一组元素的任何类型数组。 + +**返回值** + +- 如果`array1`中包含`array2`,返回`1`。 +- 其他情况返回`0`。 + +**特殊属性** + +- 当`array2`为空数组时,函数将返回`1`. +- `Null` 作为值处理。换句话说,`hasSubstr([1, 2, NULL, 3, 4], [2,3])` 将返回 `0`。但是,`hasSubstr([1, 2, NULL, 3, 4], [2,NULL,3])` 将返回 `1`。 +- 两个数组中**值的顺序**会影响函数结果。 + +**示例** + +`SELECT hasSubstr([], [])`返回1。 + +`SELECT hasSubstr([1, Null], [Null])`返回1。 + +`SELECT hasSubstr([1.0, 2, 3, 4], [1, 3])`返回0。 + +`SELECT hasSubstr(['a', 'b'], ['a'])`返回1。 + +`SELECT hasSubstr(['a', 'b' , 'c'], ['a', 'b'])`返回1。 + +`SELECT hasSubstr(['a', 'b' , 'c'], ['a', 'c'])`返回0。 + +`SELECT hasSubstr([[1, 2], [3, 4], [5, 6]], [[1, 2], [3, 4]])`返回1。 + ## indexOf(arr,x) {#indexofarr-x} 返回数组中第一个’x’元素的索引(从1开始),如果’x’元素不存在在数组中,则返回0。 示例: - :) SELECT indexOf([1,3,NULL,NULL],NULL) +``` sql +SELECT indexOf([1, 3, NULL, NULL], NULL) +``` - SELECT indexOf([1, 3, NULL, NULL], NULL) - - ┌─indexOf([1, 3, NULL, NULL], NULL)─┐ - │ 3 │ - └───────────────────────────────────┘ +``` text +┌─indexOf([1, 3, NULL, NULL], NULL)─┐ +│ 3 │ +└───────────────────────────────────┘ +``` 设置为«NULL»的元素将作为普通的元素值处理。 +## arrayCount(\[func,\] arr1, …) {#array-count} + +`func`将arr数组作为参数,其返回结果为非零值的数量。如果未指定“func”,则返回数组中非零元素的数量。 + +请注意,`arrayCount`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您可以将 lambda 函数作为第一个参数传递给它。 + ## countEqual(arr,x) {#countequalarr-x} 返回数组中等于x的元素的个数。相当于arrayCount(elem - \> elem = x,arr)。 @@ -180,11 +349,15 @@ SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res 示例: - SELECT countEqual([1, 2, NULL, NULL], NULL) +``` sql +SELECT countEqual([1, 2, NULL, NULL], NULL) +``` - ┌─countEqual([1, 2, NULL, NULL], NULL)─┐ - │ 2 │ - └──────────────────────────────────────┘ +``` text +┌─countEqual([1, 2, NULL, NULL], NULL)─┐ +│ 2 │ +└──────────────────────────────────────┘ +``` ## arrayEnumerate(arr) {#array_functions-arrayenumerate} @@ -204,9 +377,11 @@ WHERE CounterID = 160656 LIMIT 10 ``` - ┌─Reaches─┬──Hits─┐ - │ 95606 │ 31406 │ - └─────────┴───────┘ +``` text +┌─Reaches─┬──Hits─┐ +│ 95606 │ 31406 │ +└─────────┴───────┘ +``` 在此示例中,Reaches是转换次数(应用ARRAY JOIN后接收的字符串),Hits是浏览量(ARRAY JOIN之前的字符串)。在这种特殊情况下,您可以更轻松地获得相同的结果: @@ -218,9 +393,11 @@ FROM test.hits WHERE (CounterID = 160656) AND notEmpty(GoalsReached) ``` - ┌─Reaches─┬──Hits─┐ - │ 95606 │ 31406 │ - └─────────┴───────┘ +``` text +┌─Reaches─┬──Hits─┐ +│ 95606 │ 31406 │ +└─────────┴───────┘ +``` 此功能也可用于高阶函数。例如,您可以使用它来获取与条件匹配的元素的数组索引。 @@ -248,18 +425,20 @@ ORDER BY Reaches DESC LIMIT 10 ``` - ┌──GoalID─┬─Reaches─┬─Visits─┐ - │ 53225 │ 3214 │ 1097 │ - │ 2825062 │ 3188 │ 1097 │ - │ 56600 │ 2803 │ 488 │ - │ 1989037 │ 2401 │ 365 │ - │ 2830064 │ 2396 │ 910 │ - │ 1113562 │ 2372 │ 373 │ - │ 3270895 │ 2262 │ 812 │ - │ 1084657 │ 2262 │ 345 │ - │ 56599 │ 2260 │ 799 │ - │ 3271094 │ 2256 │ 812 │ - └─────────┴─────────┴────────┘ +``` text +┌──GoalID─┬─Reaches─┬─Visits─┐ +│ 53225 │ 3214 │ 1097 │ +│ 2825062 │ 3188 │ 1097 │ +│ 56600 │ 2803 │ 488 │ +│ 1989037 │ 2401 │ 365 │ +│ 2830064 │ 2396 │ 910 │ +│ 1113562 │ 2372 │ 373 │ +│ 3270895 │ 2262 │ 812 │ +│ 1084657 │ 2262 │ 345 │ +│ 56599 │ 2260 │ 799 │ +│ 3271094 │ 
2256 │ 812 │ +└─────────┴─────────┴────────┘ +``` 在此示例中,每个GoalID都计算转换次数(目标嵌套数据结构中的每个元素都是达到的目标,我们称之为转换)和会话数。如果没有ARRAY JOIN,我们会将会话数计为总和(Sign)。但在这种特殊情况下,行乘以嵌套的Goals结构,因此为了在此之后计算每个会话一次,我们将一个条件应用于arrayEnumerateUniq(Goals.ID)函数的值。 @@ -269,9 +448,11 @@ arrayEnumerateUniq函数可以使用与参数大小相同的多个数组。在 SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res ``` - ┌─res───────────┐ - │ [1,2,1,1,2,1] │ - └───────────────┘ +``` text +┌─res───────────┐ +│ [1,2,1,1,2,1] │ +└───────────────┘ +``` 当使用带有嵌套数据结构的ARRAY JOIN并在此结构中跨多个元素进一步聚合时,这是必需的。 @@ -291,10 +472,11 @@ SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res SELECT arrayPopBack([1, 2, 3]) AS res ``` - ┌─res───┐ - │ [1,2] │ - └───────┘ - +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` ## arrayPopFront {#arraypopfront} 从数组中删除第一项。 @@ -311,15 +493,19 @@ SELECT arrayPopBack([1, 2, 3]) AS res SELECT arrayPopFront([1, 2, 3]) AS res ``` - ┌─res───┐ - │ [2,3] │ - └───────┘ +``` text +┌─res───┐ +│ [2,3] │ +└───────┘ +``` ## arrayPushBack {#arraypushback} 添加一个元素到数组的末尾。 - arrayPushBack(array, single_value) +``` sql +arrayPushBack(array, single_value) +``` **参数** @@ -332,15 +518,19 @@ SELECT arrayPopFront([1, 2, 3]) AS res SELECT arrayPushBack(['a'], 'b') AS res ``` - ┌─res───────┐ - │ ['a','b'] │ - └───────────┘ +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` ## arrayPushFront {#arraypushfront} 将一个元素添加到数组的开头。 - arrayPushFront(array, single_value) +``` sql +arrayPushFront(array, single_value) +``` **参数** @@ -353,15 +543,19 @@ SELECT arrayPushBack(['a'], 'b') AS res SELECT arrayPushFront(['b'], 'a') AS res ``` - ┌─res───────┐ - │ ['a','b'] │ - └───────────┘ +``` text +┌─res───────┐ +│ ['a','b'] │ +└───────────┘ +``` ## arrayResize {#arrayresize} 更改数组的长度。 - arrayResize(array, size[, extender]) +``` sql +arrayResize(array, size[, extender]) +``` **参数:** @@ -377,23 +571,33 @@ SELECT arrayPushFront(['b'], 'a') AS res **调用示例** - SELECT arrayResize([1], 3) +``` sql +SELECT arrayResize([1], 3); +``` - ┌─arrayResize([1], 3)─┐ - │ [1,0,0] │ - └─────────────────────┘ +``` text +┌─arrayResize([1], 3)─┐ +│ [1,0,0] │ +└─────────────────────┘ +``` - SELECT arrayResize([1], 3, NULL) +``` sql +SELECT arrayResize([1], 3, NULL); +``` - ┌─arrayResize([1], 3, NULL)─┐ - │ [1,NULL,NULL] │ - └───────────────────────────┘ +``` text +┌─arrayResize([1], 3, NULL)─┐ +│ [1,NULL,NULL] │ +└───────────────────────────┘ +``` ## arraySlice {#arrayslice} 返回一个子数组,包含从指定位置的指定长度的元素。 - arraySlice(array, offset[, length]) +``` sql +arraySlice(array, offset[, length]) +``` **参数** @@ -407,9 +611,11 @@ SELECT arrayPushFront(['b'], 'a') AS res SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res ``` - ┌─res────────┐ - │ [2,NULL,4] │ - └────────────┘ +``` text +┌─res────────┐ +│ [2,NULL,4] │ +└────────────┘ +``` 设置为«NULL»的数组元素作为普通的数组元素值处理。 @@ -423,9 +629,11 @@ SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res SELECT arraySort([1, 3, 3, 0]); ``` - ┌─arraySort([1, 3, 3, 0])─┐ - │ [0,1,3,3] │ - └─────────────────────────┘ +``` text +┌─arraySort([1, 3, 3, 0])─┐ +│ [0,1,3,3] │ +└─────────────────────────┘ +``` 字符串排序示例: @@ -433,9 +641,11 @@ SELECT arraySort([1, 3, 3, 0]); SELECT arraySort(['hello', 'world', '!']); ``` - ┌─arraySort(['hello', 'world', '!'])─┐ - │ ['!','hello','world'] │ - └────────────────────────────────────┘ +``` text +┌─arraySort(['hello', 'world', '!'])─┐ +│ ['!','hello','world'] │ +└────────────────────────────────────┘ +``` `NULL`,`NaN`和`Inf`的排序顺序: @@ -443,9 +653,11 @@ SELECT arraySort(['hello', 'world', '!']); SELECT arraySort([1, nan, 2, NULL, 3, 
nan, -4, NULL, inf, -inf]); ``` - ┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ - │ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ - └───────────────────────────────────────────────────────────┘ +``` text +┌─arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf])─┐ +│ [-inf,-4,1,2,3,inf,nan,nan,NULL,NULL] │ +└───────────────────────────────────────────────────────────┘ +``` - `-Inf` 是数组中的第一个。 - `NULL` 是数组中的最后一个。 @@ -460,9 +672,11 @@ SELECT arraySort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]); SELECT arraySort((x) -> -x, [1, 2, 3]) as res; ``` - ┌─res─────┐ - │ [3,2,1] │ - └─────────┘ +``` text +┌─res─────┐ +│ [3,2,1] │ +└─────────┘ +``` 对于源数组的每个元素,lambda函数返回排序键,即\[1 -\> -1, 2 -\> -2, 3 -\> -3\]。由于`arraySort`函数按升序对键进行排序,因此结果为\[3,2,1\]。因此,`(x) -> -x` lambda函数将排序设置为[降序](#array_functions-reverse-sort)。 @@ -472,9 +686,11 @@ lambda函数可以接受多个参数。在这种情况下,您需要为`arraySo SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; ``` - ┌─res────────────────┐ - │ ['world', 'hello'] │ - └────────────────────┘ +``` text +┌─res────────────────┐ +│ ['world', 'hello'] │ +└────────────────────┘ +``` 这里,在第二个数组(\[2, 1\])中定义了第一个数组(\[‘hello’,‘world’\])的相应元素的排序键,即\[‘hello’ -\> 2,‘world’ -\> 1\]。 由于lambda函数中没有使用`x`,因此源数组中的实际值不会影响结果的顺序。所以,’world’将是结果中的第一个元素,’hello’将是结果中的第二个元素。 @@ -484,7 +700,7 @@ SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; ``` -``` sql +``` text ┌─res─────┐ │ [2,1,0] │ └─────────┘ @@ -494,7 +710,7 @@ SELECT arraySort((x, y) -> y, [0, 1, 2], ['c', 'b', 'a']) as res; SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; ``` -``` sql +``` text ┌─res─────┐ │ [2,1,0] │ └─────────┘ @@ -513,9 +729,11 @@ SELECT arraySort((x, y) -> -y, [0, 1, 2], [1, 2, 3]) as res; SELECT arrayReverseSort([1, 3, 3, 0]); ``` - ┌─arrayReverseSort([1, 3, 3, 0])─┐ - │ [3,3,1,0] │ - └────────────────────────────────┘ +``` text +┌─arrayReverseSort([1, 3, 3, 0])─┐ +│ [3,3,1,0] │ +└────────────────────────────────┘ +``` 字符串排序示例: @@ -523,9 +741,11 @@ SELECT arrayReverseSort([1, 3, 3, 0]); SELECT arrayReverseSort(['hello', 'world', '!']); ``` - ┌─arrayReverseSort(['hello', 'world', '!'])─┐ - │ ['world','hello','!'] │ - └───────────────────────────────────────────┘ +``` text +┌─arrayReverseSort(['hello', 'world', '!'])─┐ +│ ['world','hello','!'] │ +└───────────────────────────────────────────┘ +``` `NULL`,`NaN`和`Inf`的排序顺序: @@ -533,7 +753,7 @@ SELECT arrayReverseSort(['hello', 'world', '!']); SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; ``` -``` sql +``` text ┌─res───────────────────────────────────┐ │ [inf,3,2,1,-4,-inf,nan,nan,NULL,NULL] │ └───────────────────────────────────────┘ @@ -550,11 +770,12 @@ SELECT arrayReverseSort([1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]) as res; SELECT arrayReverseSort((x) -> -x, [1, 2, 3]) as res; ``` - ┌─res─────┐ - │ [1,2,3] │ - └─────────┘ +``` text +┌─res─────┐ +│ [1,2,3] │ +└─────────┘ +``` -数组按以下方式排序: 数组按以下方式排序: 1. 
首先,根据lambda函数的调用结果对源数组(\[1, 2, 3\])进行排序。 结果是\[3, 2, 1\]。 @@ -566,7 +787,7 @@ lambda函数可以接受多个参数。在这种情况下,您需要为`arrayRe SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; ``` -``` sql +``` text ┌─res───────────────┐ │ ['hello','world'] │ └───────────────────┘ @@ -583,7 +804,7 @@ SELECT arrayReverseSort((x, y) -> y, ['hello', 'world'], [2, 1]) as res; SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; ``` -``` sql +``` text ┌─res─────┐ │ [5,3,4] │ └─────────┘ @@ -593,7 +814,7 @@ SELECT arrayReverseSort((x, y) -> y, [4, 3, 5], ['a', 'b', 'c']) AS res; SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; ``` -``` sql +``` text ┌─res─────┐ │ [4,3,5] │ └─────────┘ @@ -610,35 +831,108 @@ SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; 一个特殊的功能。请参见[«ArrayJoin函数»](array-join.md#functions_arrayjoin)部分。 -## arrayDifference(arr) {#arraydifferencearr} +## arrayDifference {#arraydifference} -返回一个数组,其中包含所有相邻元素对之间的差值。例如: +计算相邻数组元素之间的差异。返回一个数组,其中第一个元素为 0,第二个是 `a[1] - a[0]` 之差等。结果数组中元素的类型由减法的类型推断规则确定(例如`UInt8` - `UInt8` = `Int16`)。 + +**语法** ``` sql -SELECT arrayDifference([1, 2, 3, 4]) +arrayDifference(array) ``` - ┌─arrayDifference([1, 2, 3, 4])─┐ - │ [0,1,1,1] │ - └───────────────────────────────┘ +**参数** -## arrayDistinct(arr) {#arraydistinctarr} +- `array` –类型为[数组](https://clickhouse.com/docs/en/data_types/array/)。 -返回一个包含所有数组中不同元素的数组。例如: +**返回值** + +返回相邻元素之间的差异数组。 + +类型: [UInt\*](https://clickhouse.com/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.com/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.com/docs/en/data_types/float/)。 + +**示例** + +查询语句: ``` sql -SELECT arrayDistinct([1, 2, 2, 3, 1]) +SELECT arrayDifference([1, 2, 3, 4]); ``` - ┌─arrayDistinct([1, 2, 2, 3, 1])─┐ - │ [1,2,3] │ - └────────────────────────────────┘ +结果: + +``` text +┌─arrayDifference([1, 2, 3, 4])─┐ +│ [0,1,1,1] │ +└───────────────────────────────┘ +``` + +由于结果类型为Int64导致的溢出示例: + +查询语句: + +``` sql +SELECT arrayDifference([0, 10000000000000000000]); +``` + +结果: + +``` text +┌─arrayDifference([0, 10000000000000000000])─┐ +│ [0,-8446744073709551616] │ +└────────────────────────────────────────────┘ +``` +## arrayDistinct {#arraydistinctarr} + +返回一个包含所有数组中不同元素的数组。 + +**语法** + +``` sql +arrayDistinct(array) +``` + +**参数** + +- `array` –类型为[数组](https://clickhouse.com/docs/en/data_types/array/)。 + +**返回值** + +返回一个包含不同元素的数组。 + +**示例** + +查询语句: + +``` sql +SELECT arrayDistinct([1, 2, 2, 3, 1]); +``` + +结果: + +``` text +┌─arrayDistinct([1, 2, 2, 3, 1])─┐ +│ [1,2,3] │ +└────────────────────────────────┘ +``` ## arrayEnumerateDense(arr) {#arrayenumeratedensearr} 返回与源数组大小相同的数组,指示每个元素首次出现在源数组中的位置。例如:arrayEnumerateDense(\[10,20,10,30\])= \[1,2,1,3\]。 -## arrayIntersect(arr) {#arrayintersectarr} +示例: + +``` sql +SELECT arrayEnumerateDense([10, 20, 10, 30]) +``` + +``` text +┌─arrayEnumerateDense([10, 20, 10, 30])─┐ +│ [1,2,1,3] │ +└───────────────────────────────────────┘ +``` + +## arrayIntersect(arr) {#array-functions-arrayintersect} 返回所有数组元素的交集。例如: @@ -648,18 +942,755 @@ SELECT arrayIntersect([1, 2], [1, 3], [1, 4]) AS intersect ``` - ┌─no_intersect─┬─intersect─┐ - │ [] │ [1] │ - └──────────────┴───────────┘ +``` text +┌─no_intersect─┬─intersect─┐ +│ [] │ [1] │ +└──────────────┴───────────┘ +``` -## arrayReduce(agg_func, arr1, …) {#arrayreduceagg-func-arr1} +## arrayReduce {#arrayreduce} -将聚合函数应用于数组并返回其结果。如果聚合函数具有多个参数,则此函数可应用于相同大小的多个数组。 +将聚合函数应用于数组元素并返回其结果。聚合函数的名称以单引号 `'max'`、`'sum'` 
中的字符串形式传递。使用参数聚合函数时,参数在括号中的函数名称后指示“uniqUpTo(6)”。 -arrayReduce(‘agg_func’,arr1,…) - 将聚合函数`agg_func`应用于数组`arr1 ...`。如果传递了多个数组,则相应位置上的元素将作为多个参数传递给聚合函数。例如:SELECT arrayReduce(‘max’,\[1,2,3\])= 3 +**语法** -## arrayReverse(arr) {#arrayreversearr} +``` sql +arrayReduce(agg_func, arr1, arr2, ..., arrN) +``` -返回与源数组大小相同的数组,包含反转源数组的所有元素的结果。 +**参数** + +- `agg_func` — 聚合函数的名称,应该是一个常量[string](../../sql-reference/data-types/string.md)。 +- `arr` — 任意数量的[数组](../../sql-reference/data-types/array.md)类型列作为聚合函数的参数。 + +**返回值** + +**示例** + +查询语句: + +``` sql +SELECT arrayReduce('max', [1, 2, 3]); +``` + +结果: + +``` text +┌─arrayReduce('max', [1, 2, 3])─┐ +│ 3 │ +└───────────────────────────────┘ +``` + +如果聚合函数采用多个参数,则该函数必须应用于多个相同大小的数组。 + +查询语句: + +``` sql +SELECT arrayReduce('maxIf', [3, 5], [1, 0]); +``` + +结果: + +``` text +┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐ +│ 3 │ +└──────────────────────────────────────┘ +``` + +带有参数聚合函数的示例: + +查询语句: + +``` sql +SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); +``` + +结果: + +``` text +┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐ +│ 4 │ +└─────────────────────────────────────────────────────────────┘ +``` + +## arrayReduceInRanges {#arrayreduceinranges} + +将聚合函数应用于给定范围内的数组元素,并返回一个包含与每个范围对应的结果的数组。该函数将返回与多个 `arrayReduce(agg_func, arraySlice(arr1, index, length), ...)` 相同的结果。 + +**语法** + +``` sql +arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) +``` + +**参数** + +- `agg_func` — 聚合函数的名称,应该是一个常量[string](../../sql-reference/data-types/string.md)。 +- `ranges` — 要聚合的范围应该是[元组](../../sql-reference/data-types/tuple.md)为元素的[数组](../../sql-reference/data-types/array.md),其中包含每个索引和长度范围。 +- `arr` — 任意数量的[数组](../../sql-reference/data-types/array.md)类型列作为聚合函数的参数。 + +**返回值** + +- 包含指定范围内聚合函数结果的数组。 + +类型为: [数组](../../sql-reference/data-types/array.md). + +**示例** + +查询语句: + +``` sql +SELECT arrayReduceInRanges( + 'sum', + [(1, 5), (2, 3), (3, 4), (4, 4)], + [1000000, 200000, 30000, 4000, 500, 60, 7] +) AS res +``` + +结果: + +``` text +┌─res─────────────────────────┐ +│ [1234500,234000,34560,4567] │ +└─────────────────────────────┘ +``` + +## arrayReverse(arr) {#arrayreverse} + +返回一个与原始数组大小相同的数组,其中包含相反顺序的元素。 + +示例: + +``` sql +SELECT arrayReverse([1, 2, 3]) +``` + +``` text +┌─arrayReverse([1, 2, 3])─┐ +│ [3,2,1] │ +└─────────────────────────┘ +``` + +## reverse(arr) {#array-functions-reverse} + +与[“arrayReverse”](#arrayreverse)作用相同。 + +## arrayFlatten {#arrayflatten} + +将嵌套的数组展平。 + +函数: + +- 适用于任何深度的嵌套数组。 +- 不会更改已经展平的数组。 + +展平后的数组包含来自所有源数组的所有元素。 + +**语法** + +``` sql +flatten(array_of_arrays) +``` + +别名: `flatten`. 
+ +**参数** + +- `array_of_arrays` — 嵌套[数组](../../sql-reference/data-types/array.md)。 例如:`[[1,2,3], [4,5]]`。 + +**示例** + +``` sql +SELECT flatten([[[1]], [[2], [3]]]); +``` + +``` text +┌─flatten(array(array([1]), array([2], [3])))─┐ +│ [1,2,3] │ +└─────────────────────────────────────────────┘ +``` + +## arrayCompact {#arraycompact} + +从数组中删除连续的重复元素。结果值的顺序由源数组中的顺序决定。 + +**语法** + +``` sql +arrayCompact(arr) +``` + +**参数** + +`arr` — 类型为[数组](../../sql-reference/data-types/array.md)。 + +**返回值** + +没有重复元素的数组。 + +类型为: `Array`。 + +**示例** + +查询语句: + +``` sql +SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]); +``` + +结果: + +``` text +┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐ +│ [1,nan,nan,2,3] │ +└────────────────────────────────────────────┘ +``` + +## arrayZip {#arrayzip} + +将多个数组组合成一个数组。结果数组包含按列出的参数顺序分组为元组的源数组的相应元素。 + +**语法** + +``` sql +arrayZip(arr1, arr2, ..., arrN) +``` + +**参数** + +- `arrN` — N个[数组](../../sql-reference/data-types/array.md)。 + +该函数可以采用任意数量的不同类型的数组。所有输入数组的大小必须相等。 + +**返回值** + +- 将源数组中的元素分组为[元组](../../sql-reference/data-types/tuple.md)的数组。元组中的数据类型与输入数组的类型相同,并且与传递数组的顺序相同。 + +类型为: [数组](../../sql-reference/data-types/array.md)。 + +**示例** + +查询语句: + +``` sql +SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]); +``` + +结果: + +``` text +┌─arrayZip(['a', 'b', 'c'], [5, 2, 1])─┐ +│ [('a',5),('b',2),('c',1)] │ +└──────────────────────────────────────┘ +``` + +## arrayAUC {#arrayauc} + +计算AUC (ROC曲线下的面积,这是机器学习中的一个概念,更多细节请查看:https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve)。 + +**语法** + +``` sql +arrayAUC(arr_scores, arr_labels) +``` + +**参数** + +- `arr_scores` — 分数预测模型给出。 +- `arr_labels` — 样本的标签,通常为 1 表示正样本,0 表示负样本。 + +**返回值** + +返回 Float64 类型的 AUC 值。 + +**示例** + +查询语句: + +``` sql +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]); +``` + +结果: + +``` text +┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ +│ 0.75 │ +└───────────────────────────────────────────────┘ +``` + +## arrayMap(func, arr1, …) {#array-map} + +将从 `func` 函数的原始应用中获得的数组返回给 `arr` 数组中的每个元素。 + +示例: + +``` sql +SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; +``` + +``` text +┌─res─────┐ +│ [3,4,5] │ +└─────────┘ +``` + +下面的例子展示了如何从不同的数组创建一个元素的元组: + +``` sql +SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res +``` + +``` text +┌─res─────────────────┐ +│ [(1,4),(2,5),(3,6)] │ +└─────────────────────┘ +``` + +请注意,`arrayMap` 是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。 您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayFilter(func, arr1, …) {#array-filter} + +返回一个仅包含 `arr1` 中的元素的数组,其中 `func` 返回的值不是 0。 + +示例: + +``` sql +SELECT arrayFilter(x -> x LIKE '%World%', ['Hello', 'abc World']) AS res +``` + +``` text +┌─res───────────┐ +│ ['abc World'] │ +└───────────────┘ +``` + +``` sql +SELECT + arrayFilter( + (i, x) -> x LIKE '%World%', + arrayEnumerate(arr), + ['Hello', 'abc World'] AS arr) + AS res +``` + +``` text +┌─res─┐ +│ [2] │ +└─────┘ +``` + +请注意,`arrayFilter`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。 您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayFill(func, arr1, …) {#array-fill} + +从第一个元素到最后一个元素扫描`arr1`,如果`func`返回0,则用`arr1[i - 1]`替换`arr1[i]`。`arr1`的第一个元素不会被替换。 + +示例: + +``` sql +SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res──────────────────────────────┐ +│ [1,1,3,11,12,12,12,5,6,14,14,14] │ +└──────────────────────────────────┘ +``` + +请注意,`arrayFill` 是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。 您必须将 lambda 
函数作为第一个参数传递给它,并且不能省略。 + +## arrayReverseFill(func, arr1, …) {#array-reverse-fill} + +从最后一个元素到第一个元素扫描`arr1`,如果`func`返回0,则用`arr1[i + 1]`替换`arr1[i]`。`arr1`的最后一个元素不会被替换。 + +示例: + +``` sql +SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]) AS res +``` + +``` text +┌─res────────────────────────────────┐ +│ [1,3,3,11,12,5,5,5,6,14,NULL,NULL] │ +└────────────────────────────────────┘ +``` + +请注意,`arrayReverseFill`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。 您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arraySplit(func, arr1, …) {#array-split} + +将 `arr1` 拆分为多个数组。当 `func` 返回 0 以外的值时,数组将在元素的左侧拆分。数组不会在第一个元素之前被拆分。 + +示例: + +``` sql +SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res─────────────┐ +│ [[1,2,3],[4,5]] │ +└─────────────────┘ +``` + +请注意,`arraySplit`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。 您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayReverseSplit(func, arr1, …) {#array-reverse-split} + +将 `arr1` 拆分为多个数组。当 `func` 返回 0 以外的值时,数组将在元素的右侧拆分。数组不会在最后一个元素之后被拆分。 + +示例: + +``` sql +SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]) AS res +``` + +``` text +┌─res───────────────┐ +│ [[1],[2,3,4],[5]] │ +└───────────────────┘ +``` + +请注意,`arrayReverseSplit`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。 您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayExists(\[func,\] arr1, …) {#arrayexistsfunc-arr1} + +如果 `arr` 中至少有一个元素 `func` 返回 0 以外的值,则返回 1。否则,它返回 0。 + +请注意,`arrayExists`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayAll(\[func,\] arr1, …) {#arrayallfunc-arr1} + +如果 `func` 为 `arr` 中的所有元素返回 0 以外的值,则返回 1。否则,它返回 0。 + +请注意,`arrayAll`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayFirst(func, arr1, …) {#array-first} + +返回 `arr1` 数组中 `func` 返回非 0 的值的第一个元素。 + +请注意,`arrayFirst`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayLast(func, arr1, …) {#array-last} + +返回 `arr1` 数组中的最后一个元素,其中 `func` 返回的值不是 0。 + +请注意,`arrayLast`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayFirstIndex(func, arr1, …) {#array-first-index} + +返回 `arr1` 数组中第一个元素的索引,其中 `func` 返回的值不是 0。 + +请注意,`arrayFirstIndex`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayLastIndex(func, arr1, …) {#array-last-index} + +返回 `arr1` 数组中最后一个元素的索引,其中 `func` 返回的值不是 0。 + +请注意,`arrayLastIndex`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您必须将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayMin {#array-min} + +返回源数组中的最小元素。 + +如果指定了 `func` 函数,则返回此函数转换的元素的最小值。 + +请注意,`arrayMin`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions)。您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +**语法** + +```sql +arrayMin([func,] arr) +``` + +**参数** + +- `func` — 类型为[表达式](../../sql-reference/data-types/special-data-types/expression.md)。 +- `arr` — 类型为[数组](../../sql-reference/data-types/array.md)。 + +**返回值** + +- 函数值的最小值(或数组最小值)。 + +类型:如果指定了`func`,则匹配`func`返回值类型,否则匹配数组元素类型。 + +**示例** + +查询语句: + +```sql +SELECT arrayMin([1, 2, 4]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ 1 │ +└─────┘ +``` + +查询语句: + +```sql +SELECT arrayMin(x -> (-x), [1, 2, 4]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ -4 │ +└─────┘ +``` + +## arrayMax 
{#array-max} + +返回源数组中元素的最大值。 + +如果指定了`func` 函数,则返回此函数转换的元素的最大值。 + +请注意,`arrayMax`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions). 您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +**语法** + +```sql +arrayMax([func,] arr) +``` + +**参数** + +- `func` — 类型为[表达式](../../sql-reference/data-types/special-data-types/expression.md)。 +- `arr` — 类型为[数组](../../sql-reference/data-types/array.md)。 + +**返回值** + +- 函数值的最大值(或数组最大值)。 + +类型:如果指定了`func`,则匹配`func`返回值类型,否则匹配数组元素类型。 + +**示例** + +查询语句: + +```sql +SELECT arrayMax([1, 2, 4]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ 4 │ +└─────┘ +``` + +查询语句: + +```sql +SELECT arrayMax(x -> (-x), [1, 2, 4]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ -1 │ +└─────┘ +``` + +## arraySum {#array-sum} + +返回源数组中元素的总和。 + +如果指定了 `func` 函数,则返回此函数转换的元素的总和。 + +请注意,`arraySum`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions). 您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +**语法** + +```sql +arraySum([func,] arr) +``` + +**参数** + +- `func` — 类型为[表达式](../../sql-reference/data-types/special-data-types/expression.md)。 +- `arr` — 类型为[数组](../../sql-reference/data-types/array.md)。 + +**返回值** + +- 函数值的总和(或数组总和)。 + +类型:源数组中的十进制数(或转换后的值,如果指定了`func`)-[Decimal128](../../sql-reference/data-types/decimal.md), 对于浮点数 — [Float64](../../sql-reference/data-types/float.md), 对于无符号数 — [UInt64](../../sql-reference/data-types/int-uint.md), 对于有符号数 — [Int64](../../sql-reference/data-types/int-uint.md)。 + +**示例** + +查询语句: + +```sql +SELECT arraySum([2, 3]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ 5 │ +└─────┘ +``` + +查询语句: + +```sql +SELECT arraySum(x -> x*x, [2, 3]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ 13 │ +└─────┘ +``` + +## arrayAvg {#array-avg} + +返回源数组中元素的平均值。 + +如果指定了 func 函数,则返回此函数转换的元素的平均值。 + +请注意,`arrayAvg`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions). 您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +**语法** + +```sql +arrayAvg([func,] arr) +``` + +**参数** + +- `func` — 类型为[表达式](../../sql-reference/data-types/special-data-types/expression.md)。 +- `arr` — 类型为[数组](../../sql-reference/data-types/array.md)。 + +**返回值** + +- 函数值的平均值(或数组平均值)。 + +类型为: [Float64](../../sql-reference/data-types/float.md). + +**示例** + +查询语句: + +```sql +SELECT arrayAvg([1, 2, 4]) AS res; +``` + +结果: + +```text +┌────────────────res─┐ +│ 2.3333333333333335 │ +└────────────────────┘ +``` + +查询语句: + +```sql +SELECT arrayAvg(x -> (x * x), [2, 4]) AS res; +``` + +结果: + +```text +┌─res─┐ +│ 10 │ +└─────┘ +``` + +## arrayCumSum(\[func,\] arr1, …) {#arraycumsumfunc-arr1} + +返回源数组中元素的部分和的数组(运行总和)。如果指定了 func 函数,则数组元素的值在求和之前由该函数转换。 + +示例: + +``` sql +SELECT arrayCumSum([1, 1, 1, 1]) AS res +``` + +``` text +┌─res──────────┐ +│ [1, 2, 3, 4] │ +└──────────────┘ +``` + +请注意,`arrayCumSum`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions). 您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayCumSumNonNegative(arr) {#arraycumsumnonnegativearr} + +与 `arrayCumSum` 相同,返回源数组中元素的部分和的数组(运行总和)。不同的`arrayCumSum`,当返回值包含小于零的值时,将该值替换为零,并以零参数执行后续计算。例如: + +``` sql +SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res +``` + +``` text +┌─res───────┐ +│ [1,2,0,1] │ +└───────────┘ +``` +请注意`arraySumNonNegative`是一个[高阶函数](../../sql-reference/functions/index.md#higher-order-functions). 
您可以将 lambda 函数作为第一个参数传递给它,并且不能省略。 + +## arrayProduct {#arrayproduct} + +将一个[数组](../../sql-reference/data-types/array.md)中的元素相乘。 + +**语法** + +``` sql +arrayProduct(arr) +``` + +**参数** + +- `arr` — 数值类型的[数组](../../sql-reference/data-types/array.md)。 + +**返回值** + +- 数组元素的乘积。 + +类型为: [Float64](../../sql-reference/data-types/float.md). + +**示例** + +查询语句: + +``` sql +SELECT arrayProduct([1,2,3,4,5,6]) as res; +``` + +结果: + +``` text +┌─res───┐ +│ 720 │ +└───────┘ +``` + +查询语句: + +``` sql +SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as res, toTypeName(res); +``` + +返回值类型总是[Float64](../../sql-reference/data-types/float.md). 结果: + +``` text +┌─res─┬─toTypeName(arrayProduct(array(toDecimal64(1, 8), toDecimal64(2, 8), toDecimal64(3, 8))))─┐ +│ 6 │ Float64 │ +└─────┴──────────────────────────────────────────────────────────────────────────────────────────┘ +``` [来源文章](https://clickhouse.com/docs/en/query_language/functions/array_functions/) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index fa1664b120a..3a6dfb161a2 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -109,7 +109,7 @@ std::vector Client::loadWarningMessages() connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */, QueryProcessingStage::Complete, &global_context->getSettingsRef(), - &global_context->getClientInfo(), false); + &global_context->getClientInfo(), false, {}); while (true) { Packet packet = connection->receivePacket(); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 2b1a0809143..b0931f678f7 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -334,7 +334,12 @@ Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port return socket_address; } -Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const +Poco::Net::SocketAddress Server::socketBindListen( + const Poco::Util::AbstractConfiguration & config, + Poco::Net::ServerSocket & socket, + const std::string & host, + UInt16 port, + [[maybe_unused]] bool secure) const { auto address = makeSocketAddress(host, port, &logger()); #if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100 @@ -347,7 +352,7 @@ Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & sock #if POCO_VERSION < 0x01080000 socket.bind(address, /* reuseAddress = */ true); #else - socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false)); + socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config.getBool("listen_reuse_port", false)); #endif /// If caller requests any available port from the OS, discover it after binding. 
@@ -357,7 +362,7 @@ Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & sock LOG_DEBUG(&logger(), "Requested any available port (port == 0), actual port is {:d}", address.port()); } - socket.listen(/* backlog = */ config().getUInt("listen_backlog", 4096)); + socket.listen(/* backlog = */ config.getUInt("listen_backlog", 4096)); return address; } @@ -1237,7 +1242,7 @@ int Server::main(const std::vector & /*args*/) [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port); + auto address = socketBindListen(config(), socket, listen_host, port); socket.setReceiveTimeout(config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC)); socket.setSendTimeout(config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC)); return ProtocolServerAdapter( @@ -1260,7 +1265,7 @@ int Server::main(const std::vector & /*args*/) { #if USE_SSL Poco::Net::SecureServerSocket socket; - auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + auto address = socketBindListen(config(), socket, listen_host, port, /* secure = */ true); socket.setReceiveTimeout(config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC)); socket.setSendTimeout(config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC)); return ProtocolServerAdapter( @@ -1797,7 +1802,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port); + auto address = socketBindListen(config, socket, listen_host, port); socket.setReceiveTimeout(settings.http_receive_timeout); socket.setSendTimeout(settings.http_send_timeout); @@ -1815,7 +1820,7 @@ void Server::createServers( { #if USE_SSL Poco::Net::SecureServerSocket socket; - auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true); socket.setReceiveTimeout(settings.http_receive_timeout); socket.setSendTimeout(settings.http_send_timeout); return ProtocolServerAdapter( @@ -1836,7 +1841,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port); + auto address = socketBindListen(config, socket, listen_host, port); socket.setReceiveTimeout(settings.receive_timeout); socket.setSendTimeout(settings.send_timeout); return ProtocolServerAdapter( @@ -1855,7 +1860,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port); + auto address = socketBindListen(config, socket, listen_host, port); socket.setReceiveTimeout(settings.receive_timeout); socket.setSendTimeout(settings.send_timeout); return ProtocolServerAdapter( @@ -1875,7 +1880,7 @@ void Server::createServers( { #if USE_SSL Poco::Net::SecureServerSocket socket; - auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ 
true); socket.setReceiveTimeout(settings.receive_timeout); socket.setSendTimeout(settings.send_timeout); return ProtocolServerAdapter( @@ -1899,7 +1904,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port); + auto address = socketBindListen(config, socket, listen_host, port); socket.setReceiveTimeout(settings.http_receive_timeout); socket.setSendTimeout(settings.http_send_timeout); return ProtocolServerAdapter( @@ -1919,7 +1924,7 @@ void Server::createServers( { #if USE_SSL Poco::Net::SecureServerSocket socket; - auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true); socket.setReceiveTimeout(settings.http_receive_timeout); socket.setSendTimeout(settings.http_send_timeout); return ProtocolServerAdapter( @@ -1943,7 +1948,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true); socket.setReceiveTimeout(Poco::Timespan()); socket.setSendTimeout(settings.send_timeout); return ProtocolServerAdapter( @@ -1957,7 +1962,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port, /* secure = */ true); + auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true); socket.setReceiveTimeout(Poco::Timespan()); socket.setSendTimeout(settings.send_timeout); return ProtocolServerAdapter( @@ -1985,7 +1990,7 @@ void Server::createServers( createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter { Poco::Net::ServerSocket socket; - auto address = socketBindListen(socket, listen_host, port); + auto address = socketBindListen(config, socket, listen_host, port); socket.setReceiveTimeout(settings.http_receive_timeout); socket.setSendTimeout(settings.http_send_timeout); return ProtocolServerAdapter( diff --git a/programs/server/Server.h b/programs/server/Server.h index 9a0fabd97c2..4235fcc2d3b 100644 --- a/programs/server/Server.h +++ b/programs/server/Server.h @@ -67,7 +67,12 @@ protected: private: ContextMutablePtr global_context; - Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const; + Poco::Net::SocketAddress socketBindListen( + const Poco::Util::AbstractConfiguration & config, + Poco::Net::ServerSocket & socket, + const std::string & host, + UInt16 port, + [[maybe_unused]] bool secure = false) const; using CreateServerFunc = std::function; void createServer( diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp index 240130bbf74..712e5393ce7 100644 --- a/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -67,11 +67,11 @@ namespace size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + 
has_password_double_sha1_hex + has_ldap + has_kerberos + has_certificates; if (num_password_fields > 1) - throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'certificates' are used to specify authentication info for user " + user_name + ". Must be only one of them.", + throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'ssl_certificates' are used to specify authentication info for user " + user_name + ". Must be only one of them.", ErrorCodes::BAD_ARGUMENTS); if (num_password_fields < 1) - throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' or 'certificates' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); + throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' or 'ssl_certificates' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS); if (has_password_plaintext) { diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 62edf7f2879..35ef55a1387 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -718,7 +718,8 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa query_processing_stage, &global_context->getSettingsRef(), &global_context->getClientInfo(), - true); + true, + [&](const Progress & progress) { onProgress(progress); }); if (send_external_tables) sendExternalTables(parsed_query); @@ -1071,7 +1072,8 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars query_processing_stage, &global_context->getSettingsRef(), &global_context->getClientInfo(), - true); + true, + [&](const Progress & progress) { onProgress(progress); }); if (send_external_tables) sendExternalTables(parsed_query); @@ -1103,7 +1105,9 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des if (!parsed_insert_query) return; - if (need_render_progress) + bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && !std_in.eof(); + + if (need_render_progress && have_data_in_stdin) { /// Set total_bytes_to_read for current fd. 
FileProgress file_progress(0, std_in.size()); @@ -1113,8 +1117,6 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des progress_indication.setFileProgressCallback(global_context, true); } - bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && !std_in.eof(); - /// If data fetched from file (maybe compressed file) if (parsed_insert_query->infile) { diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 021ae2142a1..e53d55f6964 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -451,7 +451,8 @@ void Connection::sendQuery( UInt64 stage, const Settings * settings, const ClientInfo * client_info, - bool with_pending_data) + bool with_pending_data, + std::function) { if (!connected) connect(timeouts); diff --git a/src/Client/Connection.h b/src/Client/Connection.h index e34a0a22f42..d00a5760a8d 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -100,7 +100,8 @@ public: UInt64 stage/* = QueryProcessingStage::Complete */, const Settings * settings/* = nullptr */, const ClientInfo * client_info/* = nullptr */, - bool with_pending_data/* = false */) override; + bool with_pending_data/* = false */, + std::function process_progress_callback) override; void sendCancel() override; diff --git a/src/Client/HedgedConnections.cpp b/src/Client/HedgedConnections.cpp index 665f98a88d6..954396af0fa 100644 --- a/src/Client/HedgedConnections.cpp +++ b/src/Client/HedgedConnections.cpp @@ -179,7 +179,7 @@ void HedgedConnections::sendQuery( modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset; } - replica.connection->sendQuery(timeouts, query, query_id, stage, &modified_settings, &client_info, with_pending_data); + replica.connection->sendQuery(timeouts, query, query_id, stage, &modified_settings, &client_info, with_pending_data, {}); replica.change_replica_timeout.setRelative(timeouts.receive_data_timeout); replica.packet_receiver->setReceiveTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout); }; diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index 861630a942b..dfa4873e426 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -90,7 +90,8 @@ public: UInt64 stage, const Settings * settings, const ClientInfo * client_info, - bool with_pending_data) = 0; + bool with_pending_data, + std::function process_progress_callback) = 0; virtual void sendCancel() = 0; diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index 49e40ef8571..77519423763 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -74,13 +74,14 @@ void LocalConnection::sendQuery( UInt64 stage, const Settings *, const ClientInfo *, - bool) + bool, + std::function process_progress_callback) { query_context = session.makeQueryContext(); query_context->setCurrentQueryId(query_id); if (send_progress) { - query_context->setProgressCallback([this] (const Progress & value) { return this->updateProgress(value); }); + query_context->setProgressCallback([this] (const Progress & value) { this->updateProgress(value); }); query_context->setFileProgressCallback([this](const FileProgress & value) { this->updateProgress(Progress(value)); }); } if (!current_database.empty()) @@ -143,6 +144,19 @@ void LocalConnection::sendQuery( else if (state->io.pipeline.completed()) { CompletedPipelineExecutor executor(state->io.pipeline); + if (process_progress_callback) + { + auto callback 
= [this, &process_progress_callback]() + { + if (state->is_cancelled) + return true; + + process_progress_callback(state->progress.fetchAndResetPiecewiseAtomically()); + return false; + }; + + executor.setCancelCallback(callback, query_context->getSettingsRef().interactive_delay / 1000); + } executor.execute(); } @@ -185,6 +199,7 @@ void LocalConnection::sendData(const Block & block, const String &, bool) void LocalConnection::sendCancel() { + state->is_cancelled = true; if (state->executor) state->executor->cancel(); } @@ -440,7 +455,7 @@ Packet LocalConnection::receivePacket() } case Protocol::Server::Progress: { - packet.progress = std::move(state->progress); + packet.progress = state->progress.fetchAndResetPiecewiseAtomically(); state->progress.reset(); next_packet_type.reset(); break; diff --git a/src/Client/LocalConnection.h b/src/Client/LocalConnection.h index ad6f94122cc..1ad6ad73238 100644 --- a/src/Client/LocalConnection.h +++ b/src/Client/LocalConnection.h @@ -98,7 +98,8 @@ public: UInt64 stage/* = QueryProcessingStage::Complete */, const Settings * settings/* = nullptr */, const ClientInfo * client_info/* = nullptr */, - bool with_pending_data/* = false */) override; + bool with_pending_data/* = false */, + std::function process_progress_callback) override; void sendCancel() override; diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index 31fbc609bdc..b14ff9f2c8d 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -161,14 +161,14 @@ void MultiplexedConnections::sendQuery( modified_settings.parallel_replica_offset = i; replica_states[i].connection->sendQuery(timeouts, query, query_id, - stage, &modified_settings, &client_info, with_pending_data); + stage, &modified_settings, &client_info, with_pending_data, {}); } } else { /// Use single replica. 
replica_states[0].connection->sendQuery(timeouts, query, query_id, - stage, &modified_settings, &client_info, with_pending_data); + stage, &modified_settings, &client_info, with_pending_data, {}); } sent_query = true; diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 84625a768bf..de09c07f4c1 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -132,7 +132,7 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query) { - connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false); + connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false, {}); while (true) { diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 214a9a807f5..435e6bf1fbc 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -10,6 +10,11 @@ #include #include +#if USE_EMBEDDED_COMPILER +#include +#include +#endif + namespace DB { @@ -241,6 +246,66 @@ ColumnPtr ColumnNullable::index(const IColumn & indexes, size_t limit) const return ColumnNullable::create(indexed_data, indexed_null_map); } +#if USE_EMBEDDED_COMPILER + +bool ColumnNullable::isComparatorCompilable() const +{ + return nested_column->isComparatorCompilable(); +} + +llvm::Value * ColumnNullable::compileComparator(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs, + llvm::Value * nan_direction_hint) const +{ + llvm::IRBuilder<> & b = static_cast &>(builder); + auto * head = b.GetInsertBlock(); + + llvm::Value * lhs_unwrapped_value = b.CreateExtractValue(lhs, {0}); + llvm::Value * lhs_is_null_value = b.CreateExtractValue(lhs, {1}); + + llvm::Value * rhs_unwrapped_value = b.CreateExtractValue(rhs, {0}); + llvm::Value * rhs_is_null_value = b.CreateExtractValue(rhs, {1}); + + llvm::Value * lhs_or_rhs_are_null = b.CreateOr(lhs_is_null_value, rhs_is_null_value); + + auto * lhs_or_rhs_are_null_block = llvm::BasicBlock::Create(head->getContext(), "lhs_or_rhs_are_null_block", head->getParent()); + auto * lhs_rhs_are_not_null_block = llvm::BasicBlock::Create(head->getContext(), "lhs_and_rhs_are_not_null_block", head->getParent()); + auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent()); + + b.CreateCondBr(lhs_or_rhs_are_null, lhs_or_rhs_are_null_block, lhs_rhs_are_not_null_block); + + // if (unlikely(lval_is_null || rval_is_null)) + // { + // if (lval_is_null && rval_is_null) + // return 0; + // else + // return lval_is_null ? 
null_direction_hint : -null_direction_hint; + // } + + b.SetInsertPoint(lhs_or_rhs_are_null_block); + auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0); + llvm::Value * lhs_and_rhs_are_null = b.CreateAnd(lhs_is_null_value, rhs_is_null_value); + llvm::Value * lhs_is_null_result = b.CreateSelect(lhs_is_null_value, nan_direction_hint, b.CreateNeg(nan_direction_hint)); + llvm::Value * lhs_or_rhs_are_null_block_result = b.CreateSelect(lhs_and_rhs_are_null, lhs_equals_rhs_result, lhs_is_null_result); + b.CreateBr(join_block); + + // getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint); + + b.SetInsertPoint(lhs_rhs_are_not_null_block); + llvm::Value * lhs_rhs_are_not_null_block_result + = nested_column->compileComparator(builder, lhs_unwrapped_value, rhs_unwrapped_value, nan_direction_hint); + b.CreateBr(join_block); + + b.SetInsertPoint(join_block); + + auto * result = b.CreatePHI(b.getInt8Ty(), 2); + result->addIncoming(lhs_or_rhs_are_null_block_result, lhs_or_rhs_are_null_block); + result->addIncoming(lhs_rhs_are_not_null_block_result, lhs_rhs_are_not_null_block); + + return result; +} + +#endif + int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint, const Collator * collator) const { /// NULL values share the properties of NaN values. diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index a0c72182f63..60951dfcc2e 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -6,6 +6,9 @@ #include #include +#include "config_core.h" + + class Collator; namespace DB @@ -94,6 +97,15 @@ public: ColumnPtr permute(const Permutation & perm, size_t limit) const override; ColumnPtr index(const IColumn & indexes, size_t limit) const override; int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override; + +#if USE_EMBEDDED_COMPILER + + bool isComparatorCompilable() const override; + + llvm::Value * compileComparator(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*lhs*/, llvm::Value * /*rhs*/, llvm::Value * /*nan_direction_hint*/) const override; + +#endif + void compareColumn(const IColumn & rhs, size_t rhs_row_num, PaddedPODArray * row_indexes, PaddedPODArray & compare_results, int direction, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index 90b821bfe06..04f2efab0d7 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -25,6 +25,12 @@ # include #endif +#if USE_EMBEDDED_COMPILER +#include +#include +#endif + + namespace DB { @@ -183,6 +189,43 @@ namespace }; } +#if USE_EMBEDDED_COMPILER + +template +bool ColumnVector::isComparatorCompilable() const +{ + /// TODO: for std::is_floating_point_v we need implement is_nan in LLVM IR. + return std::is_integral_v; +} + +template +llvm::Value * ColumnVector::compileComparator(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs, llvm::Value *) const +{ + llvm::IRBuilder<> & b = static_cast &>(builder); + + if constexpr (std::is_integral_v) + { + // a > b ? 1 : (a < b ? -1 : 0); + + bool is_signed = std::is_signed_v; + + auto * lhs_greater_than_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 1); + auto * lhs_less_than_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), -1); + auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0); + + auto * lhs_greater_than_rhs = is_signed ? 
b.CreateICmpSGT(lhs, rhs) : b.CreateICmpUGT(lhs, rhs); + auto * lhs_less_than_rhs = is_signed ? b.CreateICmpSLT(lhs, rhs) : b.CreateICmpULT(lhs, rhs); + auto * if_lhs_less_than_rhs_result = b.CreateSelect(lhs_less_than_rhs, lhs_less_than_rhs_result, lhs_equals_rhs_result); + + return b.CreateSelect(lhs_greater_than_rhs, lhs_greater_than_rhs_result, if_lhs_less_than_rhs_result); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Method compileComparator is not supported for type {}", TypeName); + } +} + +#endif template void ColumnVector::getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index ea9eae1881c..6ba9abaca32 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -10,6 +10,8 @@ #include #include +#include "config_core.h" + namespace DB { @@ -217,6 +219,14 @@ public: return CompareHelper::compare(data[n], assert_cast(rhs_).data[m], nan_direction_hint); } +#if USE_EMBEDDED_COMPILER + + bool isComparatorCompilable() const override; + + llvm::Value * compileComparator(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*lhs*/, llvm::Value * /*rhs*/, llvm::Value * /*nan_direction_hint*/) const override; + +#endif + void compareColumn(const IColumn & rhs, size_t rhs_row_num, PaddedPODArray * row_indexes, PaddedPODArray & compare_results, int direction, int nan_direction_hint) const override diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index f4986799d47..f62f6c444b3 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -7,10 +7,19 @@ #include #include +#include "config_core.h" + class SipHash; class Collator; +namespace llvm +{ + class LLVMContext; + class Value; + class IRBuilderBase; +} + namespace DB { @@ -281,6 +290,17 @@ public: */ [[nodiscard]] virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0; +#if USE_EMBEDDED_COMPILER + + [[nodiscard]] virtual bool isComparatorCompilable() const { return false; } + + [[nodiscard]] virtual llvm::Value * compileComparator(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*lhs*/, llvm::Value * /*rhs*/, llvm::Value * /*nan_direction_hint*/) const + { + throw Exception("Method compileComparator is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED); + } + +#endif + /// Equivalent to compareAt, but collator is used to compare values. [[nodiscard]] virtual int compareAtWithCollation(size_t, size_t, const IColumn &, int, const Collator &) const { diff --git a/src/Common/FileCache.cpp b/src/Common/FileCache.cpp index 8bb133afb54..ae1b1afdd09 100644 --- a/src/Common/FileCache.cpp +++ b/src/Common/FileCache.cpp @@ -400,7 +400,7 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell( if (files[key].contains(offset)) throw Exception( - ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, + ErrorCodes::LOGICAL_ERROR, "Cache already exists for key: `{}`, offset: {}, size: {}.\nCurrent cache structure: {}", keyToStr(key), offset, size, dumpStructureUnlocked(key, cache_lock)); @@ -609,7 +609,7 @@ void LRUFileCache::remove(const Key & key) #endif } -void LRUFileCache::remove(bool force_remove_unreleasable) +void LRUFileCache::remove() { /// Try remove all cached files by cache_base_path. /// Only releasable file segments are evicted. 
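The comparator hunks above (ColumnNullable::compileComparator and ColumnVector::compileComparator) emit LLVM IR rather than calling any C++ helper, but the logic they encode is the ordinary scalar three-way compare. Below is a minimal sketch of that scalar logic, with illustrative names only; the real code builds the equivalent select/branch instructions through IRBuilder and never calls functions like these.

``` cpp
#include <cstdint>
#include <optional>

/// Illustrative only: the scalar logic mirrored by the JIT-compiled comparators above.
static int8_t compareIntegral(int64_t lhs, int64_t rhs)
{
    /// a > b ? 1 : (a < b ? -1 : 0), as in ColumnVector<T>::compileComparator for integral T.
    return lhs > rhs ? 1 : (lhs < rhs ? -1 : 0);
}

static int8_t compareNullable(std::optional<int64_t> lhs, std::optional<int64_t> rhs, int8_t null_direction_hint)
{
    /// Same branch structure as ColumnNullable::compileComparator: NULLs are ordered by
    /// null_direction_hint, everything else defers to the nested column's comparator.
    if (!lhs.has_value() || !rhs.has_value())
    {
        if (!lhs.has_value() && !rhs.has_value())
            return 0;
        return !lhs.has_value() ? null_direction_hint : static_cast<int8_t>(-null_direction_hint);
    }
    return compareIntegral(*lhs, *rhs);
}
```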
@@ -626,7 +626,7 @@ void LRUFileCache::remove(bool force_remove_unreleasable) ErrorCodes::LOGICAL_ERROR, "Cache is in inconsistent state: LRU queue contains entries with no cache cell"); - if (cell->releasable() || force_remove_unreleasable) + if (cell->releasable()) { auto file_segment = cell->file_segment; if (file_segment) @@ -647,7 +647,7 @@ void LRUFileCache::remove( auto * cell = getCell(key, offset, cache_lock); if (!cell) - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "No cache cell for key: {}, offset: {}", keyToStr(key), offset); + throw Exception(ErrorCodes::LOGICAL_ERROR, "No cache cell for key: {}, offset: {}", keyToStr(key), offset); if (cell->queue_iterator) { diff --git a/src/Common/FileCache.h b/src/Common/FileCache.h index d4235735fbf..ff65b579470 100644 --- a/src/Common/FileCache.h +++ b/src/Common/FileCache.h @@ -26,6 +26,7 @@ class IFileCache : private boost::noncopyable { friend class FileSegment; friend struct FileSegmentsHolder; +friend class FileSegmentRangeWriter; public: using Key = UInt128; @@ -42,7 +43,7 @@ public: virtual void remove(const Key & key) = 0; - virtual void remove(bool force_remove_unreleasable) = 0; + virtual void remove() = 0; static bool isReadOnly(); @@ -143,13 +144,11 @@ public: FileSegments getSnapshot() const override; - FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) override; - void initialize() override; void remove(const Key & key) override; - void remove(bool force_remove_unreleasable) override; + void remove() override; std::vector tryGetCachePaths(const Key & key) override; @@ -272,6 +271,8 @@ private: void fillHolesWithEmptyFileSegments( FileSegments & file_segments, const Key & key, const FileSegment::Range & range, bool fill_with_detached_file_segments, std::lock_guard & cache_lock); + FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) override; + size_t getUsedCacheSizeUnlocked(std::lock_guard & cache_lock) const; size_t getAvailableCacheSizeUnlocked(std::lock_guard & cache_lock) const; diff --git a/src/Common/FileSegment.cpp b/src/Common/FileSegment.cpp index 9c75dcfb2a8..356ba8bf55f 100644 --- a/src/Common/FileSegment.cpp +++ b/src/Common/FileSegment.cpp @@ -107,8 +107,7 @@ String FileSegment::getOrSetDownloader() { std::lock_guard segment_lock(mutex); - if (detached) - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cannot set downloader for a detached file segment"); + assertNotDetached(segment_lock); if (downloader_id.empty()) { @@ -132,6 +131,8 @@ void FileSegment::resetDownloader() { std::lock_guard segment_lock(mutex); + assertNotDetached(segment_lock); + if (downloader_id.empty()) throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "There is no downloader"); @@ -209,7 +210,7 @@ void FileSegment::write(const char * from, size_t size, size_t offset_) "Not enough space is reserved. Available: {}, expected: {}", availableSize(), size); if (!isDownloader()) - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, + throw Exception(ErrorCodes::LOGICAL_ERROR, "Only downloader can do the downloading. 
(CallerId: {}, DownloaderId: {})", getCallerId(), downloader_id); @@ -224,7 +225,10 @@ void FileSegment::write(const char * from, size_t size, size_t offset_) "Attempt to write {} bytes to offset: {}, but current download offset is {}", size, offset_, download_offset); - assertNotDetached(); + { + std::lock_guard segment_lock(mutex); + assertNotDetached(segment_lock); + } if (!cache_writer) { @@ -273,9 +277,8 @@ void FileSegment::writeInMemory(const char * from, size_t size) ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Not enough space is reserved. Available: {}, expected: {}", availableSize(), size); - assertNotDetached(); - std::lock_guard segment_lock(mutex); + assertNotDetached(segment_lock); if (cache_writer) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache writer already initialized"); @@ -311,7 +314,7 @@ size_t FileSegment::finalizeWrite() if (size == 0) throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Writing zero size is not allowed"); - assertNotDetached(); + assertNotDetached(segment_lock); try { @@ -342,6 +345,11 @@ FileSegment::State FileSegment::wait() { std::unique_lock segment_lock(mutex); + if (is_detached) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Cache file segment is in detached state, operation not allowed"); + if (downloader_id.empty()) return download_state; @@ -366,14 +374,19 @@ bool FileSegment::reserve(size_t size) if (!size) throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Zero space reservation is not allowed"); - assertNotDetached(); - { std::lock_guard segment_lock(mutex); + assertNotDetached(segment_lock); auto caller_id = getCallerId(); - if (downloader_id != caller_id) - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Space can be reserved only by downloader (current: {}, expected: {})", caller_id, downloader_id); + bool is_downloader = caller_id == downloader_id; + if (!is_downloader) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Space can be reserved only by downloader (current: {}, expected: {})", + caller_id, downloader_id); + } if (downloaded_size + size > range().size()) throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, @@ -392,6 +405,7 @@ bool FileSegment::reserve(size_t size) size_t size_to_reserve = size - free_space; std::lock_guard cache_lock(cache->mutex); + bool reserved = cache->tryReserve(key(), offset(), size_to_reserve, cache_lock); if (reserved) @@ -437,6 +451,8 @@ void FileSegment::completeBatchAndResetDownloader() { std::lock_guard segment_lock(mutex); + assertNotDetached(segment_lock); + if (!isDownloaderImpl(segment_lock)) { cv.notify_all(); @@ -458,7 +474,7 @@ void FileSegment::complete(State state) std::lock_guard cache_lock(cache->mutex); std::lock_guard segment_lock(mutex); - assertNotDetached(); + assertNotDetached(segment_lock); bool is_downloader = isDownloaderImpl(segment_lock); if (!is_downloader) @@ -501,12 +517,15 @@ void FileSegment::complete(State state) void FileSegment::complete(std::lock_guard & cache_lock) { std::lock_guard segment_lock(mutex); + + assertNotDetached(segment_lock); + completeUnlocked(cache_lock, segment_lock); } void FileSegment::completeUnlocked(std::lock_guard & cache_lock, std::lock_guard & segment_lock) { - if (download_state == State::SKIP_CACHE || detached) + if (download_state == State::SKIP_CACHE || is_detached) return; if (isDownloaderImpl(segment_lock) @@ -516,7 +535,7 @@ void FileSegment::completeUnlocked(std::lock_guard & cache_lock, std setDownloaded(segment_lock); } - assertNotDetached(); + assertNotDetached(segment_lock); if 
(download_state == State::DOWNLOADING || download_state == State::EMPTY) { @@ -589,6 +608,7 @@ void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lo downloader_id.clear(); } + LOG_TEST(log, "Completed file segment: {}", getInfoForLogImpl(segment_lock)); assertCorrectnessImpl(segment_lock); } @@ -649,15 +669,40 @@ void FileSegment::assertCorrectnessImpl(std::lock_guard & /* segment assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0); } -void FileSegment::assertNotDetached() const +void FileSegment::throwIfDetached() const { - if (detached) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Operation not allowed, file segment is detached"); + std::lock_guard segment_lock(mutex); + throwIfDetachedUnlocked(segment_lock); } -void FileSegment::assertDetachedStatus(std::lock_guard & /* segment_lock */) const +void FileSegment::throwIfDetachedUnlocked(std::lock_guard & segment_lock) const { - assert(download_state == State::EMPTY || hasFinalizedState()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Cache file segment is in detached state, operation not allowed. " + "It can happen when cache was concurrently dropped with SYSTEM DROP FILESYSTEM CACHE FORCE. " + "Please, retry. File segment info: {}", getInfoForLogImpl(segment_lock)); +} + + +void FileSegment::assertNotDetached(std::lock_guard & segment_lock) const +{ + if (is_detached) + throwIfDetachedUnlocked(segment_lock); +} + +void FileSegment::assertDetachedStatus(std::lock_guard & segment_lock) const +{ + /// Detached file segment is allowed to have only a certain subset of states. + /// It should be either EMPTY or one of the finalized states. + + if (download_state != State::EMPTY && !hasFinalizedState()) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Detached file segment has incorrect state: {}", + getInfoForLogImpl(segment_lock)); + } } FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard & /* cache_lock */) @@ -684,29 +729,35 @@ bool FileSegment::hasFinalizedState() const || download_state == State::SKIP_CACHE; } -void FileSegment::detach(std::lock_guard & cache_lock, std::lock_guard & segment_lock) +void FileSegment::detach( + std::lock_guard & /* cache_lock */, + std::lock_guard & segment_lock) { - if (detached) + /// Now detached status can be in 2 cases, which do not do any complex logic: + /// 1. there is only 1 remaining file segment holder + /// && it does not need this segment anymore + /// && this file segment was in cache and needs to be removed + /// 2. 
in read_from_cache_if_exists_otherwise_bypass_cache case + if (is_detached) return; markAsDetached(segment_lock); + download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION; + downloader_id.clear(); - if (!hasFinalizedState()) - { - completeUnlocked(cache_lock, segment_lock); - } + LOG_TEST(log, "Detached file segment: {}", getInfoForLogImpl(segment_lock)); } void FileSegment::markAsDetached(std::lock_guard & /* segment_lock */) { - detached = true; + is_detached = true; CurrentMetrics::add(CurrentMetrics::CacheDetachedFileSegments); } FileSegment::~FileSegment() { std::lock_guard segment_lock(mutex); - if (detached) + if (is_detached) CurrentMetrics::sub(CurrentMetrics::CacheDetachedFileSegments); } @@ -726,15 +777,18 @@ FileSegmentsHolder::~FileSegmentsHolder() if (!cache) cache = file_segment->cache; + try { - bool detached = false; + bool is_detached = false; + { std::lock_guard segment_lock(file_segment->mutex); - detached = file_segment->isDetached(segment_lock); - if (detached) + is_detached = file_segment->isDetached(segment_lock); + if (is_detached) file_segment->assertDetachedStatus(segment_lock); } - if (detached) + + if (is_detached) { /// This file segment is not owned by cache, so it will be destructed /// at this point, therefore no completion required. @@ -742,10 +796,6 @@ FileSegmentsHolder::~FileSegmentsHolder() continue; } - } - - try - { /// File segment pointer must be reset right after calling complete() and /// under the same mutex, because complete() checks for segment pointers. std::lock_guard cache_lock(cache->mutex); @@ -757,7 +807,6 @@ FileSegmentsHolder::~FileSegmentsHolder() catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); - assert(false); } } } @@ -774,5 +823,4 @@ String FileSegmentsHolder::toString() return ranges; } - } diff --git a/src/Common/FileSegment.h b/src/Common/FileSegment.h index 42ebfd39bcc..7a25529ab23 100644 --- a/src/Common/FileSegment.h +++ b/src/Common/FileSegment.h @@ -25,8 +25,10 @@ using FileSegments = std::list; class FileSegment : boost::noncopyable { + friend class LRUFileCache; friend struct FileSegmentsHolder; +friend class FileSegmentRangeWriter; public: using Key = UInt128; @@ -149,9 +151,15 @@ public: void assertCorrectness() const; - static FileSegmentPtr getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard & cache_lock); + static FileSegmentPtr getSnapshot( + const FileSegmentPtr & file_segment, + std::lock_guard & cache_lock); - void detach(std::lock_guard & cache_lock, std::lock_guard & segment_lock); + void detach( + std::lock_guard & cache_lock, + std::lock_guard & segment_lock); + + [[noreturn]] void throwIfDetached() const; private: size_t availableSize() const { return reserved_size - downloaded_size; } @@ -159,11 +167,14 @@ private: size_t getDownloadedSize(std::lock_guard & segment_lock) const; String getInfoForLogImpl(std::lock_guard & segment_lock) const; void assertCorrectnessImpl(std::lock_guard & segment_lock) const; - void assertNotDetached() const; - void assertDetachedStatus(std::lock_guard & segment_lock) const; bool hasFinalizedState() const; - bool isDetached(std::lock_guard & /* segment_lock */) const { return detached; } + + bool isDetached(std::lock_guard & /* segment_lock */) const { return is_detached; } void markAsDetached(std::lock_guard & segment_lock); + [[noreturn]] void throwIfDetachedUnlocked(std::lock_guard & segment_lock) const; + + void assertDetachedStatus(std::lock_guard & segment_lock) const; + void assertNotDetached(std::lock_guard & segment_lock) const; void 
setDownloaded(std::lock_guard & segment_lock); void setDownloadFailed(std::lock_guard & segment_lock); @@ -197,6 +208,10 @@ private: size_t downloaded_size = 0; size_t reserved_size = 0; + /// global locking order rule: + /// 1. cache lock + /// 2. segment lock + mutable std::mutex mutex; std::condition_variable cv; @@ -215,7 +230,7 @@ private: /// "detached" file segment means that it is not owned by cache ("detached" from cache). /// In general case, all file segments are owned by cache. - bool detached = false; + bool is_detached = false; std::atomic is_downloaded{false}; std::atomic hits_count = 0; /// cache hits. @@ -227,6 +242,7 @@ private: struct FileSegmentsHolder : private boost::noncopyable { explicit FileSegmentsHolder(FileSegments && file_segments_) : file_segments(std::move(file_segments_)) {} + FileSegmentsHolder(FileSegmentsHolder && other) noexcept : file_segments(std::move(other.file_segments)) {} ~FileSegmentsHolder(); diff --git a/src/Common/ObjectPool.h b/src/Common/ObjectPool.h index ef07b8eed1b..801a37d0dfb 100644 --- a/src/Common/ObjectPool.h +++ b/src/Common/ObjectPool.h @@ -94,7 +94,7 @@ public: template Pointer get(const Key & key, Factory && f) { - std::unique_lock lock(mutex); + std::lock_guard lock(mutex); auto it = container.find(key); if (container.end() == it) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 7d811d6c1ee..7f3b9788c1f 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -260,10 +260,12 @@ \ M(QueryMemoryLimitExceeded, "Number of times when memory limit exceeded for query.") \ \ - M(RemoteFSReadMicroseconds, "Time of reading from remote filesystem.") \ - M(RemoteFSReadBytes, "Read bytes from remote filesystem.") \ - M(RemoteFSCacheReadBytes, "Read bytes from cache of remote filesystem.") \ - M(RemoteFSCacheDownloadBytes, "Bytes downloaded to cache from remote filesystem.") \ + M(CachedReadBufferReadFromSourceMicroseconds, "Time reading from filesystem cache source (from remote filesystem, etc)") \ + M(CachedReadBufferReadFromCacheMicroseconds, "Time reading from filesystem cache") \ + M(CachedReadBufferReadFromSourceBytes, "Bytes read from filesystem cache source (from remote fs, etc)") \ + M(CachedReadBufferReadFromCacheBytes, "Bytes read from filesystem cache") \ + M(CachedReadBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \ + M(CachedReadBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \ \ M(RemoteFSSeeks, "Total number of seeks for async buffer") \ M(RemoteFSPrefetches, "Number of prefetches made with asynchronous reading from remote filesystem") \ @@ -275,6 +277,15 @@ M(RemoteFSSeeksWithReset, "Number of seeks which lead to a new connection") \ M(RemoteFSBuffers, "Number of buffers created for asynchronous reading from remote filesystem") \ \ + M(ThreadpoolReaderTaskMicroseconds, "Time spent getting the data in asynchronous reading") \ + M(ThreadpoolReaderReadBytes, "Bytes read from a threadpool task in asynchronous reading") \ + \ + M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \ + M(FileSegmentReadMicroseconds, "Metric per file segment. Time spend reading from file") \ + M(FileSegmentCacheWriteMicroseconds, "Metric per file segment. Time spend writing data to cache") \ + M(FileSegmentPredownloadMicroseconds, "Metric per file segment. 
Time spent predownloading data to cache (predownloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \ + M(FileSegmentUsedBytes, "Metric per file segment. How many bytes were actually used from current file segment") \ + \ M(ReadBufferSeekCancelConnection, "Number of seeks which lead to new connection (s3, http)") \ \ M(SleepFunctionCalls, "Number of times a sleep function (sleep, sleepEachRow) has been called.") \ diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp index 7a8cff2d58f..ffc90807060 100644 --- a/src/Common/ProgressIndication.cpp +++ b/src/Common/ProgressIndication.cpp @@ -165,18 +165,17 @@ void ProgressIndication::writeProgress() message << '\r'; size_t prefix_size = message.count(); - size_t read_bytes = progress.read_raw_bytes ? progress.read_raw_bytes : progress.read_bytes; message << indicator << " Progress: "; message << formatReadableQuantity(progress.read_rows) << " rows, " - << formatReadableSizeWithDecimalSuffix(read_bytes); + << formatReadableSizeWithDecimalSuffix(progress.read_bytes); auto elapsed_ns = watch.elapsed(); if (elapsed_ns) message << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., " - << formatReadableSizeWithDecimalSuffix(read_bytes * 1000000000.0 / elapsed_ns) << "/s.) "; + << formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) "; else message << ". "; @@ -206,7 +205,7 @@ void ProgressIndication::writeProgress() int64_t remaining_space = static_cast(terminal_width) - written_progress_chars; /// If the approximate number of rows to process is known, we can display a progress bar and percentage. - if (progress.total_rows_to_read || progress.total_raw_bytes_to_read) + if (progress.total_rows_to_read || progress.total_bytes_to_read) { size_t current_count, max_count; if (progress.total_rows_to_read) @@ -216,8 +215,8 @@ void ProgressIndication::writeProgress() } else { - current_count = progress.read_raw_bytes; - max_count = std::max(progress.read_raw_bytes, progress.total_raw_bytes_to_read); + current_count = progress.read_bytes; + max_count = std::max(progress.read_bytes, progress.total_bytes_to_read); } /// To avoid flicker, display progress bar only if .5 seconds have passed since query execution start diff --git a/src/Common/tests/gtest_lru_file_cache.cpp b/src/Common/tests/gtest_lru_file_cache.cpp index 7b3d988c8e2..24e69259241 100644 --- a/src/Common/tests/gtest_lru_file_cache.cpp +++ b/src/Common/tests/gtest_lru_file_cache.cpp @@ -119,9 +119,9 @@ TEST(LRUFileCache, get) assertRange(1, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::EMPTY); /// Exception because space not reserved. 
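The new ProfileEvents declared above (CachedReadBufferReadFromSource*, FileSegment*Microseconds, and so on) are populated further down in CachedReadBufferFromRemoteFS by timing each read or cache write with a Stopwatch and incrementing both the per-file-segment counters and the global event. A simplified, self-contained sketch of that pattern, using std::chrono and plain atomics as stand-ins for ClickHouse's Stopwatch and ProfileEvents::Counters:

``` cpp
#include <atomic>
#include <chrono>
#include <cstdint>

/// Stand-in for ProfileEvents: one per-file-segment counter plus one "global" event.
struct Counters
{
    std::atomic<uint64_t> file_segment_read_microseconds{0};                    /// per file segment
    inline static std::atomic<uint64_t> read_from_source_microseconds_total{0}; /// global profile event
};

template <typename ReadFn>
bool timedRead(Counters & counters, ReadFn && read_step)
{
    auto start = std::chrono::steady_clock::now();
    bool has_more_data = read_step();   /// e.g. "!implementation_buffer->eof()" in the real code
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start).count();

    counters.file_segment_read_microseconds += elapsed;       /// snapshot later goes into the cache log element
    Counters::read_from_source_microseconds_total += elapsed; /// e.g. CachedReadBufferReadFromSourceMicroseconds
    return has_more_data;
}
```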
- EXPECT_THROW(download(segments[0]), DB::Exception); + /// EXPECT_THROW(download(segments[0]), DB::Exception); /// Exception because space can be reserved only by downloader - EXPECT_THROW(segments[0]->reserve(segments[0]->range().size()), DB::Exception); + /// EXPECT_THROW(segments[0]->reserve(segments[0]->range().size()), DB::Exception); ASSERT_TRUE(segments[0]->getOrSetDownloader() == DB::FileSegment::getCallerId()); ASSERT_TRUE(segments[0]->reserve(segments[0]->range().size())); diff --git a/src/Core/Block.cpp b/src/Core/Block.cpp index f5764262c66..a21c96abfdb 100644 --- a/src/Core/Block.cpp +++ b/src/Core/Block.cpp @@ -498,6 +498,15 @@ Block Block::cloneWithColumns(MutableColumns && columns) const Block res; size_t num_columns = data.size(); + + if (num_columns != columns.size()) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Cannot clone block with columns because block has {} columns, but {} columns given", + num_columns, columns.size()); + } + res.reserve(num_columns); for (size_t i = 0; i < num_columns; ++i) @@ -514,8 +523,12 @@ Block Block::cloneWithColumns(const Columns & columns) const size_t num_columns = data.size(); if (num_columns != columns.size()) - throw Exception("Cannot clone block with columns because block has " + toString(num_columns) + " columns, " - "but " + toString(columns.size()) + " columns given.", ErrorCodes::LOGICAL_ERROR); + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Cannot clone block with columns because block has {} columns, but {} columns given", + num_columns, columns.size()); + } res.reserve(num_columns); diff --git a/src/Core/ColumnNumbers.h b/src/Core/ColumnNumbers.h index 9441f6485a7..29b4c49dc83 100644 --- a/src/Core/ColumnNumbers.h +++ b/src/Core/ColumnNumbers.h @@ -8,5 +8,6 @@ namespace DB { using ColumnNumbers = std::vector; +using ColumnNumbersList = std::vector; } diff --git a/src/Core/NamesAndTypes.h b/src/Core/NamesAndTypes.h index 3ac9ad2fa02..2719017a726 100644 --- a/src/Core/NamesAndTypes.h +++ b/src/Core/NamesAndTypes.h @@ -109,6 +109,8 @@ public: std::optional tryGetByName(const std::string & name) const; }; +using NamesAndTypesLists = std::vector; + } namespace std diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 30e01413e1b..2e6d657698c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -22,7 +22,7 @@ namespace DB { class IColumn; -static constexpr UInt64 operator""_Gb(unsigned long long value) +static constexpr UInt64 operator""_GiB(unsigned long long value) { return value * 1024 * 1024 * 1024; } @@ -120,6 +120,8 @@ static constexpr UInt64 operator""_Gb(unsigned long long value) M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ + M(Bool, compile_sort_description, true, "Compile sort description to native code.", 0) \ + M(UInt64, min_count_to_compile_sort_description, 3, "The number of identical sort descriptions before they are JIT-compiled", 0) \ M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \ M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. 
Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \ M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \ @@ -360,14 +362,14 @@ static constexpr UInt64 operator""_Gb(unsigned long long value) M(OverflowMode, distinct_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \ \ M(UInt64, max_memory_usage, 0, "Maximum memory usage for processing of single query. Zero means unlimited.", 0) \ - M(UInt64, max_guaranteed_memory_usage, 10_Gb, "Maximum guaranteed memory usage for processing of single query. It represents soft limit. Zero means unlimited.", 0) \ + M(UInt64, memory_overcommit_ratio_denominator, 1_GiB, "It represents soft memory limit on the user level. This value is used to compute query overcommit ratio.", 0) \ M(UInt64, max_memory_usage_for_user, 0, "Maximum memory usage for processing all concurrently running queries for the user. Zero means unlimited.", 0) \ - M(UInt64, max_guaranteed_memory_usage_for_user, 10_Gb, "Maximum guaranteed memory usage for processing all concurrently running queries for the user. It represents soft limit. Zero means unlimited.", 0) \ + M(UInt64, memory_overcommit_ratio_denominator_for_user, 1_GiB, "It represents soft memory limit on the global level. This value is used to compute query overcommit ratio.", 0) \ M(UInt64, max_untracked_memory, (4 * 1024 * 1024), "Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.", 0) \ M(UInt64, memory_profiler_step, (4 * 1024 * 1024), "Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down query processing.", 0) \ M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \ \ - M(UInt64, memory_usage_overcommit_max_wait_microseconds, 0, "Maximum time thread will wait for memory to be freed in the case of memory overcommit. If timeout is reached and memory is not freed, exception is thrown", 0) \ + M(UInt64, memory_usage_overcommit_max_wait_microseconds, 200, "Maximum time thread will wait for memory to be freed in the case of memory overcommit on user level. If timeout is reached and memory is not freed, exception is thrown.", 0) \ \ M(UInt64, max_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for a query. 
Zero means unlimited.", 0) \ M(UInt64, max_network_bytes, 0, "The maximum number of bytes (compressed) to receive or transmit over the network for execution of the query.", 0) \ @@ -379,7 +381,7 @@ static constexpr UInt64 operator""_Gb(unsigned long long value) \ M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \ M(Bool, log_query_settings, true, "Log query settings into the query_log.", 0) \ - M(Bool, log_query_threads, true, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \ + M(Bool, log_query_threads, false, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \ M(Bool, log_query_views, true, "Log query dependent views into system.query_views_log table. This setting have effect only when 'log_queries' is true.", 0) \ M(String, log_comment, "", "Log comment into system.query_log table and server log. It can be set to arbitrary string no longer than max_query_size.", 0) \ M(LogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \ diff --git a/src/Core/SortCursor.h b/src/Core/SortCursor.h index a0f60fbccf8..584556045e8 100644 --- a/src/Core/SortCursor.h +++ b/src/Core/SortCursor.h @@ -12,6 +12,11 @@ #include #include +#include "config_core.h" + +#if USE_EMBEDDED_COMPILER +#include +#endif namespace DB { @@ -49,6 +54,10 @@ struct SortCursorImpl */ IColumn::Permutation * permutation = nullptr; +#if USE_EMBEDDED_COMPILER + std::vector raw_sort_columns_data; +#endif + SortCursorImpl() = default; SortCursorImpl(const Block & block, const SortDescription & desc_, size_t order_ = 0, IColumn::Permutation * perm = nullptr) @@ -78,6 +87,9 @@ struct SortCursorImpl { all_columns.clear(); sort_columns.clear(); +#if USE_EMBEDDED_COMPILER + raw_sort_columns_data.clear(); +#endif size_t num_columns = columns.size(); @@ -90,6 +102,10 @@ struct SortCursorImpl size_t column_number = block.getPositionByName(column_desc.column_name); sort_columns.push_back(columns[column_number].get()); +#if USE_EMBEDDED_COMPILER + if (desc.compiled_sort_description) + raw_sort_columns_data.emplace_back(getColumnData(sort_columns.back())); +#endif need_collation[j] = desc[j].collator != nullptr && sort_columns.back()->isCollationSupported(); has_collation |= need_collation[j]; } @@ -164,17 +180,36 @@ struct SortCursor : SortCursorHelper /// The specified row of this cursor is greater than the specified row of another cursor. 
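The Settings.h hunk above renames the byte-size literal from `_Gb` to `_GiB` and lowers the overcommit defaults from `10_Gb` to `1_GiB`; the literal itself is just a constexpr multiplier by 1024 cubed. A standalone equivalent for reference:

``` cpp
#include <cstdint>

/// Standalone equivalent of the operator""_GiB literal declared in Settings.h:
/// binary gibibytes, i.e. value * 1024 * 1024 * 1024 bytes.
constexpr uint64_t operator""_GiB(unsigned long long value)
{
    return value * 1024 * 1024 * 1024;
}

static_assert(1_GiB == 1073741824ULL);
static_assert(10_GiB == 10737418240ULL);
```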
bool ALWAYS_INLINE greaterAt(const SortCursor & rhs, size_t lhs_pos, size_t rhs_pos) const { +#if USE_EMBEDDED_COMPILER + if (impl->desc.compiled_sort_description && rhs.impl->desc.compiled_sort_description) + { + assert(impl->raw_sort_columns_data.size() == rhs.impl->raw_sort_columns_data.size()); + + auto sort_description_func_typed = reinterpret_cast(impl->desc.compiled_sort_description); + int res = sort_description_func_typed(lhs_pos, rhs_pos, impl->raw_sort_columns_data.data(), rhs.impl->raw_sort_columns_data.data()); /// NOLINT + + if (res > 0) + return true; + if (res < 0) + return false; + + return impl->order > rhs.impl->order; + } +#endif + for (size_t i = 0; i < impl->sort_columns_size; ++i) { const auto & desc = impl->desc[i]; int direction = desc.direction; int nulls_direction = desc.nulls_direction; int res = direction * impl->sort_columns[i]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[i]), nulls_direction); + if (res > 0) return true; if (res < 0) return false; } + return impl->order > rhs.impl->order; } }; @@ -190,8 +225,26 @@ struct SimpleSortCursor : SortCursorHelper const auto & desc = impl->desc[0]; int direction = desc.direction; int nulls_direction = desc.nulls_direction; - int res = impl->sort_columns[0]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[0]), nulls_direction); - return res != 0 && ((res > 0) == (direction > 0)); + + bool result = false; + +#if USE_EMBEDDED_COMPILER + if (impl->desc.compiled_sort_description && rhs.impl->desc.compiled_sort_description) + { + assert(impl->raw_sort_columns_data.size() == rhs.impl->raw_sort_columns_data.size()); + + auto sort_description_func_typed = reinterpret_cast(impl->desc.compiled_sort_description); + int jit_result = sort_description_func_typed(lhs_pos, rhs_pos, impl->raw_sort_columns_data.data(), rhs.impl->raw_sort_columns_data.data()); /// NOLINT + result = jit_result > 0; + } + else +#endif + { + int non_jit_result = impl->sort_columns[0]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[0]), nulls_direction); + result = (non_jit_result != 0 && ((non_jit_result > 0) == (direction > 0))); + } + + return result; } }; diff --git a/src/Core/SortDescription.cpp b/src/Core/SortDescription.cpp index 7994ada7b85..e7fa6816807 100644 --- a/src/Core/SortDescription.cpp +++ b/src/Core/SortDescription.cpp @@ -2,6 +2,13 @@ #include #include #include +#include + +#if USE_EMBEDDED_COMPILER +#include +#include +#include +#endif namespace DB { @@ -35,6 +42,115 @@ void SortColumnDescription::explain(JSONBuilder::JSONMap & map) const map.add("With Fill", with_fill); } +#if USE_EMBEDDED_COMPILER + +static CHJIT & getJITInstance() +{ + static CHJIT jit; + return jit; +} + +class CompiledSortDescriptionFunctionHolder final : public CompiledExpressionCacheEntry +{ +public: + explicit CompiledSortDescriptionFunctionHolder(CompiledSortDescriptionFunction compiled_function_) + : CompiledExpressionCacheEntry(compiled_function_.compiled_module.size) + , compiled_sort_description_function(compiled_function_) + {} + + ~CompiledSortDescriptionFunctionHolder() override + { + getJITInstance().deleteCompiledModule(compiled_sort_description_function.compiled_module); + } + + CompiledSortDescriptionFunction compiled_sort_description_function; +}; + +static std::string getSortDescriptionDump(const SortDescription & description, const DataTypes & header_types) +{ + WriteBufferFromOwnString buffer; + + for (size_t i = 0; i < description.size(); ++i) + buffer << header_types[i]->getName() << ' ' << description[i].direction << ' ' << 
description[i].nulls_direction; + + return buffer.str(); +} + +static Poco::Logger * getLogger() +{ + static Poco::Logger & logger = Poco::Logger::get("SortDescription"); + return &logger; +} + +void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attemps) +{ + static std::unordered_map counter; + static std::mutex mutex; + + if (!description.compile_sort_description || sort_description_types.empty()) + return; + + for (const auto & type : sort_description_types) + { + if (!type->createColumn()->isComparatorCompilable() || !canBeNativeType(*type)) + return; + } + + auto description_dump = getSortDescriptionDump(description, sort_description_types); + + SipHash sort_description_dump_hash; + sort_description_dump_hash.update(description_dump); + + UInt128 sort_description_hash_key; + sort_description_dump_hash.get128(sort_description_hash_key); + + { + std::lock_guard lock(mutex); + UInt64 & current_counter = counter[sort_description_hash_key]; + if (current_counter < description.min_count_to_compile_sort_description) + { + current_counter += static_cast(increase_compile_attemps); + return; + } + } + + std::shared_ptr compiled_sort_description_holder; + + if (auto * compilation_cache = CompiledExpressionCacheFactory::instance().tryGetCache()) + { + auto [compiled_function_cache_entry, _] = compilation_cache->getOrSet(sort_description_hash_key, [&] () + { + LOG_TRACE(getLogger(), "Compile sort description {}", description_dump); + + auto compiled_sort_description = compileSortDescription(getJITInstance(), description, sort_description_types, description_dump); + return std::make_shared(std::move(compiled_sort_description)); + }); + + compiled_sort_description_holder = std::static_pointer_cast(compiled_function_cache_entry); + } + else + { + LOG_TRACE(getLogger(), "Compile sort description {}", description_dump); + auto compiled_sort_description = compileSortDescription(getJITInstance(), description, sort_description_types, description_dump); + compiled_sort_description_holder = std::make_shared(std::move(compiled_sort_description)); + } + + auto comparator_function = compiled_sort_description_holder->compiled_sort_description_function.comparator_function; + description.compiled_sort_description = reinterpret_cast(comparator_function); + description.compiled_sort_description_holder = std::move(compiled_sort_description_holder); +} + +#else + +void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attemps) +{ + (void)(description); + (void)(sort_description_types); + (void)(increase_compile_attemps); +} + +#endif + std::string dumpSortDescription(const SortDescription & description) { WriteBufferFromOwnString wb; diff --git a/src/Core/SortDescription.h b/src/Core/SortDescription.h index b86706e665a..3d4e3b665ee 100644 --- a/src/Core/SortDescription.h +++ b/src/Core/SortDescription.h @@ -89,10 +89,26 @@ struct SortColumnDescriptionWithColumnIndex } }; +class CompiledSortDescriptionFunctionHolder; + /// Description of the sorting rule for several columns. 
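compileSortDescriptionIfNeeded above compiles a sort description only after the same description (keyed by a hash of its dump) has been requested min_count_to_compile_sort_description times, and reuses the result through the compiled-expression cache. A much-simplified standalone illustration of that counting scheme; the real code uses SipHash, CHJIT and CompiledExpressionCache, and does not hold the counter lock while compiling:

``` cpp
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

/// Opaque stand-in for a compiled comparator module.
using Compiled = std::shared_ptr<void>;

Compiled compileIfHotEnough(const std::string & description_dump, uint64_t min_count, bool increase_attempts,
                            const std::function<Compiled()> & do_compile)
{
    static std::unordered_map<size_t, uint64_t> counters;
    static std::unordered_map<size_t, Compiled> cache;
    static std::mutex mutex;

    size_t key = std::hash<std::string>{}(description_dump);

    /// For brevity everything, including do_compile, runs under one lock here.
    std::lock_guard lock(mutex);

    if (auto it = cache.find(key); it != cache.end())
        return it->second;                      /// already compiled earlier

    uint64_t & counter = counters[key];
    if (counter < min_count)
    {
        counter += increase_attempts ? 1 : 0;   /// not hot enough yet, keep the interpreted comparator
        return nullptr;
    }

    return cache[key] = do_compile();
}
```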
-using SortDescription = std::vector; using SortDescriptionWithPositions = std::vector; +class SortDescription : public std::vector +{ +public: + /// Can be safely casted into JITSortDescriptionFunc + void * compiled_sort_description = nullptr; + std::shared_ptr compiled_sort_description_holder; + size_t min_count_to_compile_sort_description = 3; + bool compile_sort_description = false; +}; + +/** Compile sort description for header_types. + * Description is compiled only if compilation attempts to compile identical description is more than min_count_to_compile_sort_description. + */ +void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attemps); + /// Outputs user-readable description into `out`. void dumpSortDescription(const SortDescription & description, WriteBuffer & out); diff --git a/src/DataTypes/Native.h b/src/DataTypes/Native.h index 3a635d2e240..1950b1407df 100644 --- a/src/DataTypes/Native.h +++ b/src/DataTypes/Native.h @@ -47,7 +47,7 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDa return builder.getInt8Ty(); else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate()) return builder.getInt16Ty(); - else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDateTime()) + else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDate32() || data_type.isDateTime()) return builder.getInt32Ty(); else if (data_type.isInt64() || data_type.isUInt64()) return builder.getInt64Ty(); @@ -111,7 +111,8 @@ static inline bool canBeNativeType(const IDataType & type) return canBeNativeType(*data_type_nullable.getNestedType()); } - return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate() || data_type.isEnum(); + return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate() + || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum(); } static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type) @@ -264,11 +265,11 @@ static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builde { return llvm::ConstantFP::get(type, assert_cast &>(column).getElement(index)); } - else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime() || column_data_type.isDateTime64()) + else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime()) { return llvm::ConstantInt::get(type, column.getUInt(index)); } - else if (column_data_type.isNativeInt() || column_data_type.isEnum()) + else if (column_data_type.isNativeInt() || column_data_type.isEnum() || column_data_type.isDate32()) { return llvm::ConstantInt::get(type, column.getInt(index)); } diff --git a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp index d1e4b9fa21e..4be89389008 100644 --- a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp +++ b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp @@ -1,10 +1,10 @@ #include "AsynchronousReadIndirectBufferFromRemoteFS.h" #include +#include #include #include #include -#include namespace CurrentMetrics @@ -57,7 +57,6 @@ AsynchronousReadIndirectBufferFromRemoteFS::AsynchronousReadIndirectBufferFromRe ProfileEvents::increment(ProfileEvents::RemoteFSBuffers); } - String AsynchronousReadIndirectBufferFromRemoteFS::getFileName() const { return 
impl->getFileName(); @@ -169,6 +168,9 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() if (!hasPendingDataToRead()) return false; + Stopwatch watch; + CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait}; + size_t size = 0; if (prefetch_future.valid()) { @@ -176,15 +178,13 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() size_t offset = 0; { - Stopwatch watch; - CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait}; auto result = prefetch_future.get(); size = result.size; offset = result.offset; LOG_TEST(log, "Current size: {}, offset: {}", size, offset); /// If prefetch_future is valid, size should always be greater than zero. - assert(offset < size); + assert(offset <= size); ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds()); } @@ -201,7 +201,7 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() auto offset = result.offset; LOG_TEST(log, "Current size: {}, offset: {}", size, offset); - assert(offset < size); + assert(offset <= size); if (size) { @@ -210,6 +210,9 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() } } + watch.stop(); + ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds()); + file_offset_of_buffer_end = impl->getFileOffsetOfBufferEnd(); assert(file_offset_of_buffer_end == impl->getImplementationBufferOffset()); diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp index 7ffac5cd08e..e10b848bfeb 100644 --- a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp +++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp @@ -5,13 +5,23 @@ #include #include #include +#include namespace ProfileEvents { -extern const Event RemoteFSReadBytes; -extern const Event RemoteFSCacheReadBytes; -extern const Event RemoteFSCacheDownloadBytes; +extern const Event FileSegmentWaitReadBufferMicroseconds; +extern const Event FileSegmentReadMicroseconds; +extern const Event FileSegmentCacheWriteMicroseconds; +extern const Event FileSegmentPredownloadMicroseconds; +extern const Event FileSegmentUsedBytes; + +extern const Event CachedReadBufferReadFromSourceMicroseconds; +extern const Event CachedReadBufferReadFromCacheMicroseconds; +extern const Event CachedReadBufferCacheWriteMicroseconds; +extern const Event CachedReadBufferReadFromSourceBytes; +extern const Event CachedReadBufferReadFromCacheBytes; +extern const Event CachedReadBufferCacheWriteBytes; } namespace DB @@ -23,18 +33,12 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -static String getQueryId() -{ - if (!CurrentThread::isInitialized() || !CurrentThread::get().getQueryContext() || CurrentThread::getQueryId().size == 0) - return ""; - return CurrentThread::getQueryId().toString(); -} - CachedReadBufferFromRemoteFS::CachedReadBufferFromRemoteFS( const String & remote_fs_object_path_, FileCachePtr cache_, RemoteFSFileReaderCreator remote_file_reader_creator_, const ReadSettings & settings_, + const String & query_id_, size_t read_until_position_) : SeekableReadBuffer(nullptr, 0) #ifndef NDEBUG @@ -48,8 +52,9 @@ CachedReadBufferFromRemoteFS::CachedReadBufferFromRemoteFS( , settings(settings_) , read_until_position(read_until_position_) , remote_file_reader_creator(remote_file_reader_creator_) - , query_id(getQueryId()) - , enable_logging(!query_id.empty() && CurrentThread::get().getQueryContext()->getSettingsRef().enable_filesystem_cache_log) + , query_id(query_id_) + , 
enable_logging(!query_id.empty() && settings_.enable_filesystem_cache_log) + , current_buffer_id(getRandomASCIIString(8)) { } @@ -62,9 +67,15 @@ void CachedReadBufferFromRemoteFS::appendFilesystemCacheLog( .query_id = query_id, .source_file_path = remote_fs_object_path, .file_segment_range = { file_segment_range.left, file_segment_range.right }, + .requested_range = { first_offset, read_until_position }, .file_segment_size = file_segment_range.size(), + .cache_attempted = true, + .read_buffer_id = current_buffer_id, + .profile_counters = std::make_shared(current_file_segment_counters.getPartiallyAtomicSnapshot()), }; + current_file_segment_counters.reset(); + switch (type) { case CachedReadBufferFromRemoteFS::ReadType::CACHED: @@ -109,9 +120,16 @@ void CachedReadBufferFromRemoteFS::initialize(size_t offset, size_t size) SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getCacheReadBuffer(size_t offset) const { auto path = cache->getPathInLocalCache(cache_key, offset); - auto buf = std::make_shared(path, settings.local_fs_buffer_size); - if (buf->size() == 0) + + ReadSettings local_read_settings{settings}; + /// Do not allow to use asynchronous version of LocalFSReadMethod. + local_read_settings.local_fs_method = LocalFSReadMethod::pread; + + auto buf = createReadBufferFromFileBase(path, local_read_settings); + auto from_fd = dynamic_cast(buf.get()); + if (from_fd && from_fd->size() == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path); + return buf; } @@ -340,8 +358,13 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File auto range = file_segment->range(); bytes_to_predownload = 0; + Stopwatch watch(CLOCK_MONOTONIC); + auto read_buffer_for_file_segment = getReadBufferForFileSegment(file_segment); + watch.stop(); + current_file_segment_counters.increment(ProfileEvents::FileSegmentWaitReadBufferMicroseconds, watch.elapsedMicroseconds()); + [[maybe_unused]] auto download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE; assert(download_current_segment == file_segment->isDownloader()); @@ -362,7 +385,7 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File case ReadType::CACHED: { #ifndef NDEBUG - auto * file_reader = assert_cast(read_buffer_for_file_segment.get()); + auto * file_reader = dynamic_cast(read_buffer_for_file_segment.get()); size_t file_size = file_reader->size(); if (file_size == 0 || range.left + file_size <= file_offset_of_buffer_end) @@ -436,6 +459,9 @@ bool CachedReadBufferFromRemoteFS::completeFileSegmentAndGetNext() { LOG_TEST(log, "Completed segment: {}", (*current_file_segment_it)->range().toString()); + if (enable_logging) + appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type); + auto file_segment_it = current_file_segment_it++; auto & file_segment = *file_segment_it; @@ -460,15 +486,29 @@ bool CachedReadBufferFromRemoteFS::completeFileSegmentAndGetNext() if (read_type == ReadType::CACHED) (*current_file_segment_it)->incrementHitsCount(); - if (enable_logging) - appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type); LOG_TEST(log, "New segment: {}", (*current_file_segment_it)->range().toString()); return true; } +CachedReadBufferFromRemoteFS::~CachedReadBufferFromRemoteFS() +{ + if (enable_logging + && file_segments_holder + && current_file_segment_it != file_segments_holder->file_segments.end()) + { + appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type); + } +} + void 
CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment) { + Stopwatch predownload_watch(CLOCK_MONOTONIC); + SCOPE_EXIT({ + predownload_watch.stop(); + current_file_segment_counters.increment(ProfileEvents::FileSegmentPredownloadMicroseconds, predownload_watch.elapsedMicroseconds()); + }); + if (bytes_to_predownload) { /// Consider this case. Some user needed segment [a, b] and downloaded it partially. @@ -484,7 +524,19 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment) while (true) { - if (!bytes_to_predownload || implementation_buffer->eof()) + bool has_more_data; + { + Stopwatch watch(CLOCK_MONOTONIC); + + has_more_data = !implementation_buffer->eof(); + + watch.stop(); + auto elapsed = watch.elapsedMicroseconds(); + current_file_segment_counters.increment(ProfileEvents::FileSegmentReadMicroseconds, elapsed); + ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceMicroseconds, elapsed); + } + + if (!bytes_to_predownload || !has_more_data) { if (bytes_to_predownload) throw Exception( @@ -523,7 +575,7 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment) size_t current_impl_buffer_size = implementation_buffer->buffer().size(); size_t current_predownload_size = std::min(current_impl_buffer_size, bytes_to_predownload); - ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, current_impl_buffer_size); + ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size); if (file_segment->reserve(current_predownload_size)) { @@ -531,8 +583,15 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment) assert(file_segment->getDownloadOffset() == static_cast(implementation_buffer->getPosition())); + Stopwatch watch(CLOCK_MONOTONIC); + file_segment->write(implementation_buffer->buffer().begin(), current_predownload_size, current_offset); - ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, current_predownload_size); + + watch.stop(); + auto elapsed = watch.elapsedMicroseconds(); + current_file_segment_counters.increment(ProfileEvents::FileSegmentCacheWriteMicroseconds, elapsed); + ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteMicroseconds, elapsed); + ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteBytes, current_predownload_size); current_offset += current_predownload_size; @@ -668,18 +727,18 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() return false; SCOPE_EXIT({ - /// Save state of current file segment before it is completed. - nextimpl_step_log_info = getInfoForLog(); - - if (current_file_segment_it == file_segments_holder->file_segments.end()) - return; - - auto & file_segment = *current_file_segment_it; - - bool download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE; - if (download_current_segment) + try { - try + /// Save state of current file segment before it is completed. + nextimpl_step_log_info = getInfoForLog(); + + if (current_file_segment_it == file_segments_holder->file_segments.end()) + return; + + auto & file_segment = *current_file_segment_it; + + bool download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE; + if (download_current_segment) { bool need_complete_file_segment = file_segment->isDownloader(); if (need_complete_file_segment) @@ -688,13 +747,13 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() file_segment->completeBatchAndResetDownloader(); } } - catch (...) 
- { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - assert(!file_segment->isDownloader()); + assert(!file_segment->isDownloader()); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } }); bytes_to_predownload = 0; @@ -711,9 +770,6 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() if (read_type == ReadType::CACHED) (*current_file_segment_it)->incrementHitsCount(); - - if (enable_logging) - appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type); } assert(!internal_buffer.empty()); @@ -747,18 +803,17 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() auto download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE; if (download_current_segment != file_segment->isDownloader()) + { throw Exception( ErrorCodes::LOGICAL_ERROR, - "Incorrect segment state. Having read type: {}, Caller id: {}, downloader id: {}, file segment state: {}", - toString(read_type), - file_segment->getCallerId(), - file_segment->getDownloader(), - file_segment->state()); + "Incorrect segment state. Having read type: {}, file segment info: {}", + toString(read_type), file_segment->getInfoForLog()); + } if (!result) { #ifndef NDEBUG - if (auto * cache_file_reader = typeid_cast(implementation_buffer.get())) + if (auto * cache_file_reader = dynamic_cast(implementation_buffer.get())) { auto cache_file_size = cache_file_reader->size(); if (cache_file_size == 0) @@ -767,13 +822,26 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() } #endif + Stopwatch watch(CLOCK_MONOTONIC); + result = implementation_buffer->next(); + + watch.stop(); + auto elapsed = watch.elapsedMicroseconds(); + current_file_segment_counters.increment(ProfileEvents::FileSegmentReadMicroseconds, elapsed); + size = implementation_buffer->buffer().size(); if (read_type == ReadType::CACHED) - ProfileEvents::increment(ProfileEvents::RemoteFSCacheReadBytes, size); + { + ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheBytes, size); + ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheMicroseconds, elapsed); + } else - ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, size); + { + ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, size); + ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceMicroseconds, elapsed); + } } if (result) @@ -786,12 +854,18 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() { assert(file_segment->getDownloadOffset() == static_cast(implementation_buffer->getPosition())); + Stopwatch watch(CLOCK_MONOTONIC); + file_segment->write( needed_to_predownload ? 
implementation_buffer->position() : implementation_buffer->buffer().begin(), size, file_offset_of_buffer_end); - ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, size); + watch.stop(); + auto elapsed = watch.elapsedMicroseconds(); + current_file_segment_counters.increment(ProfileEvents::FileSegmentCacheWriteMicroseconds, elapsed); + ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteMicroseconds, elapsed); + ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteBytes, size); assert(file_segment->getDownloadOffset() <= file_segment->range().right + 1); assert( @@ -819,10 +893,13 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() } file_offset_of_buffer_end += size; + } swap(*implementation_buffer); + current_file_segment_counters.increment(ProfileEvents::FileSegmentUsedBytes, available()); + if (download_current_segment) file_segment->completeBatchAndResetDownloader(); @@ -851,7 +928,7 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() if (size == 0 && file_offset_of_buffer_end < read_until_position) { std::optional cache_file_size; - if (auto * cache_file_reader = dynamic_cast(implementation_buffer.get())) + if (auto * cache_file_reader = dynamic_cast(implementation_buffer.get())) cache_file_size = cache_file_reader->size(); throw Exception( diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.h b/src/Disks/IO/CachedReadBufferFromRemoteFS.h index 62bb070b437..c73114f50a5 100644 --- a/src/Disks/IO/CachedReadBufferFromRemoteFS.h +++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.h @@ -26,8 +26,11 @@ public: FileCachePtr cache_, RemoteFSFileReaderCreator remote_file_reader_creator_, const ReadSettings & settings_, + const String & query_id_, size_t read_until_position_); + ~CachedReadBufferFromRemoteFS() override; + bool nextImpl() override; off_t seek(off_t off, int whence) override; @@ -116,8 +119,10 @@ private: String query_id; bool enable_logging = false; + String current_buffer_id; CurrentMetrics::Increment metric_increment{CurrentMetrics::FilesystemCacheReadBuffers}; + ProfileEvents::Counters current_file_segment_counters; }; } diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 4399befa433..57b72d0190d 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -21,9 +21,11 @@ #include #include #include +#include namespace fs = std::filesystem; + namespace DB { @@ -32,17 +34,24 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -#if USE_AWS_S3 -SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const String & path, size_t file_size) +SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(const String & path, size_t file_size) +{ + if (!current_file_path.empty() && !with_cache && enable_cache_log) + { + appendFilesystemCacheLog(); + } + + current_file_path = fs::path(common_path_prefix) / path; + current_file_size = file_size; + total_bytes_read_from_current_file = 0; + + return createImplementationBufferImpl(path, file_size); +} + +#if USE_AWS_S3 +SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBufferImpl(const String & path, size_t file_size) { - current_path = path; auto remote_path = fs::path(common_path_prefix) / path; - - auto cache = settings.remote_fs_cache; - bool with_cache = cache - && settings.enable_filesystem_cache - && (!IFileCache::isReadOnly() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache); - auto 
remote_file_reader_creator = [=, this]() { return std::make_unique( @@ -53,7 +62,7 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S if (with_cache) { return std::make_shared( - remote_path, cache, remote_file_reader_creator, settings, read_until_position ? read_until_position : file_size); + remote_path, settings.remote_fs_cache, remote_file_reader_creator, settings, query_id, read_until_position ? read_until_position : file_size); } return remote_file_reader_creator(); @@ -62,24 +71,24 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S #if USE_AZURE_BLOB_STORAGE -SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementationBuffer(const String & path, size_t /* file_size */) +SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementationBufferImpl(const String & path, size_t /* file_size */) { - current_path = path; + current_file_path = path; return std::make_unique(blob_container_client, path, max_single_read_retries, max_single_download_retries, settings.remote_fs_buffer_size, /* use_external_buffer */true, read_until_position); } #endif -SeekableReadBufferPtr ReadBufferFromWebServerGather::createImplementationBuffer(const String & path, size_t /* file_size */) +SeekableReadBufferPtr ReadBufferFromWebServerGather::createImplementationBufferImpl(const String & path, size_t /* file_size */) { - current_path = path; + current_file_path = path; return std::make_unique(fs::path(uri) / path, context, settings, /* use_external_buffer */true, read_until_position); } #if USE_HDFS -SeekableReadBufferPtr ReadBufferFromHDFSGather::createImplementationBuffer(const String & path, size_t /* file_size */) +SeekableReadBufferPtr ReadBufferFromHDFSGather::createImplementationBufferImpl(const String & path, size_t /* file_size */) { return std::make_unique(hdfs_uri, fs::path(hdfs_directory) / path, config, settings.remote_fs_buffer_size); } @@ -94,8 +103,31 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather( , common_path_prefix(common_path_prefix_) , blobs_to_read(blobs_to_read_) , settings(settings_) + , query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ? 
CurrentThread::getQueryId() : "") , log(&Poco::Logger::get("ReadBufferFromRemoteFSGather")) + , enable_cache_log(!query_id.empty() && settings.enable_filesystem_cache_log) { + with_cache = settings.remote_fs_cache + && settings.enable_filesystem_cache + && (!IFileCache::isReadOnly() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache); +} + + +void ReadBufferFromRemoteFSGather::appendFilesystemCacheLog() +{ + FilesystemCacheLogElement elem + { + .event_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()), + .query_id = query_id, + .source_file_path = current_file_path, + .file_segment_range = { 0, current_file_size }, + .read_type = FilesystemCacheLogElement::ReadType::READ_FROM_FS_BYPASSING_CACHE, + .file_segment_size = total_bytes_read_from_current_file, + .cache_attempted = false, + }; + + if (auto cache_log = Context::getGlobalContextInstance()->getFilesystemCacheLog()) + cache_log->add(elem); } @@ -199,6 +231,7 @@ bool ReadBufferFromRemoteFSGather::readImpl() */ if (bytes_to_ignore) { + total_bytes_read_from_current_file += bytes_to_ignore; current_buf->ignore(bytes_to_ignore); result = current_buf->hasPendingData(); file_offset_of_buffer_end += bytes_to_ignore; @@ -225,6 +258,7 @@ bool ReadBufferFromRemoteFSGather::readImpl() { assert(available()); nextimpl_working_buffer_offset = offset(); + total_bytes_read_from_current_file += available(); } return result; @@ -254,7 +288,7 @@ void ReadBufferFromRemoteFSGather::reset() String ReadBufferFromRemoteFSGather::getFileName() const { - return current_path; + return current_file_path; } @@ -282,5 +316,12 @@ size_t ReadBufferFromRemoteFSGather::getImplementationBufferOffset() const return current_buf->getFileOffsetOfBufferEnd(); } +ReadBufferFromRemoteFSGather::~ReadBufferFromRemoteFSGather() +{ + if (!with_cache && enable_cache_log) + { + appendFilesystemCacheLog(); + } +} } diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index da2d1dee4b2..d6f12b2f45e 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -29,6 +29,9 @@ public: const std::string & common_path_prefix_, const BlobsPathToSize & blobs_to_read_, const ReadSettings & settings_); + + ~ReadBufferFromRemoteFSGather() override; + String getFileName() const; void reset(); @@ -54,7 +57,7 @@ public: size_t getImplementationBufferOffset() const; protected: - virtual SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) = 0; + virtual SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) = 0; std::string common_path_prefix; @@ -64,9 +67,16 @@ protected: size_t read_until_position = 0; - String current_path; + String current_file_path; + size_t current_file_size = 0; + + bool with_cache; + + String query_id; private: + SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size); + bool nextImpl() override; void initialize(); @@ -75,6 +85,8 @@ private: bool moveToNextBuffer(); + void appendFilesystemCacheLog(); + SeekableReadBufferPtr current_buf; size_t current_buf_idx = 0; @@ -89,6 +101,10 @@ private: size_t bytes_to_ignore = 0; Poco::Logger * log; + + size_t total_bytes_read_from_current_file = 0; + + bool enable_cache_log = false; }; @@ -113,7 +129,7 @@ public: { } - SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override; + SeekableReadBufferPtr createImplementationBufferImpl(const String & 
path, size_t file_size) override; private: std::shared_ptr client_ptr; @@ -143,7 +159,7 @@ public: { } - SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override; + SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override; private: std::shared_ptr blob_container_client; @@ -168,7 +184,7 @@ public: { } - SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override; + SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override; private: String uri; @@ -195,7 +211,7 @@ public: hdfs_uri = hdfs_uri_.substr(0, begin_of_path); } - SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override; + SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override; private: const Poco::Util::AbstractConfiguration & config; diff --git a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp index b1ae42d03d6..f8050b8a8b0 100644 --- a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp +++ b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp @@ -16,8 +16,8 @@ namespace ProfileEvents { - extern const Event RemoteFSReadMicroseconds; - extern const Event RemoteFSReadBytes; + extern const Event ThreadpoolReaderTaskMicroseconds; + extern const Event ThreadpoolReaderReadBytes; } namespace CurrentMetrics @@ -83,8 +83,8 @@ std::future ThreadPoolRemoteFSReader::submit(Reques watch.stop(); - ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds()); - ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, result.offset ? result.size - result.offset : result.size); + ProfileEvents::increment(ProfileEvents::ThreadpoolReaderTaskMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::ThreadpoolReaderReadBytes, result.offset ? result.size - result.offset : result.size); thread_status.detachQuery(/* if_not_detached */true); diff --git a/src/Disks/S3/ProxyResolverConfiguration.cpp b/src/Disks/S3/ProxyResolverConfiguration.cpp index bb558cf4b72..7449c5200de 100644 --- a/src/Disks/S3/ProxyResolverConfiguration.cpp +++ b/src/Disks/S3/ProxyResolverConfiguration.cpp @@ -63,7 +63,7 @@ Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfig { auto resolved_endpoint = endpoint; resolved_endpoint.setHost(resolved_hosts[i].toString()); - session = makeHTTPSession(endpoint, timeouts, false); + session = makeHTTPSession(resolved_endpoint, timeouts, false); try { diff --git a/src/Functions/FunctionsStringArray.h b/src/Functions/FunctionsStringArray.h index 6b3adf46ff5..2680816670f 100644 --- a/src/Functions/FunctionsStringArray.h +++ b/src/Functions/FunctionsStringArray.h @@ -477,7 +477,7 @@ public: ErrorCodes::ILLEGAL_COLUMN); if (!col->getValue().empty()) - re = Regexps::get(col->getValue()); + re = Regexps::get(col->getValue()); } @@ -560,7 +560,7 @@ public: + " of first argument of function " + getName() + ". Must be constant string.", ErrorCodes::ILLEGAL_COLUMN); - re = Regexps::get(col->getValue()); + re = Regexps::get(col->getValue()); capture = re->getNumberOfSubpatterns() > 0 ? 1 : 0; matches.resize(capture + 1); diff --git a/src/Functions/MatchImpl.h b/src/Functions/MatchImpl.h index 08ac47e692f..026b38b997b 100644 --- a/src/Functions/MatchImpl.h +++ b/src/Functions/MatchImpl.h @@ -24,12 +24,11 @@ namespace ErrorCodes /// Is the [I]LIKE expression reduced to finding a substring in a string? 
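The comment above marks the fast path that `likePatternIsStrstr` detects (and that this hunk tidies up): a LIKE pattern of the form `%needle%` with no other metacharacters can be answered with a plain substring search instead of compiling a regexp. A minimal standalone sketch of that reduction, with a made-up helper name and without the escape handling of the real function:

``` cpp
#include <optional>
#include <string>

/// If `pattern` looks like "%needle%" with no other LIKE metacharacters,
/// return the needle so the caller can fall back to a substring search.
std::optional<std::string> likeToSubstring(const std::string & pattern)
{
    if (pattern.size() < 2 || pattern.front() != '%' || pattern.back() != '%')
        return std::nullopt;

    std::string needle;
    needle.reserve(pattern.size() - 2);          /// at most the inner characters

    for (size_t i = 1; i + 1 < pattern.size(); ++i)
    {
        char c = pattern[i];
        if (c == '%' || c == '_' || c == '\\')   /// any other wildcard or escape disables the fast path
            return std::nullopt;
        needle += c;
    }
    return needle;
}
```

So `s LIKE '%clickhouse%'` degenerates to `s.find("clickhouse") != std::string::npos`; reserving `pattern.size() - 2` is also why the over-allocation this hunk removes (`pattern.size() * 2`) was unnecessary.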
static inline bool likePatternIsStrstr(const String & pattern, String & res) { - res = ""; - if (pattern.size() < 2 || pattern.front() != '%' || pattern.back() != '%') return false; - res.reserve(pattern.size() * 2); + res = ""; + res.reserve(pattern.size() - 2); const char * pos = pattern.data(); const char * end = pos + pattern.size(); @@ -81,7 +80,7 @@ struct MatchImpl static void vectorConstant( const ColumnString::Chars & data, const ColumnString::Offsets & offsets, - const std::string & pattern, + const String & pattern, const ColumnPtr & start_pos, PaddedPODArray & res) { @@ -92,14 +91,13 @@ struct MatchImpl if (offsets.empty()) return; - String strstr_pattern; - /// A simple case where the [I]LIKE expression reduces to finding a substring in a string + String strstr_pattern; if (like && likePatternIsStrstr(pattern, strstr_pattern)) { - const UInt8 * begin = data.data(); + const UInt8 * const begin = data.data(); + const UInt8 * const end = data.data() + data.size(); const UInt8 * pos = begin; - const UInt8 * end = pos + data.size(); /// The current index in the array of strings. size_t i = 0; @@ -137,7 +135,7 @@ struct MatchImpl auto regexp = Regexps::get(pattern); - std::string required_substring; + String required_substring; bool is_trivial; bool required_substring_is_prefix; /// for `anchored` execution of the regexp. @@ -172,9 +170,9 @@ struct MatchImpl { /// NOTE This almost matches with the case of LikePatternIsStrstr. - const UInt8 * begin = data.data(); + const UInt8 * const begin = data.data(); + const UInt8 * const end = data.begin() + data.size(); const UInt8 * pos = begin; - const UInt8 * end = pos + data.size(); /// The current index in the array of strings. size_t i = 0; @@ -230,6 +228,7 @@ struct MatchImpl ++i; } + /// Tail, in which there can be no substring. if (i < res.size()) memset(&res[i], revert, (res.size() - i) * sizeof(res[0])); } @@ -238,14 +237,14 @@ struct MatchImpl /// Very carefully crafted copy-paste. static void vectorFixedConstant( - const ColumnString::Chars & data, size_t n, const std::string & pattern, + const ColumnString::Chars & data, size_t n, const String & pattern, PaddedPODArray & res) { if (data.empty()) return; - String strstr_pattern; /// A simple case where the LIKE expression reduces to finding a substring in a string + String strstr_pattern; if (like && likePatternIsStrstr(pattern, strstr_pattern)) { const UInt8 * begin = data.data(); @@ -291,9 +290,9 @@ struct MatchImpl { size_t size = data.size() / n; - auto regexp = Regexps::get(pattern); + auto regexp = Regexps::get(pattern); - std::string required_substring; + String required_substring; bool is_trivial; bool required_substring_is_prefix; /// for `anchored` execution of the regexp. diff --git a/src/Functions/Regexps.h b/src/Functions/Regexps.h index 8e6d30c8e14..9a1938a3f32 100644 --- a/src/Functions/Regexps.h +++ b/src/Functions/Regexps.h @@ -44,23 +44,20 @@ namespace Regexps template inline Regexp createRegexp(const std::string & pattern, int flags) { - return {pattern, flags}; - } - - template <> - inline Regexp createRegexp(const std::string & pattern, int flags) - { - return {likePatternToRegexp(pattern), flags}; + if constexpr (like) + return {likePatternToRegexp(pattern), flags}; + else + return {pattern, flags}; } /** Returns holder of an object from Pool. * You must hold the ownership while using the object. * In destructor, it returns the object back to the Pool for further reuse. 
*/ - template + template inline Pool::Pointer get(const std::string & pattern) { - /// C++11 has thread-safe function-local static on most modern compilers. + /// the Singleton is thread-safe in C++11 static Pool known_regexps; /// Different variables for different pattern parameters. return known_regexps.get(pattern, [&pattern] diff --git a/src/Functions/array/CMakeLists.txt b/src/Functions/array/CMakeLists.txt index 9762674d6e9..c98f4430078 100644 --- a/src/Functions/array/CMakeLists.txt +++ b/src/Functions/array/CMakeLists.txt @@ -1,7 +1,7 @@ include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") add_headers_and_sources(clickhouse_functions_array .) add_library(clickhouse_functions_array ${clickhouse_functions_array_sources} ${clickhouse_functions_array_headers}) -target_link_libraries(clickhouse_functions_array PRIVATE dbms clickhouse_functions_gatherutils) +target_link_libraries(clickhouse_functions_array PRIVATE dbms clickhouse_functions_gatherutils ch_contrib::eigen) if (STRIP_DEBUG_SYMBOLS_FUNCTIONS) target_compile_options(clickhouse_functions_array PRIVATE "-g0") diff --git a/src/Functions/array/arrayDistance.cpp b/src/Functions/array/arrayDistance.cpp new file mode 100644 index 00000000000..a533cb2c0cc --- /dev/null +++ b/src/Functions/array/arrayDistance.cpp @@ -0,0 +1,247 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int LOGICAL_ERROR; + extern const int SIZES_OF_ARRAYS_DOESNT_MATCH; +} + +template +struct LpDistance +{ + static inline String name = "L" + std::to_string(N); + template + static void compute(const Eigen::MatrixX & left, const Eigen::MatrixX & right, PaddedPODArray & array) + { + auto norms = (left - right).colwise().template lpNorm(); + array.reserve(norms.size()); + // array.insert() failed to work with Eigen iterators + for (auto n : norms) + array.push_back(n); + } +}; + +struct LinfDistance : LpDistance +{ + static inline String name = "Linf"; +}; + +struct CosineDistance +{ + static inline String name = "Cosine"; + template + static void compute(const Eigen::MatrixX & left, const Eigen::MatrixX & right, PaddedPODArray & array) + { + auto prod = left.cwiseProduct(right).colwise().sum(); + auto nx = left.colwise().norm(); + auto ny = right.colwise().norm(); + auto nm = nx.cwiseProduct(ny).cwiseInverse(); + auto dist = 1.0 - prod.cwiseProduct(nm).array(); + array.reserve(dist.size()); + for (auto d : dist) + array.push_back(d); + } +}; + +template +class FunctionArrayDistance : public IFunction +{ +public: + static inline auto name = "array" + Kernel::name + "Distance"; + String getName() const override { return name; } + static FunctionPtr create(ContextPtr) { return std::make_shared>(); } + size_t getNumberOfArguments() const override { return 2; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + bool useDefaultImplementationForConstants() const override { return true; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + DataTypes types; + for (const auto & argument : arguments) + { + const auto * array_type = checkAndGetDataType(argument.type.get()); + if (!array_type) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument of function {} must be array.", getName()); + + types.push_back(array_type->getNestedType()); + } + const auto & common_type = 
getLeastSupertype(types); + switch (common_type->getTypeId()) + { + case TypeIndex::UInt8: + case TypeIndex::UInt16: + case TypeIndex::UInt32: + case TypeIndex::Int8: + case TypeIndex::Int16: + case TypeIndex::Int32: + case TypeIndex::Float32: + return std::make_shared(); + case TypeIndex::UInt64: + case TypeIndex::Int64: + case TypeIndex::Float64: + return std::make_shared(); + default: + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Arguments of function {} has nested type {}. " + "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + getName(), common_type->getName()); + } + } + + ColumnPtr + executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + { + DataTypePtr type_x = typeid_cast(arguments[0].type.get())->getNestedType(); + DataTypePtr type_y = typeid_cast(arguments[1].type.get())->getNestedType(); + + ColumnPtr col_x = arguments[0].column->convertToFullColumnIfConst(); + ColumnPtr col_y = arguments[1].column->convertToFullColumnIfConst(); + + const auto * arr_x = assert_cast(col_x.get()); + const auto * arr_y = assert_cast(col_y.get()); + + auto result = result_type->createColumn(); + switch (result_type->getTypeId()) + { + case TypeIndex::Float32: + executeWithType(*arr_x, *arr_y, type_x, type_y, result); + break; + case TypeIndex::Float64: + executeWithType(*arr_x, *arr_y, type_x, type_y, result); + break; + default: + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected result type."); + } + return result; + } + +private: + template + void executeWithType( + const ColumnArray & array_x, + const ColumnArray & array_y, + const DataTypePtr & type_x, + const DataTypePtr & type_y, + MutableColumnPtr & column) const + { + Eigen::MatrixX mx, my; + columnToMatrix(array_x, type_x, mx); + columnToMatrix(array_y, type_y, my); + + if (mx.rows() && my.rows() && mx.rows() != my.rows()) + { + throw Exception( + ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH, + "Arguments of function {} have different array sizes: {} and {}", + getName(), mx.rows(), my.rows()); + } + auto & data = assert_cast &>(*column).getData(); + Kernel::compute(mx, my, data); + } + + template + void columnToMatrix(const ColumnArray & array, const DataTypePtr & nested_type, Eigen::MatrixX & mat) const + { + const auto & offsets = array.getOffsets(); + size_t cols = offsets.size(); + size_t rows = cols > 0 ? 
offsets.front() : 0; + + ColumnArray::Offset prev = 0; + for (ColumnArray::Offset off : offsets) + { + if (off - prev != rows) + throw Exception( + ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH, + "Arrays in a column passed to function {} have different sizes: {} and {}", + getName(), rows, off - prev); + prev = off; + } + + switch (nested_type->getTypeId()) + { + case TypeIndex::UInt8: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::UInt16: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::UInt32: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::UInt64: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::Int8: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::Int16: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::Int32: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::Int64: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::Float32: + fillMatrix(mat, array, rows, cols); + break; + case TypeIndex::Float64: + fillMatrix(mat, array, rows, cols); + break; + default: + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Arguments of function {} has nested type {}. " + "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + getName(), nested_type->getName()); + } + } + + // optimize for float/ double + template + requires std::is_same_v + void fillMatrix(Eigen::MatrixX & mat, const ColumnArray & array, size_t rows, size_t cols) const + { + const auto & data = typeid_cast &>(array.getData()).getData(); + mat = Eigen::Map>(data.data(), rows, cols); + } + + template + void fillMatrix(Eigen::MatrixX & mat, const ColumnArray & array, size_t rows, size_t cols) const + { + const auto & data = typeid_cast &>(array.getData()).getData(); + mat.resize(rows, cols); + for (size_t col = 0; col < cols; ++col) + { + for (size_t row = 0; row < rows; ++row) + { + size_t off = col * rows; + mat(row, col) = static_cast(data[off + row]); + } + } + } +}; + +void registerFunctionArrayDistance(FunctionFactory & factory) +{ + factory.registerFunction>>(); + factory.registerFunction>>(); + factory.registerFunction>(); + factory.registerFunction>(); +} + +} diff --git a/src/Functions/array/arrayNorm.cpp b/src/Functions/array/arrayNorm.cpp new file mode 100644 index 00000000000..20fe85d7491 --- /dev/null +++ b/src/Functions/array/arrayNorm.cpp @@ -0,0 +1,205 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int LOGICAL_ERROR; +} + +template +struct LpNorm +{ + static inline String name = "L" + std::to_string(N); + template + static void compute(const std::vector> & vec, PaddedPODArray & array) + { + array.reserve(vec.size()); + for (const auto & v : vec) + { + array.push_back(v.template lpNorm()); + } + } +}; + +struct LinfNorm : LpNorm +{ + static inline String name = "Linf"; +}; + +template +class FunctionArrayNorm : public IFunction +{ +public: + static inline auto name = "array" + Kernel::name + "Norm"; + String getName() const override { return name; } + static FunctionPtr create(ContextPtr) { return std::make_shared>(); } + size_t getNumberOfArguments() const override { return 1; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + bool useDefaultImplementationForConstants() const override { return true; } + + DataTypePtr 
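Both new functions lean on the layout trick visible in `fillMatrix` above: because every array in the column has the same length, the flat data buffer of the `ColumnArray` can be viewed, without copying, as a column-major Eigen matrix with one column per array, and per-array norms and distances then fall out of column-wise reductions. A self-contained Eigen sketch of that idea (illustrative only, using fixed Float32 data rather than the templated column types):

``` cpp
#include <Eigen/Dense>
#include <iostream>
#include <vector>

int main()
{
    /// Two rows, each holding an array of dimension 3, stored flat as in a ColumnArray.
    std::vector<float> x = {1, 2, 3,   4, 5, 6};
    std::vector<float> y = {1, 2, 4,   4, 5, 5};
    const Eigen::Index dim = 3, count = 2;

    /// Column-major view onto the flat buffers: one matrix column per array, no copy.
    auto mx = Eigen::Map<const Eigen::MatrixXf>(x.data(), dim, count);
    auto my = Eigen::Map<const Eigen::MatrixXf>(y.data(), dim, count);

    /// arrayL2Norm-like: Euclidean norm of each array.
    Eigen::RowVectorXf norms = mx.colwise().lpNorm<2>();

    /// arrayL2Distance-like: Euclidean distance between corresponding arrays.
    Eigen::RowVectorXf dist = (mx - my).colwise().lpNorm<2>();

    std::cout << norms << "\n" << dist << "\n";   /// roughly "3.74 8.77" and "1 1"
    return 0;
}
```

Mapping instead of copying is what the specialized `fillMatrix` overload for Float32/Float64 above does; the other nested types go through the element-wise conversion loop.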
getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + DataTypes types; + for (const auto & argument : arguments) + { + const auto * array_type = checkAndGetDataType(argument.type.get()); + if (!array_type) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument of function {} must be array.", getName()); + + types.push_back(array_type->getNestedType()); + } + const auto & common_type = getLeastSupertype(types); + switch (common_type->getTypeId()) + { + case TypeIndex::UInt8: + case TypeIndex::UInt16: + case TypeIndex::UInt32: + case TypeIndex::Int8: + case TypeIndex::Int16: + case TypeIndex::Int32: + case TypeIndex::Float32: + return std::make_shared(); + case TypeIndex::UInt64: + case TypeIndex::Int64: + case TypeIndex::Float64: + return std::make_shared(); + default: + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Arguments of function {} has nested type {}. " + "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + getName(), common_type->getName()); + } + } + + ColumnPtr + executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + { + DataTypePtr type = typeid_cast(arguments[0].type.get())->getNestedType(); + ColumnPtr column = arguments[0].column->convertToFullColumnIfConst(); + const auto * arr = assert_cast(column.get()); + + auto result = result_type->createColumn(); + switch (result_type->getTypeId()) + { + case TypeIndex::Float32: + executeWithType(*arr, type, result); + break; + case TypeIndex::Float64: + executeWithType(*arr, type, result); + break; + default: + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected result type."); + } + return result; + } + +private: + template + void executeWithType(const ColumnArray & array, const DataTypePtr & type, MutableColumnPtr & column) const + { + std::vector> vec; + columnToVectors(array, type, vec); + auto & data = assert_cast &>(*column).getData(); + Kernel::compute(vec, data); + } + + template + void columnToVectors(const ColumnArray & array, const DataTypePtr & nested_type, std::vector> & vec) const + { + switch (nested_type->getTypeId()) + { + case TypeIndex::UInt8: + fillVectors(vec, array); + break; + case TypeIndex::UInt16: + fillVectors(vec, array); + break; + case TypeIndex::UInt32: + fillVectors(vec, array); + break; + case TypeIndex::UInt64: + fillVectors(vec, array); + break; + case TypeIndex::Int8: + fillVectors(vec, array); + break; + case TypeIndex::Int16: + fillVectors(vec, array); + break; + case TypeIndex::Int32: + fillVectors(vec, array); + break; + case TypeIndex::Int64: + fillVectors(vec, array); + break; + case TypeIndex::Float32: + fillVectors(vec, array); + break; + case TypeIndex::Float64: + fillVectors(vec, array); + break; + default: + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Arguments of function {} has nested type {}. 
" + "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.", + getName(), nested_type->getName()); + } + } + + template + requires std::is_same_v + void fillVectors(std::vector> & vec, const ColumnArray & array) const + { + const auto & data = typeid_cast &>(array.getData()).getData(); + const auto & offsets = array.getOffsets(); + vec.reserve(offsets.size()); + ColumnArray::Offset prev = 0; + for (auto off : offsets) + { + vec.emplace_back(Eigen::Map>(data.data() + prev, off - prev)); + prev = off; + } + } + + template + void fillVectors(std::vector> & vec, const ColumnArray & array) const + { + const auto & data = typeid_cast &>(array.getData()).getData(); + const auto & offsets = array.getOffsets(); + vec.reserve(offsets.size()); + + ColumnArray::Offset prev = 0; + for (auto off : offsets) + { + Eigen::VectorX mat(off - prev); + for (ColumnArray::Offset row = 0; row + prev < off; ++row) + { + mat[row] = static_cast(data[prev + row]); + } + prev = off; + vec.emplace_back(mat); + } + } +}; + +void registerFunctionArrayNorm(FunctionFactory & factory) +{ + factory.registerFunction>>(); + factory.registerFunction>>(); + factory.registerFunction>(); +} + +} diff --git a/src/Functions/array/registerFunctionsArray.cpp b/src/Functions/array/registerFunctionsArray.cpp index 3bb27cbadf9..e2e8b08fbf2 100644 --- a/src/Functions/array/registerFunctionsArray.cpp +++ b/src/Functions/array/registerFunctionsArray.cpp @@ -37,6 +37,8 @@ void registerFunctionArrayAUC(FunctionFactory &); void registerFunctionArrayReduceInRanges(FunctionFactory &); void registerFunctionMapOp(FunctionFactory &); void registerFunctionMapPopulateSeries(FunctionFactory &); +void registerFunctionArrayDistance(FunctionFactory &); +void registerFunctionArrayNorm(FunctionFactory &); void registerFunctionsArray(FunctionFactory & factory) { @@ -75,6 +77,8 @@ void registerFunctionsArray(FunctionFactory & factory) registerFunctionArrayAUC(factory); registerFunctionMapOp(factory); registerFunctionMapPopulateSeries(factory); + registerFunctionArrayDistance(factory); + registerFunctionArrayNorm(factory); } } diff --git a/src/Functions/extract.cpp b/src/Functions/extract.cpp index 0296602d205..5b138d19747 100644 --- a/src/Functions/extract.cpp +++ b/src/Functions/extract.cpp @@ -21,7 +21,7 @@ struct ExtractImpl res_data.reserve(data.size() / 5); res_offsets.resize(offsets.size()); - const auto & regexp = Regexps::get(pattern); + const auto & regexp = Regexps::get(pattern); unsigned capture = regexp->getNumberOfSubpatterns() > 0 ? 
1 : 0; OptimizedRegularExpression::MatchVec matches; diff --git a/src/Functions/extractAllGroups.h b/src/Functions/extractAllGroups.h index 057dedab6e4..e6d31e00616 100644 --- a/src/Functions/extractAllGroups.h +++ b/src/Functions/extractAllGroups.h @@ -95,7 +95,7 @@ public: throw Exception("Length of 'needle' argument must be greater than 0.", ErrorCodes::BAD_ARGUMENTS); using StringPiece = typename Regexps::Regexp::StringPieceType; - auto holder = Regexps::get(needle); + auto holder = Regexps::get(needle); const auto & regexp = holder->getRE2(); if (!regexp) diff --git a/src/Functions/extractGroups.cpp b/src/Functions/extractGroups.cpp index 2286951bb8f..c5b958ec345 100644 --- a/src/Functions/extractGroups.cpp +++ b/src/Functions/extractGroups.cpp @@ -63,7 +63,7 @@ public: if (needle.empty()) throw Exception(getName() + " length of 'needle' argument must be greater than 0.", ErrorCodes::BAD_ARGUMENTS); - auto regexp = Regexps::get(needle); + auto regexp = Regexps::get(needle); const auto & re2 = regexp->getRE2(); if (!re2) diff --git a/src/Functions/h3kRing.cpp b/src/Functions/h3kRing.cpp index a801fd299d6..a68f2a5e23d 100644 --- a/src/Functions/h3kRing.cpp +++ b/src/Functions/h3kRing.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -51,10 +52,10 @@ public: arg->getName(), 1, getName()); arg = arguments[1].get(); - if (!WhichDataType(arg).isUInt16()) + if (!WhichDataType(arg).isNativeUInt()) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of argument {} of function {}. Must be UInt16", + "Illegal type {} of argument {} of function {}. Must be unsigned native integer.", arg->getName(), 2, getName()); @@ -80,7 +81,8 @@ public: const auto & data_hindex = col_hindex->getData(); /// ColumnUInt16 is sufficient as the max value of 2nd arg is checked (arg > 0 < 10000) in implementation below - const auto * col_k = checkAndGetColumn(non_const_arguments[1].column.get()); + auto cast_result = castColumnAccurate(non_const_arguments[1], std::make_shared()); + const auto * col_k = checkAndGetColumn(cast_result.get()); if (!col_k) throw Exception( ErrorCodes::ILLEGAL_COLUMN, diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 9f53572fcde..7ed2f343209 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -74,8 +74,12 @@ namespace if (https) { #if USE_SSL - /// Cannot resolve host in advance, otherwise SNI won't work in Poco. - session = std::make_shared(host, port); + String resolved_host = resolve_host ? 
DNSResolver::instance().resolveHost(host).toString() : host; + auto https_session = std::make_shared(host, port); + if (resolve_host) + https_session->setResolvedHost(DNSResolver::instance().resolveHost(host).toString()); + + session = std::move(https_session); #else throw Exception("ClickHouse was built without HTTPS support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME); #endif diff --git a/src/IO/Progress.cpp b/src/IO/Progress.cpp index 1d16f54de7b..45710db5c92 100644 --- a/src/IO/Progress.cpp +++ b/src/IO/Progress.cpp @@ -68,10 +68,9 @@ bool Progress::incrementPiecewiseAtomically(const Progress & rhs) { read_rows += rhs.read_rows; read_bytes += rhs.read_bytes; - read_raw_bytes += rhs.read_raw_bytes; total_rows_to_read += rhs.total_rows_to_read; - total_raw_bytes_to_read += rhs.total_raw_bytes_to_read; + total_bytes_to_read += rhs.total_bytes_to_read; written_rows += rhs.written_rows; written_bytes += rhs.written_bytes; @@ -83,10 +82,9 @@ void Progress::reset() { read_rows = 0; read_bytes = 0; - read_raw_bytes = 0; total_rows_to_read = 0; - total_raw_bytes_to_read = 0; + total_bytes_to_read = 0; written_rows = 0; written_bytes = 0; @@ -98,10 +96,9 @@ ProgressValues Progress::getValues() const res.read_rows = read_rows.load(std::memory_order_relaxed); res.read_bytes = read_bytes.load(std::memory_order_relaxed); - res.read_raw_bytes = read_raw_bytes.load(std::memory_order_relaxed); res.total_rows_to_read = total_rows_to_read.load(std::memory_order_relaxed); - res.total_raw_bytes_to_read = total_raw_bytes_to_read.load(std::memory_order_relaxed); + res.total_bytes_to_read = total_bytes_to_read.load(std::memory_order_relaxed); res.written_rows = written_rows.load(std::memory_order_relaxed); res.written_bytes = written_bytes.load(std::memory_order_relaxed); @@ -109,16 +106,31 @@ ProgressValues Progress::getValues() const return res; } -ProgressValues Progress::fetchAndResetPiecewiseAtomically() +ProgressValues Progress::fetchValuesAndResetPiecewiseAtomically() { ProgressValues res; res.read_rows = read_rows.fetch_and(0); res.read_bytes = read_bytes.fetch_and(0); - res.read_raw_bytes = read_raw_bytes.fetch_and(0); res.total_rows_to_read = total_rows_to_read.fetch_and(0); - res.total_raw_bytes_to_read = total_raw_bytes_to_read.fetch_and(0); + res.total_bytes_to_read = total_bytes_to_read.fetch_and(0); + + res.written_rows = written_rows.fetch_and(0); + res.written_bytes = written_bytes.fetch_and(0); + + return res; +} + +Progress Progress::fetchAndResetPiecewiseAtomically() +{ + Progress res; + + res.read_rows = read_rows.fetch_and(0); + res.read_bytes = read_bytes.fetch_and(0); + + res.total_rows_to_read = total_rows_to_read.fetch_and(0); + res.total_bytes_to_read = total_bytes_to_read.fetch_and(0); res.written_rows = written_rows.fetch_and(0); res.written_bytes = written_bytes.fetch_and(0); @@ -130,10 +142,9 @@ Progress & Progress::operator=(Progress && other) noexcept { read_rows = other.read_rows.load(std::memory_order_relaxed); read_bytes = other.read_bytes.load(std::memory_order_relaxed); - read_raw_bytes = other.read_raw_bytes.load(std::memory_order_relaxed); total_rows_to_read = other.total_rows_to_read.load(std::memory_order_relaxed); - total_raw_bytes_to_read = other.total_raw_bytes_to_read.load(std::memory_order_relaxed); + total_bytes_to_read = other.total_bytes_to_read.load(std::memory_order_relaxed); written_rows = other.written_rows.load(std::memory_order_relaxed); written_bytes = other.written_bytes.load(std::memory_order_relaxed); @@ -149,6 +160,7 @@ void 
Progress::read(ReadBuffer & in, UInt64 server_revision) read_rows.store(values.read_rows, std::memory_order_relaxed); read_bytes.store(values.read_bytes, std::memory_order_relaxed); total_rows_to_read.store(values.total_rows_to_read, std::memory_order_relaxed); + written_rows.store(values.written_rows, std::memory_order_relaxed); written_bytes.store(values.written_bytes, std::memory_order_relaxed); } diff --git a/src/IO/Progress.h b/src/IO/Progress.h index 4f1a3df0ffd..f04822f26bb 100644 --- a/src/IO/Progress.h +++ b/src/IO/Progress.h @@ -18,10 +18,9 @@ struct ProgressValues { size_t read_rows; size_t read_bytes; - size_t read_raw_bytes; size_t total_rows_to_read; - size_t total_raw_bytes_to_read; + size_t total_bytes_to_read; size_t written_rows; size_t written_bytes; @@ -68,15 +67,12 @@ struct Progress { std::atomic read_rows {0}; /// Rows (source) processed. std::atomic read_bytes {0}; /// Bytes (uncompressed, source) processed. - std::atomic read_raw_bytes {0}; /// Raw bytes processed. /** How much rows/bytes must be processed, in total, approximately. Non-zero value is sent when there is information about * some new part of job. Received values must be summed to get estimate of total rows to process. - * `total_raw_bytes_to_process` is used for file table engine or when reading from file descriptor. - * Used for rendering progress bar on client. */ std::atomic total_rows_to_read {0}; - std::atomic total_raw_bytes_to_read {0}; + std::atomic total_bytes_to_read {0}; std::atomic written_rows {0}; std::atomic written_bytes {0}; @@ -93,7 +89,7 @@ struct Progress : written_rows(write_progress.written_rows), written_bytes(write_progress.written_bytes) {} explicit Progress(FileProgress file_progress) - : read_raw_bytes(file_progress.read_bytes), total_raw_bytes_to_read(file_progress.total_bytes_to_read) {} + : read_bytes(file_progress.read_bytes), total_bytes_to_read(file_progress.total_bytes_to_read) {} void read(ReadBuffer & in, UInt64 server_revision); @@ -109,7 +105,9 @@ struct Progress ProgressValues getValues() const; - ProgressValues fetchAndResetPiecewiseAtomically(); + ProgressValues fetchValuesAndResetPiecewiseAtomically(); + + Progress fetchAndResetPiecewiseAtomically(); Progress & operator=(Progress && other) noexcept; diff --git a/src/IO/ReadBufferFromFile.h b/src/IO/ReadBufferFromFile.h index 52b18b94616..a720f8dd36d 100644 --- a/src/IO/ReadBufferFromFile.h +++ b/src/IO/ReadBufferFromFile.h @@ -50,8 +50,6 @@ public: return file_name; } - Range getRemainingReadRange() const override { return Range{ .left = file_offset_of_buffer_end, .right = std::nullopt }; } - size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; } }; diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index ba1502fb9aa..a1d19c08087 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -49,6 +49,8 @@ public: return file_offset_of_buffer_end - (working_buffer.end() - pos); } + Range getRemainingReadRange() const override { return Range{ .left = file_offset_of_buffer_end, .right = std::nullopt }; } + /// If 'offset' is small enough to stay in buffer after seek, then true seek in file does not happen. off_t seek(off_t off, int whence) override; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 92346615a7a..52bddc57b48 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -57,7 +57,7 @@ struct ReadSettings /// Method to use reading from local filesystem. 
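The Progress changes above keep the existing draining idiom while splitting it into a values-returning and a Progress-returning variant: each counter is cleared with `fetch_and(0)`, which atomically returns the old value and stores zero, so no concurrent increment is lost, although the snapshot is only consistent per field (hence "piecewise"). A small standalone sketch of the idiom with two hypothetical counters:

``` cpp
#include <atomic>
#include <cstddef>
#include <iostream>
#include <utility>

struct Counters
{
    std::atomic<size_t> read_rows{0};
    std::atomic<size_t> read_bytes{0};

    /// Return the current values and reset them. Each field is atomic on its own,
    /// so an increment racing with this call is either seen now or in the next fetch.
    std::pair<size_t, size_t> fetchAndReset()
    {
        /// fetch_and(0) == "load the old value, store 0"; exchange(0) would work equally well.
        return {read_rows.fetch_and(0), read_bytes.fetch_and(0)};
    }
};

int main()
{
    Counters c;
    c.read_rows += 10;
    c.read_bytes += 4096;

    auto [rows, bytes] = c.fetchAndReset();
    std::cout << rows << " " << bytes << "\n";   /// 10 4096
    std::cout << c.read_rows << "\n";            /// 0
    return 0;
}
```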
LocalFSReadMethod local_fs_method = LocalFSReadMethod::pread; /// Method to use reading from remote filesystem. - RemoteFSReadMethod remote_fs_method = RemoteFSReadMethod::read; + RemoteFSReadMethod remote_fs_method = RemoteFSReadMethod::threadpool; size_t local_fs_buffer_size = DBMS_DEFAULT_BUFFER_SIZE; size_t remote_fs_buffer_size = DBMS_DEFAULT_BUFFER_SIZE; @@ -80,6 +80,7 @@ struct ReadSettings bool enable_filesystem_cache = true; size_t filesystem_cache_max_wait_sec = 1; bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; + bool enable_filesystem_cache_log = false; size_t remote_read_min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE; diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 342a512ee52..c25eed0e6c5 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -21,7 +21,7 @@ namespace ProfileEvents { extern const Event WriteBufferFromS3Bytes; - extern const Event RemoteFSCacheDownloadBytes; + extern const Event CachedReadBufferCacheWriteBytes; } namespace DB @@ -490,7 +490,7 @@ void WriteBufferFromS3::finalizeCacheIfNeeded(std::optional size_t size = (*file_segment_it)->finalizeWrite(); file_segment_it = file_segments.erase(file_segment_it); - ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, size); + ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteBytes, size); } catch (...) { diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 1ff82c8ea60..d11dfc3b8ad 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -214,7 +214,7 @@ public: ActionsDAGPtr clone() const; /// Execute actions for header. Input block must have empty columns. - /// Result should be equal to the execution of ExpressionActions build form this DAG. + /// Result should be equal to the execution of ExpressionActions built from this DAG. /// Actions are not changed, no expressions are compiled. /// /// In addition, check that result constants are constants according to DAG. diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index e7cf3c85a15..1806465db4a 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -991,9 +992,13 @@ public: } /// Only parameters that matter during merge. 
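A pattern that recurs throughout this patch (the `ThreadpoolReaderTaskMicroseconds` rename above, and the new `CachedReadBufferReadFrom*` and `FileSegment*` counters earlier) is timing one call with a monotonic stopwatch and attributing the same measurement both to a per-file-segment counter set and to the global profile events. A self-contained sketch of that double attribution, using std::chrono in place of Stopwatch/ProfileEvents and placeholder names:

``` cpp
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>

/// Stand-ins for the global ProfileEvents counters.
std::atomic<uint64_t> global_read_micros{0};
std::atomic<uint64_t> global_read_bytes{0};

uint64_t readSomeBytes()   /// placeholder for the wrapped read call
{
    return 4096;
}

int main()
{
    /// Per-segment counters, kept separately so they can later be dumped
    /// into a per-query log entry for just this file segment.
    uint64_t segment_micros = 0, segment_bytes = 0;

    auto start = std::chrono::steady_clock::now();   /// monotonic, like Stopwatch(CLOCK_MONOTONIC)
    uint64_t bytes = readSomeBytes();
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
                       std::chrono::steady_clock::now() - start).count();

    /// The same measurement feeds both scopes.
    segment_micros += elapsed;
    segment_bytes += bytes;
    global_read_micros += elapsed;
    global_read_bytes += bytes;

    std::printf("segment: %llu us, %llu bytes\n",
                (unsigned long long) segment_micros, (unsigned long long) segment_bytes);
    return 0;
}
```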
- Params(const Block & intermediate_header_, - const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_) - : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0) + Params( + const Block & intermediate_header_, + const ColumnNumbers & keys_, + const AggregateDescriptions & aggregates_, + bool overflow_row_, + size_t max_threads_) + : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, {}, {}) { intermediate_header = intermediate_header_; } diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index f4e49d3230d..f6ba9f95bbc 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -16,25 +16,27 @@ #include #include #include +#include #include + namespace DB { + namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int SET_SIZE_LIMIT_EXCEEDED; extern const int BAD_ARGUMENTS; } -namespace JoinStuff -{ + ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_) : context(context_) , table_join(table_join_) , slots(slots_) { - if (!slots_ || slots_ >= 256) + if (slots < 1 || 255 < slots) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid argument slot : {}", slots_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of slots should be [1, 255], got {}", slots); } for (size_t i = 0; i < slots; ++i) @@ -43,36 +45,44 @@ ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptrdata = std::make_unique(table_join_, right_sample_block, any_take_last_row_); hash_joins.emplace_back(std::move(inner_hash_join)); } - } -bool ConcurrentHashJoin::addJoinedBlock(const Block & block, bool check_limits) +bool ConcurrentHashJoin::addJoinedBlock(const Block & right_block, bool check_limits) { - Blocks dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, block); + Blocks dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block); - std::list pending_blocks; - for (size_t i = 0; i < dispatched_blocks.size(); ++i) - pending_blocks.emplace_back(i); - while (!pending_blocks.empty()) + size_t blocks_left = 0; + for (const auto & block : dispatched_blocks) { - for (auto iter = pending_blocks.begin(); iter != pending_blocks.end();) + if (block) + { + ++blocks_left; + } + } + + while (blocks_left > 0) + { + /// insert blocks into corresponding HashJoin instances + for (size_t i = 0; i < dispatched_blocks.size(); ++i) { - auto & i = *iter; auto & hash_join = hash_joins[i]; auto & dispatched_block = dispatched_blocks[i]; - if (hash_join->mutex.try_lock()) - { - if (!hash_join->data->addJoinedBlock(dispatched_block, check_limits)) - { - hash_join->mutex.unlock(); - return false; - } - hash_join->mutex.unlock(); - iter = pending_blocks.erase(iter); + if (dispatched_block) + { + /// if current hash_join is already processed by another thread, skip it and try later + std::unique_lock lock(hash_join->mutex, std::try_to_lock); + if (!lock.owns_lock()) + continue; + + bool limit_exceeded = !hash_join->data->addJoinedBlock(dispatched_block, check_limits); + + dispatched_block = {}; + blocks_left--; + + if (limit_exceeded) + return false; } - else - iter++; } } @@ -161,30 +171,32 @@ std::shared_ptr ConcurrentHashJoin::getNonJoinedBlocks( throw 
Exception(ErrorCodes::LOGICAL_ERROR, "Invalid join type. join kind: {}, strictness: {}", table_join->kind(), table_join->strictness()); } +static IColumn::Selector hashToSelector(const WeakHash32 & hash, size_t num_shards) +{ + const auto & data = hash.getData(); + size_t num_rows = data.size(); + + IColumn::Selector selector(num_rows); + for (size_t i = 0; i < num_rows; ++i) + selector[i] = data[i] % num_shards; + return selector; +} + Blocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block) { - Blocks result; - size_t num_shards = hash_joins.size(); size_t num_rows = from_block.rows(); size_t num_cols = from_block.columns(); - ColumnRawPtrs key_cols; + WeakHash32 hash(num_rows); for (const auto & key_name : key_columns_names) { - key_cols.push_back(from_block.getByName(key_name).column.get()); - } - IColumn::Selector selector(num_rows); - for (size_t i = 0; i < num_rows; ++i) - { - SipHash hash; - for (const auto & key_col : key_cols) - { - key_col->updateHashWithValue(i, hash); - } - selector[i] = hash.get64() % num_shards; + const auto & key_col = from_block.getByName(key_name).column; + key_col->updateWeakHash32(hash); } + auto selector = hashToSelector(hash, num_shards); + Blocks result; for (size_t i = 0; i < num_shards; ++i) { result.emplace_back(from_block.cloneEmpty()); @@ -203,4 +215,3 @@ Blocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, cons } } -} diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index 47fa2b2112f..fb226c39a0c 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -15,8 +15,7 @@ namespace DB { -namespace JoinStuff -{ + /** * Can run addJoinedBlock() parallelly to speedup the join process. On test, it almose linear speedup by * the degree of parallelism. @@ -33,6 +32,7 @@ namespace JoinStuff */ class ConcurrentHashJoin : public IJoin { + public: explicit ConcurrentHashJoin(ContextPtr context_, std::shared_ptr table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_ = false); ~ConcurrentHashJoin() override = default; @@ -49,6 +49,7 @@ public: bool supportParallelJoin() const override { return true; } std::shared_ptr getNonJoinedBlocks(const Block & left_sample_block, const Block & result_sample_block, UInt64 max_block_size) const override; + private: struct InternalHashJoin { @@ -71,5 +72,5 @@ private: Blocks dispatchBlock(const Strings & key_columns_names, const Block & from_block); }; -} + } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 78371002535..34f396b978c 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -535,6 +535,7 @@ ContextMutablePtr Context::createCopy(const ContextMutablePtr & other) Context::~Context() = default; InterserverIOHandler & Context::getInterserverIOHandler() { return shared->interserver_io_handler; } +const InterserverIOHandler & Context::getInterserverIOHandler() const { return shared->interserver_io_handler; } std::unique_lock Context::getLock() const { @@ -2226,7 +2227,7 @@ bool Context::hasAuxiliaryZooKeeper(const String & name) const return getConfigRef().has("auxiliary_zookeepers." 
+ name); } -InterserverCredentialsPtr Context::getInterserverCredentials() +InterserverCredentialsPtr Context::getInterserverCredentials() const { return shared->interserver_io_credentials.get(); } @@ -3417,6 +3418,7 @@ ReadSettings Context::getReadSettings() const res.enable_filesystem_cache = settings.enable_filesystem_cache; res.filesystem_cache_max_wait_sec = settings.filesystem_cache_max_wait_sec; res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; + res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; res.remote_read_min_bytes_for_seek = settings.remote_read_min_bytes_for_seek; diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index dbddda39aad..ddc474ca347 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -612,6 +612,7 @@ public: OutputFormatPtr getOutputFormatParallelIfPossible(const String & name, WriteBuffer & buf, const Block & sample) const; InterserverIOHandler & getInterserverIOHandler(); + const InterserverIOHandler & getInterserverIOHandler() const; /// How other servers can access this for downloading replicated data. void setInterserverIOAddress(const String & host, UInt16 port); @@ -619,7 +620,7 @@ public: /// Credentials which server will use to communicate with others void updateInterserverCredentials(const Poco::Util::AbstractConfiguration & config); - InterserverCredentialsPtr getInterserverCredentials(); + InterserverCredentialsPtr getInterserverCredentials() const; /// Interserver requests scheme (http or https) void setInterserverScheme(const String & scheme); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 0a156ba0b3e..e7325363c08 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -43,7 +43,9 @@ #include #include +#include +#include #include #include @@ -325,6 +327,12 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) { NameSet unique_keys; ASTs & group_asts = group_by_ast->children; + + /// For GROUPING SETS with multiple groups we always add virtual __grouping_set column + /// With set number, which is used as an additional key at the stage of merging aggregating data. + if (select_query->group_by_with_grouping_sets && group_asts.size() > 1) + aggregated_columns.emplace_back("__grouping_set", std::make_shared()); + for (ssize_t i = 0; i < static_cast(group_asts.size()); ++i) { ssize_t size = group_asts.size(); @@ -332,46 +340,105 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) if (getContext()->getSettingsRef().enable_positional_arguments) replaceForPositionalArguments(group_asts[i], select_query, ASTSelectQuery::Expression::GROUP_BY); - getRootActionsNoMakeSet(group_asts[i], temp_actions, false); - - const auto & column_name = group_asts[i]->getColumnName(); - - const auto * node = temp_actions->tryFindInIndex(column_name); - if (!node) - throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER); - - /// Only removes constant keys if it's an initiator or distributed_group_by_no_merge is enabled. - if (getContext()->getClientInfo().distributed_depth == 0 || settings.distributed_group_by_no_merge > 0) + if (select_query->group_by_with_grouping_sets) { - /// Constant expressions have non-null column pointer at this stage. 
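The `ConcurrentHashJoin::addJoinedBlock` rework a little further up combines two ideas: right-table rows are scattered across a fixed number of shard joins by `hash % num_shards` (now computed from the block's `WeakHash32` instead of a per-row SipHash loop), and each thread then inserts its dispatched blocks under a try-lock, skipping shards that another thread is currently filling and retrying them on the next sweep. A condensed standalone sketch of that insertion loop (`std::hash` stands in for the weak hash, and the shard is a trivial placeholder for a HashJoin):

``` cpp
#include <functional>
#include <mutex>
#include <string>
#include <vector>

struct Shard
{
    std::mutex mutex;
    std::vector<std::string> rows;   /// placeholder for a per-shard HashJoin
};

void addJoinedRows(std::vector<Shard> & shards, const std::vector<std::string> & input)
{
    const size_t num_shards = shards.size();

    /// 1. Dispatch: one bucket per shard, rows selected by hash % num_shards
    ///    (the real code hashes whole blocks and scatters them by a selector).
    std::vector<std::vector<std::string>> dispatched(num_shards);
    for (const auto & row : input)
        dispatched[std::hash<std::string>{}(row) % num_shards].push_back(row);

    size_t pending = 0;
    for (const auto & bucket : dispatched)
        if (!bucket.empty())
            ++pending;

    /// 2. Insert: sweep over the shards, skipping any whose mutex is held by
    ///    another thread instead of blocking on it, until nothing is pending.
    while (pending > 0)
    {
        for (size_t i = 0; i < num_shards; ++i)
        {
            if (dispatched[i].empty())
                continue;

            std::unique_lock lock(shards[i].mutex, std::try_to_lock);
            if (!lock.owns_lock())
                continue;   /// busy, retry on the next sweep

            shards[i].rows.insert(shards[i].rows.end(), dispatched[i].begin(), dispatched[i].end());
            dispatched[i].clear();
            --pending;
        }
    }
}

int main()
{
    std::vector<Shard> shards(4);
    addJoinedRows(shards, {"x", "y", "z", "x"});
    return 0;
}
```

The try-lock keeps every worker busy instead of queueing on one hot shard; correctness only needs each bucket to be inserted exactly once, which the `pending` countdown ensures.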
- if (node->column && isColumnConst(*node->column)) + ASTs group_elements_ast; + const ASTExpressionList * group_ast_element = group_asts[i]->as(); + group_elements_ast = group_ast_element->children; + + NamesAndTypesList grouping_set_list; + + for (ssize_t j = 0; j < ssize_t(group_elements_ast.size()); ++j) { - select_query->group_by_with_constant_keys = true; + getRootActionsNoMakeSet(group_elements_ast[j], temp_actions, false); - /// But don't remove last key column if no aggregate functions, otherwise aggregation will not work. - if (!aggregate_descriptions.empty() || size > 1) + ssize_t group_size = group_elements_ast.size(); + const auto & column_name = group_elements_ast[j]->getColumnName(); + const auto * node = temp_actions->tryFindInIndex(column_name); + if (!node) + throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER); + + /// Only removes constant keys if it's an initiator or distributed_group_by_no_merge is enabled. + if (getContext()->getClientInfo().distributed_depth == 0 || settings.distributed_group_by_no_merge > 0) { - if (i + 1 < static_cast(size)) - group_asts[i] = std::move(group_asts.back()); + /// Constant expressions have non-null column pointer at this stage. + if (node->column && isColumnConst(*node->column)) + { + select_query->group_by_with_constant_keys = true; - group_asts.pop_back(); + /// But don't remove last key column if no aggregate functions, otherwise aggregation will not work. + if (!aggregate_descriptions.empty() || group_size > 1) + { + if (j + 1 < static_cast(group_size)) + group_elements_ast[j] = std::move(group_elements_ast.back()); - --i; - continue; + group_elements_ast.pop_back(); + + --j; + continue; + } + } + } + + NameAndTypePair key{column_name, node->result_type}; + + grouping_set_list.push_back(key); + + /// Aggregation keys are unique. + if (!unique_keys.contains(key.name)) + { + unique_keys.insert(key.name); + aggregation_keys.push_back(key); + + /// Key is no longer needed, therefore we can save a little by moving it. + aggregated_columns.push_back(std::move(key)); } } + + aggregation_keys_list.push_back(std::move(grouping_set_list)); } - - NameAndTypePair key{column_name, node->result_type}; - - /// Aggregation keys are uniqued. - if (!unique_keys.contains(key.name)) + else { - unique_keys.insert(key.name); - aggregation_keys.push_back(key); + getRootActionsNoMakeSet(group_asts[i], temp_actions, false); - /// Key is no longer needed, therefore we can save a little by moving it. - aggregated_columns.push_back(std::move(key)); + const auto & column_name = group_asts[i]->getColumnName(); + const auto * node = temp_actions->tryFindInIndex(column_name); + if (!node) + throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER); + + /// Only removes constant keys if it's an initiator or distributed_group_by_no_merge is enabled. + if (getContext()->getClientInfo().distributed_depth == 0 || settings.distributed_group_by_no_merge > 0) + { + /// Constant expressions have non-null column pointer at this stage. + if (node->column && isColumnConst(*node->column)) + { + select_query->group_by_with_constant_keys = true; + + /// But don't remove last key column if no aggregate functions, otherwise aggregation will not work. 
+ if (!aggregate_descriptions.empty() || size > 1) + { + if (i + 1 < static_cast(size)) + group_asts[i] = std::move(group_asts.back()); + + group_asts.pop_back(); + + --i; + continue; + } + } + } + + NameAndTypePair key{column_name, node->result_type}; + + /// Aggregation keys are uniqued. + if (!unique_keys.contains(key.name)) + { + unique_keys.insert(key.name); + aggregation_keys.push_back(key); + + /// Key is no longer needed, therefore we can save a little by moving it. + aggregated_columns.push_back(std::move(key)); + } } } @@ -726,6 +793,8 @@ void makeWindowDescriptionFromAST(const Context & context, void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) { + auto current_context = getContext(); + // Window definitions from the WINDOW clause const auto * select_query = query->as(); if (select_query && select_query->window()) @@ -735,7 +804,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) const auto & elem = ptr->as(); WindowDescription desc; desc.window_name = elem.name; - makeWindowDescriptionFromAST(*getContext(), window_descriptions, + makeWindowDescriptionFromAST(*current_context, window_descriptions, desc, elem.definition.get()); auto [it, inserted] = window_descriptions.insert( @@ -820,7 +889,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) const ASTWindowDefinition &>(); WindowDescription desc; desc.window_name = definition.getDefaultWindowName(); - makeWindowDescriptionFromAST(*getContext(), window_descriptions, + makeWindowDescriptionFromAST(*current_context, window_descriptions, desc, &definition); auto [it, inserted] = window_descriptions.insert( @@ -835,6 +904,18 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) it->second.window_functions.push_back(window_function); } } + + bool compile_sort_description = current_context->getSettingsRef().compile_sort_description; + size_t min_count_to_compile_sort_description = current_context->getSettingsRef().min_count_to_compile_sort_description; + + for (auto & [_, window_description] : window_descriptions) + { + window_description.full_sort_description.compile_sort_description = compile_sort_description; + window_description.full_sort_description.min_count_to_compile_sort_description = min_count_to_compile_sort_description; + + window_description.partition_by.compile_sort_description = compile_sort_description; + window_description.partition_by.min_count_to_compile_sort_description = min_count_to_compile_sort_description; + } } @@ -939,7 +1020,7 @@ static std::shared_ptr chooseJoinAlgorithm(std::shared_ptr ana { if (analyzed_join->allowParallelHashJoin()) { - return std::make_shared(context, analyzed_join, context->getSettings().max_threads, sample_block); + return std::make_shared(context, analyzed_join, context->getSettings().max_threads, sample_block); } return std::make_shared(analyzed_join, sample_block); } @@ -1169,10 +1250,24 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain ExpressionActionsChain::Step & step = chain.lastStep(columns_after_join); ASTs asts = select_query->groupBy()->children; - for (const auto & ast : asts) + if (select_query->group_by_with_grouping_sets) { - step.addRequiredOutput(ast->getColumnName()); - getRootActions(ast, only_types, step.actions()); + for (const auto & ast : asts) + { + for (const auto & ast_element : ast->children) + { + step.addRequiredOutput(ast_element->getColumnName()); + getRootActions(ast_element, only_types, step.actions()); + } + } + } + else + { + for 
(const auto & ast : asts) + { + step.addRequiredOutput(ast->getColumnName()); + getRootActions(ast, only_types, step.actions()); + } } if (optimize_aggregation_in_order) @@ -1584,6 +1679,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( , second_stage(second_stage_) , need_aggregate(query_analyzer.hasAggregation()) , has_window(query_analyzer.hasWindow()) + , use_grouping_set_key(query_analyzer.useGroupingSetKey()) { /// first_stage: Do I need to perform the first part of the pipeline - running on remote servers during distributed processing. /// second_stage: Do I need to execute the second part of the pipeline - running on the initiating server during distributed processing. diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 0fbd9cdaac1..b3704095c92 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -64,6 +64,7 @@ struct ExpressionAnalyzerData bool has_aggregation = false; NamesAndTypesList aggregation_keys; + NamesAndTypesLists aggregation_keys_list; bool has_const_aggregation_keys = false; AggregateDescriptions aggregate_descriptions; @@ -221,6 +222,8 @@ struct ExpressionAnalysisResult bool optimize_aggregation_in_order = false; bool join_has_delayed_stream = false; + bool use_grouping_set_key = false; + ActionsDAGPtr before_array_join; ArrayJoinActionPtr array_join; ActionsDAGPtr before_join; @@ -321,8 +324,11 @@ public: bool hasGlobalSubqueries() { return has_global_subqueries; } bool hasTableJoin() const { return syntax->ast_join; } + bool useGroupingSetKey() const { return aggregation_keys_list.size() > 1; } + const NamesAndTypesList & aggregationKeys() const { return aggregation_keys; } bool hasConstAggregationKeys() const { return has_const_aggregation_keys; } + const NamesAndTypesLists & aggregationKeysList() const { return aggregation_keys_list; } const AggregateDescriptions & aggregates() const { return aggregate_descriptions; } std::unique_ptr getJoinedPlan(); diff --git a/src/Interpreters/FilesystemCacheLog.cpp b/src/Interpreters/FilesystemCacheLog.cpp index 9eec65a38ab..609305321b1 100644 --- a/src/Interpreters/FilesystemCacheLog.cpp +++ b/src/Interpreters/FilesystemCacheLog.cpp @@ -2,6 +2,8 @@ #include #include #include +#include +#include #include @@ -34,9 +36,13 @@ NamesAndTypesList FilesystemCacheLogElement::getNamesAndTypes() {"event_time", std::make_shared()}, {"query_id", std::make_shared()}, {"source_file_path", std::make_shared()}, - {"file_segment_range", std::make_shared(std::move(types))}, + {"file_segment_range", std::make_shared(types)}, + {"total_requested_range", std::make_shared(types)}, {"size", std::make_shared()}, {"read_type", std::make_shared()}, + {"cache_attempted", std::make_shared()}, + {"ProfileEvents", std::make_shared(std::make_shared(), std::make_shared())}, + {"read_buffer_id", std::make_shared()}, }; } @@ -51,8 +57,22 @@ void FilesystemCacheLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(source_file_path); columns[i++]->insert(Tuple{file_segment_range.first, file_segment_range.second}); + columns[i++]->insert(Tuple{requested_range.first, requested_range.second}); columns[i++]->insert(file_segment_size); columns[i++]->insert(typeToString(read_type)); + columns[i++]->insert(cache_attempted); + + if (profile_counters) + { + auto * column = columns[i++].get(); + ProfileEvents::dumpToMapColumn(*profile_counters, column, true); + } + else + { + columns[i++]->insertDefault(); + } + + columns[i++]->insert(read_buffer_id); 
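For GROUPING SETS, the analyzer changes above collect one key list per set (`aggregation_keys_list`) on top of the union of all keys, and the plan-building code added later in this section (`getAggregatorGroupingSetsParams`) turns that into, for every set, the key positions it uses plus the positions it lacks, so the missing keys can be defaulted and the rows tagged with the virtual `__grouping_set` key used when merging. A small sketch of that bookkeeping over plain strings (hypothetical names, detached from the real Aggregator types):

``` cpp
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct GroupingSetParams
{
    std::vector<size_t> key_positions;     /// positions (in the key union) this set groups by
    std::vector<size_t> missing_positions; /// positions that must be defaulted for this set
};

std::vector<GroupingSetParams> buildParams(
    const std::vector<std::string> & all_keys,
    const std::vector<std::vector<std::string>> & grouping_sets)
{
    std::vector<GroupingSetParams> result;
    for (const auto & set_keys : grouping_sets)
    {
        GroupingSetParams params;
        std::unordered_set<std::string> present(set_keys.begin(), set_keys.end());

        for (size_t i = 0; i < all_keys.size(); ++i)
        {
            if (present.contains(all_keys[i]))
                params.key_positions.push_back(i);
            else
                params.missing_positions.push_back(i);
        }
        result.push_back(std::move(params));
    }
    return result;
}

int main()
{
    /// GROUP BY GROUPING SETS ((a, b), (a), ())
    auto params = buildParams({"a", "b"}, {{"a", "b"}, {"a"}, {}});
    for (size_t set = 0; set < params.size(); ++set)
        std::cout << "set " << set << ": " << params[set].missing_positions.size()
                  << " key(s) filled with defaults\n";   /// 0, 1, 2
    return 0;
}
```

For `GROUPING SETS ((a, b), (a), ())` this yields zero, one and two defaulted key positions respectively, which is what lets the partial aggregations share one pipeline while staying distinguishable by `__grouping_set`.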
} }; diff --git a/src/Interpreters/FilesystemCacheLog.h b/src/Interpreters/FilesystemCacheLog.h index 208f53a76cf..77bae7d788a 100644 --- a/src/Interpreters/FilesystemCacheLog.h +++ b/src/Interpreters/FilesystemCacheLog.h @@ -37,8 +37,12 @@ struct FilesystemCacheLogElement String source_file_path; std::pair file_segment_range{}; + std::pair requested_range{}; ReadType read_type{}; size_t file_segment_size; + bool cache_attempted; + String read_buffer_id; + std::shared_ptr profile_counters; static std::string name() { return "FilesystemCacheLog"; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 672c5d99d0b..6bfadc66352 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -81,6 +81,8 @@ #include #include #include +#include +#include #include #include #include @@ -736,6 +738,9 @@ Block InterpreterSelectQuery::getSampleBlockImpl() Block res; + if (analysis_result.use_grouping_set_key) + res.insert({ nullptr, std::make_shared(), "__grouping_set" }); + for (const auto & key : query_analyzer->aggregationKeys()) res.insert({nullptr, header.getByName(key.name).type, key.name}); @@ -858,6 +863,9 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP order_descr.emplace_back(name, order_by_elem.direction, order_by_elem.nulls_direction, collator); } + order_descr.compile_sort_description = context->getSettingsRef().compile_sort_description; + order_descr.min_count_to_compile_sort_description = context->getSettingsRef().min_count_to_compile_sort_description; + return order_descr; } @@ -1082,6 +1090,11 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

QueryProcessingStage::WithMergeableState && !query.group_by_with_totals && !query.group_by_with_rollup && !query.group_by_with_cube; + bool use_grouping_set_key = expressions.use_grouping_set_key; + + if (query.group_by_with_grouping_sets && query.group_by_with_totals) + throw Exception("WITH TOTALS and GROUPING SETS are not supported together", ErrorCodes::NOT_IMPLEMENTED); + if (query_info.projection && query_info.projection->desc->type == ProjectionDescription::Type::Aggregate) { query_info.projection->aggregate_overflow_row = aggregate_overflow_row; @@ -1196,7 +1209,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

aggregate_overflow_row, query_info.projection->aggregate_final, false, + false, context_->getSettingsRef(), query_info.projection->aggregation_keys, query_info.projection->aggregate_descriptions); @@ -2165,7 +2181,6 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc } } - void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter) { auto where_step = std::make_unique( @@ -2175,6 +2190,80 @@ void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsD query_plan.addStep(std::move(where_step)); } +static Aggregator::Params getAggregatorParams( + const ASTPtr & query_ptr, + const SelectQueryExpressionAnalyzer & query_analyzer, + const Context & context, + const Block & current_data_stream_header, + const ColumnNumbers & keys, + const AggregateDescriptions & aggregates, + bool overflow_row, const Settings & settings, + size_t group_by_two_level_threshold, size_t group_by_two_level_threshold_bytes) +{ + const auto stats_collecting_params = Aggregator::Params::StatsCollectingParams( + query_ptr, + settings.collect_hash_table_stats_during_aggregation, + settings.max_entries_for_hash_table_stats, + settings.max_size_to_preallocate_for_aggregation); + + return Aggregator::Params{ + current_data_stream_header, + keys, + aggregates, + overflow_row, + settings.max_rows_to_group_by, + settings.group_by_overflow_mode, + group_by_two_level_threshold, + group_by_two_level_threshold_bytes, + settings.max_bytes_before_external_group_by, + settings.empty_result_for_aggregation_by_empty_set + || (settings.empty_result_for_aggregation_by_constant_keys_on_empty_set && keys.empty() + && query_analyzer.hasConstAggregationKeys()), + context.getTemporaryVolume(), + settings.max_threads, + settings.min_free_disk_space_for_temporary_data, + settings.compile_aggregate_expressions, + settings.min_count_to_compile_aggregate_expression, + Block{}, + stats_collecting_params + }; +} + +static GroupingSetsParamsList getAggregatorGroupingSetsParams( + const SelectQueryExpressionAnalyzer & query_analyzer, + const Block & header_before_aggregation, + const ColumnNumbers & all_keys +) +{ + GroupingSetsParamsList result; + if (query_analyzer.useGroupingSetKey()) + { + auto const & aggregation_keys_list = query_analyzer.aggregationKeysList(); + + ColumnNumbersList grouping_sets_with_keys; + ColumnNumbersList missing_columns_per_set; + + for (const auto & aggregation_keys : aggregation_keys_list) + { + ColumnNumbers keys; + std::unordered_set keys_set; + for (const auto & key : aggregation_keys) + { + keys.push_back(header_before_aggregation.getPositionByName(key.name)); + keys_set.insert(keys.back()); + } + + ColumnNumbers missing_indexes; + for (size_t i = 0; i < all_keys.size(); ++i) + { + if (!keys_set.contains(all_keys[i])) + missing_indexes.push_back(i); + } + result.emplace_back(std::move(keys), std::move(missing_indexes)); + } + } + return result; +} void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool overflow_row, bool final, InputOrderInfoPtr group_by_info) { @@ -2186,9 +2275,6 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac return; const auto & header_before_aggregation = query_plan.getCurrentDataStream().header; - ColumnNumbers keys; - for (const auto & key : query_analyzer->aggregationKeys()) - keys.push_back(header_before_aggregation.getPositionByName(key.name)); AggregateDescriptions aggregates = 
query_analyzer->aggregates(); for (auto & descr : aggregates) @@ -2198,32 +2284,14 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac const Settings & settings = context->getSettingsRef(); - const auto stats_collecting_params = Aggregator::Params::StatsCollectingParams( - query_ptr, - settings.collect_hash_table_stats_during_aggregation, - settings.max_entries_for_hash_table_stats, - settings.max_size_to_preallocate_for_aggregation); + ColumnNumbers keys; + for (const auto & key : query_analyzer->aggregationKeys()) + keys.push_back(header_before_aggregation.getPositionByName(key.name)); - Aggregator::Params params( - header_before_aggregation, - keys, - aggregates, - overflow_row, - settings.max_rows_to_group_by, - settings.group_by_overflow_mode, - settings.group_by_two_level_threshold, - settings.group_by_two_level_threshold_bytes, - settings.max_bytes_before_external_group_by, - settings.empty_result_for_aggregation_by_empty_set - || (settings.empty_result_for_aggregation_by_constant_keys_on_empty_set && keys.empty() - && query_analyzer->hasConstAggregationKeys()), - context->getTemporaryVolume(), - settings.max_threads, - settings.min_free_disk_space_for_temporary_data, - settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression, - Block{}, - stats_collecting_params); + auto aggregator_params = getAggregatorParams(query_ptr, *query_analyzer, *context, header_before_aggregation, keys, aggregates, overflow_row, settings, + settings.group_by_two_level_threshold, settings.group_by_two_level_threshold_bytes); + + auto grouping_sets_params = getAggregatorGroupingSetsParams(*query_analyzer, header_before_aggregation, keys); SortDescription group_by_sort_description; @@ -2241,7 +2309,8 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac auto aggregating_step = std::make_unique( query_plan.getCurrentDataStream(), - params, + std::move(aggregator_params), + std::move(grouping_sets_params), final, settings.max_block_size, settings.aggregation_in_order_max_block_bytes, @@ -2250,11 +2319,10 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac storage_has_evenly_distributed_read, std::move(group_by_info), std::move(group_by_sort_description)); - query_plan.addStep(std::move(aggregating_step)); } -void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final) +void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final, bool has_grouping_sets) { /// If aggregate projection was chosen for table, avoid adding MergeAggregated. /// It is already added by storage (because of performance issues). 
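`getAggregatorGroupingSetsParams` above reduces each GROUPING SETS key list to two position lists: the header positions of the keys the set actually uses, and the indexes (within the union of all keys) that it leaves out; the missing keys are later filled with default constants and the set index is written into the `__grouping_set` column in `AggregatingStep`. Below is a small self-contained sketch of that mapping over plain strings; the toy header and `buildParams` are illustrative names, not the real interfaces.

``` cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct GroupingSetParams
{
    std::vector<size_t> used_keys;    // positions of the set's keys in the header
    std::vector<size_t> missing_keys; // indexes into the union key list that this set lacks
};

// Mirrors the shape of getAggregatorGroupingSetsParams: for every grouping set,
// record which header positions it aggregates by and which union keys are absent.
std::vector<GroupingSetParams> buildParams(
    const std::vector<std::string> & header,
    const std::vector<std::string> & all_keys,
    const std::vector<std::vector<std::string>> & grouping_sets)
{
    auto position = [&](const std::string & name)
    {
        return static_cast<size_t>(std::find(header.begin(), header.end(), name) - header.begin());
    };

    std::vector<GroupingSetParams> result;
    for (const auto & set_keys : grouping_sets)
    {
        GroupingSetParams params;
        std::unordered_set<std::string> present(set_keys.begin(), set_keys.end());

        for (const auto & key : set_keys)
            params.used_keys.push_back(position(key));

        for (size_t i = 0; i < all_keys.size(); ++i)
            if (!present.contains(all_keys[i]))
                params.missing_keys.push_back(i);

        result.push_back(std::move(params));
    }
    return result;
}

int main()
{
    // GROUP BY GROUPING SETS ((a, b), (a), ()) over a header [a, b, value]
    auto params = buildParams({"a", "b", "value"}, {"a", "b"}, {{"a", "b"}, {"a"}, {}});
    for (const auto & p : params)
        std::cout << p.used_keys.size() << " used, " << p.missing_keys.size() << " missing\n";
    return 0;
}
```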
@@ -2268,6 +2336,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, final, storage && storage->isRemote(), + has_grouping_sets, context->getSettingsRef(), query_analyzer->aggregationKeys(), query_analyzer->aggregates()); @@ -2302,47 +2371,28 @@ void InterpreterSelectQuery::executeTotalsAndHaving( query_plan.addStep(std::move(totals_having_step)); } - void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modificator modificator) { const auto & header_before_transform = query_plan.getCurrentDataStream().header; - ColumnNumbers keys; + const Settings & settings = context->getSettingsRef(); + ColumnNumbers keys; for (const auto & key : query_analyzer->aggregationKeys()) keys.push_back(header_before_transform.getPositionByName(key.name)); - const Settings & settings = context->getSettingsRef(); - - Aggregator::Params params( - header_before_transform, - keys, - query_analyzer->aggregates(), - false, - settings.max_rows_to_group_by, - settings.group_by_overflow_mode, - 0, - 0, - settings.max_bytes_before_external_group_by, - settings.empty_result_for_aggregation_by_empty_set, - context->getTemporaryVolume(), - settings.max_threads, - settings.min_free_disk_space_for_temporary_data, - settings.compile_aggregate_expressions, - settings.min_count_to_compile_aggregate_expression); - - auto transform_params = std::make_shared(params, true); + auto params = getAggregatorParams(query_ptr, *query_analyzer, *context, header_before_transform, keys, query_analyzer->aggregates(), false, settings, 0, 0); + auto transform_params = std::make_shared(std::move(params), true); QueryPlanStepPtr step; if (modificator == Modificator::ROLLUP) step = std::make_unique(query_plan.getCurrentDataStream(), std::move(transform_params)); - else + else if (modificator == Modificator::CUBE) step = std::make_unique(query_plan.getCurrentDataStream(), std::move(transform_params)); query_plan.addStep(std::move(step)); } - void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const ActionsDAGPtr & expression, const std::string & description) { if (!expression) @@ -2405,26 +2455,25 @@ void InterpreterSelectQuery::executeWindow(QueryPlan & query_plan) // Try to sort windows in such an order that the window with the longest // sort description goes first, and all window that use its prefixes follow. std::vector windows_sorted; - for (const auto & [_, w] : query_analyzer->windowDescriptions()) - windows_sorted.push_back(&w); + for (const auto & [_, window] : query_analyzer->windowDescriptions()) + windows_sorted.push_back(&window); ::sort(windows_sorted.begin(), windows_sorted.end(), windowDescriptionComparator); const Settings & settings = context->getSettingsRef(); for (size_t i = 0; i < windows_sorted.size(); ++i) { - const auto & w = *windows_sorted[i]; + const auto & window = *windows_sorted[i]; // We don't need to sort again if the input from previous window already // has suitable sorting. Also don't create sort steps when there are no // columns to sort by, because the sort nodes are confused by this. It // happens in case of `over ()`. 
- if (!w.full_sort_description.empty() && (i == 0 || !sortIsPrefix(w, *windows_sorted[i - 1]))) + if (!window.full_sort_description.empty() && (i == 0 || !sortIsPrefix(window, *windows_sorted[i - 1]))) { - auto sorting_step = std::make_unique( query_plan.getCurrentDataStream(), - w.full_sort_description, + window.full_sort_description, settings.max_block_size, 0 /* LIMIT */, SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode), @@ -2433,12 +2482,12 @@ void InterpreterSelectQuery::executeWindow(QueryPlan & query_plan) settings.max_bytes_before_external_sort, context->getTemporaryVolume(), settings.min_free_disk_space_for_temporary_data); - sorting_step->setStepDescription("Sorting for window '" + w.window_name + "'"); + sorting_step->setStepDescription("Sorting for window '" + window.window_name + "'"); query_plan.addStep(std::move(sorting_step)); } - auto window_step = std::make_unique(query_plan.getCurrentDataStream(), w, w.window_functions); - window_step->setStepDescription("Window step for window '" + w.window_name + "'"); + auto window_step = std::make_unique(query_plan.getCurrentDataStream(), window, window.window_functions); + window_step->setStepDescription("Window step for window '" + window.window_name + "'"); query_plan.addStep(std::move(window_step)); } @@ -2510,8 +2559,7 @@ void InterpreterSelectQuery::executeMergeSorted(QueryPlan & query_plan, const So { const Settings & settings = context->getSettingsRef(); - auto merging_sorted - = std::make_unique(query_plan.getCurrentDataStream(), sort_description, settings.max_block_size, limit); + auto merging_sorted = std::make_unique(query_plan.getCurrentDataStream(), sort_description, settings.max_block_size, limit); merging_sorted->setStepDescription("Merge sorted streams " + description); query_plan.addStep(std::move(merging_sorted)); diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index bff2f6c4f90..3adbcad909c 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -27,6 +27,9 @@ class InterpreterSelectWithUnionQuery; class Context; class QueryPlan; +struct GroupingSetsParams; +using GroupingSetsParamsList = std::vector; + struct TreeRewriterResult; using TreeRewriterResultPtr = std::shared_ptr; @@ -140,12 +143,11 @@ private: void executeImpl(QueryPlan & query_plan, std::optional prepared_pipe); /// Different stages of query execution. 
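`executeWindow` orders the window descriptions so that the window with the longest sort description comes first and then skips the sorting step whenever the previous window's description is already a prefix of the current one. A compact sketch of that reuse check, simplified to ordering by description length and comparing plain strings (the real `windowDescriptionComparator` compares `SortColumnDescription` entries element by element):

``` cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct WindowDesc
{
    std::string name;
    std::vector<std::string> full_sort_description;
};

// True if sorting done for `prev` already covers `next`
// (next's description is a prefix of prev's).
static bool sortIsPrefix(const WindowDesc & next, const WindowDesc & prev)
{
    if (next.full_sort_description.size() > prev.full_sort_description.size())
        return false;
    return std::equal(next.full_sort_description.begin(), next.full_sort_description.end(),
                      prev.full_sort_description.begin());
}

int main()
{
    std::vector<WindowDesc> windows = {
        {"w1", {"a"}},
        {"w2", {"a", "b", "c"}},
        {"w3", {"a", "b"}},
    };

    // Longest sort description first, so later windows can reuse the sorting.
    std::sort(windows.begin(), windows.end(), [](const auto & lhs, const auto & rhs)
    {
        return lhs.full_sort_description.size() > rhs.full_sort_description.size();
    });

    for (size_t i = 0; i < windows.size(); ++i)
    {
        bool need_sort = !windows[i].full_sort_description.empty()
            && (i == 0 || !sortIsPrefix(windows[i], windows[i - 1]));
        std::cout << windows[i].name << (need_sort ? ": add sorting step\n" : ": reuse previous sorting\n");
    }
    return 0;
}
```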
- void executeFetchColumns(QueryProcessingStage::Enum processing_stage, QueryPlan & query_plan); void executeWhere(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter); void executeAggregation( QueryPlan & query_plan, const ActionsDAGPtr & expression, bool overflow_row, bool final, InputOrderInfoPtr group_by_info); - void executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final); + void executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final, bool has_grouping_sets); void executeTotalsAndHaving(QueryPlan & query_plan, bool has_having, const ActionsDAGPtr & expression, bool remove_filter, bool overflow_row, bool final); void executeHaving(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter); static void executeExpression(QueryPlan & query_plan, const ActionsDAGPtr & expression, const std::string & description); @@ -171,7 +173,7 @@ private: enum class Modificator { ROLLUP = 0, - CUBE = 1 + CUBE = 1, }; void executeRollupOrCube(QueryPlan & query_plan, Modificator modificator); diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index b52645c7854..0db6f353cf4 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -312,12 +312,12 @@ BlockIO InterpreterSystemQuery::execute() { auto caches = FileCacheFactory::instance().getAll(); for (const auto & [_, cache_data] : caches) - cache_data.cache->remove(query.force_removal); + cache_data.cache->remove(); } else { auto cache = FileCacheFactory::instance().get(query.filesystem_cache_path); - cache->remove(query.force_removal); + cache->remove(); } break; } diff --git a/src/Interpreters/InterserverIOHandler.h b/src/Interpreters/InterserverIOHandler.h index 7ad1c01cbf1..69b742db2ec 100644 --- a/src/Interpreters/InterserverIOHandler.h +++ b/src/Interpreters/InterserverIOHandler.h @@ -69,7 +69,7 @@ public: return endpoint_map.erase(name); } - InterserverIOEndpointPtr getEndpoint(const String & name) + InterserverIOEndpointPtr getEndpoint(const String & name) const try { std::lock_guard lock(mutex); @@ -84,7 +84,7 @@ private: using EndpointMap = std::map; EndpointMap endpoint_map; - std::mutex mutex; + mutable std::mutex mutex; }; } diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index a820ead29b3..9d2ab40bf76 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -775,6 +775,133 @@ CompiledAggregateFunctions compileAggregateFunctions(CHJIT & jit, const std::vec return compiled_aggregate_functions; } +CompiledSortDescriptionFunction compileSortDescription( + CHJIT & jit, + SortDescription & description, + const DataTypes & sort_description_types, + const std::string & sort_description_dump) +{ + Stopwatch watch; + + auto compiled_module = jit.compileModule([&](llvm::Module & module) + { + auto & context = module.getContext(); + llvm::IRBuilder<> b(context); + + auto * size_type = b.getIntNTy(sizeof(size_t) * 8); + + auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy()); + + std::vector types = { size_type, size_type, column_data_type->getPointerTo(), column_data_type->getPointerTo() }; + auto * comparator_func_declaration = llvm::FunctionType::get(b.getInt8Ty(), types, false); + auto * comparator_func = llvm::Function::Create(comparator_func_declaration, llvm::Function::ExternalLinkage, sort_description_dump, module); + + auto * arguments 
= comparator_func->args().begin(); + llvm::Value * lhs_index_arg = &*arguments++; + llvm::Value * rhs_index_arg = &*arguments++; + llvm::Value * columns_lhs_arg = &*arguments++; + llvm::Value * columns_rhs_arg = &*arguments++; + + size_t columns_size = description.size(); + + std::vector> comparator_steps_and_results; + for (size_t i = 0; i < columns_size; ++i) + { + auto * step = llvm::BasicBlock::Create(b.getContext(), "step_" + std::to_string(i), comparator_func); + llvm::Value * result_value = nullptr; + comparator_steps_and_results.emplace_back(step, result_value); + } + + auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0); + + auto * comparator_join = llvm::BasicBlock::Create(b.getContext(), "comparator_join", comparator_func); + + for (size_t i = 0; i < columns_size; ++i) + { + b.SetInsertPoint(comparator_steps_and_results[i].first); + + const auto & sort_description = description[i]; + const auto & column_type = sort_description_types[i]; + + auto dummy_column = column_type->createColumn(); + + auto * column_native_type_nullable = toNativeType(b, column_type); + auto * column_native_type = toNativeType(b, removeNullable(column_type)); + if (!column_native_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No native type for column type {}", column_type->getName()); + + auto * column_native_type_pointer = column_native_type->getPointerTo(); + bool column_type_is_nullable = column_type->isNullable(); + + auto * nullable_unitilized = llvm::Constant::getNullValue(column_native_type_nullable); + + auto * lhs_column = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_64(column_data_type, columns_lhs_arg, i)); + auto * lhs_column_data = b.CreatePointerCast(b.CreateExtractValue(lhs_column, {0}), column_native_type_pointer); + auto * lhs_column_null_data = column_type_is_nullable ? b.CreateExtractValue(lhs_column, {1}) : nullptr; + + llvm::Value * lhs_value = b.CreateLoad(b.CreateInBoundsGEP(nullptr, lhs_column_data, lhs_index_arg)); + + if (lhs_column_null_data) + { + auto * is_null_value_pointer = b.CreateInBoundsGEP(nullptr, lhs_column_null_data, lhs_index_arg); + auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), is_null_value_pointer), b.getInt8(0)); + auto * lhs_nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, lhs_value, {0}), is_null, {1}); + lhs_value = lhs_nullable_value; + } + + auto * rhs_column = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_64(column_data_type, columns_rhs_arg, i)); + auto * rhs_column_data = b.CreatePointerCast(b.CreateExtractValue(rhs_column, {0}), column_native_type_pointer); + auto * rhs_column_null_data = column_type_is_nullable ? 
b.CreateExtractValue(rhs_column, {1}) : nullptr; + + llvm::Value * rhs_value = b.CreateLoad(b.CreateInBoundsGEP(nullptr, rhs_column_data, rhs_index_arg)); + if (rhs_column_null_data) + { + auto * is_null_value_pointer = b.CreateInBoundsGEP(nullptr, rhs_column_null_data, rhs_index_arg); + auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), is_null_value_pointer), b.getInt8(0)); + auto * rhs_nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, rhs_value, {0}), is_null, {1}); + rhs_value = rhs_nullable_value; + } + + llvm::Value * direction = llvm::ConstantInt::getSigned(b.getInt8Ty(), sort_description.direction); + llvm::Value * nan_direction_hint = llvm::ConstantInt::getSigned(b.getInt8Ty(), sort_description.nulls_direction); + llvm::Value * compare_result = dummy_column->compileComparator(b, lhs_value, rhs_value, nan_direction_hint); + llvm::Value * result = b.CreateMul(direction, compare_result); + + comparator_steps_and_results[i].first = b.GetInsertBlock(); + comparator_steps_and_results[i].second = result; + + if (i == columns_size - 1) + b.CreateBr(comparator_join); + else + b.CreateCondBr(b.CreateICmpEQ(result, lhs_equals_rhs_result), comparator_steps_and_results[i + 1].first, comparator_join); + } + + b.SetInsertPoint(comparator_join); + auto * phi = b.CreatePHI(b.getInt8Ty(), comparator_steps_and_results.size()); + + for (const auto & [block, result_value] : comparator_steps_and_results) + phi->addIncoming(result_value, block); + + b.CreateRet(phi); + }); + + ProfileEvents::increment(ProfileEvents::CompileExpressionsMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::CompileExpressionsBytes, compiled_module.size); + ProfileEvents::increment(ProfileEvents::CompileFunction); + + auto comparator_function = reinterpret_cast(compiled_module.function_name_to_symbol[sort_description_dump]); + assert(comparator_function); + + CompiledSortDescriptionFunction compiled_sort_descriptor_function + { + .comparator_function = comparator_function, + + .compiled_module = std::move(compiled_module) + }; + + return compiled_sort_descriptor_function; +} + } #endif diff --git a/src/Interpreters/JIT/compileFunction.h b/src/Interpreters/JIT/compileFunction.h index f523bf5b970..bcd82ae8bab 100644 --- a/src/Interpreters/JIT/compileFunction.h +++ b/src/Interpreters/JIT/compileFunction.h @@ -4,10 +4,12 @@ #if USE_EMBEDDED_COMPILER +#include #include #include #include + namespace DB { @@ -83,6 +85,21 @@ struct CompiledAggregateFunctions */ CompiledAggregateFunctions compileAggregateFunctions(CHJIT & jit, const std::vector & functions, std::string functions_dump_name); + +using JITSortDescriptionFunc = int8_t (*)(size_t, size_t, ColumnData *, ColumnData *); + +struct CompiledSortDescriptionFunction +{ + JITSortDescriptionFunc comparator_function; + CHJIT::CompiledModule compiled_module; +}; + +CompiledSortDescriptionFunction compileSortDescription( + CHJIT & jit, + SortDescription & description, + const DataTypes & sort_description_types, + const std::string & sort_description_dump); + } #endif diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index 81603d6626d..d0bf4939c90 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -590,7 +590,7 @@ void MergeJoin::mergeInMemoryRightBlocks() /// TODO: there should be no split keys by blocks for RIGHT|FULL JOIN builder.addTransform(std::make_shared( - builder.getHeader(), right_sort_description, max_rows_in_right_block, 0, 0, 0, 0, nullptr, 0)); 
+ builder.getHeader(), right_sort_description, max_rows_in_right_block, 0, false, 0, 0, 0, nullptr, 0)); auto pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); PullingPipelineExecutor executor(pipeline); diff --git a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp index 991b449196d..18ca70c7c09 100644 --- a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp +++ b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp @@ -39,10 +39,8 @@ bool shardContains( const std::string & sharding_column_name, const OptimizeShardingKeyRewriteInMatcher::Data & data) { - UInt64 field_value; - /// Convert value to numeric (if required). - if (!sharding_column_value.tryGet(field_value)) - sharding_column_value = convertFieldToType(sharding_column_value, *data.sharding_key_type); + /// Implicit conversion. + sharding_column_value = convertFieldToType(sharding_column_value, *data.sharding_key_type); /// NULL is not allowed in sharding key, /// so it should be safe to assume that shard cannot contain it. diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index ac59d2c7235..6c101143234 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -212,7 +212,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as /// Set query-level memory trackers thread_group->memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage); - thread_group->memory_tracker.setSoftLimit(settings.max_guaranteed_memory_usage); + thread_group->memory_tracker.setSoftLimit(settings.memory_overcommit_ratio_denominator); if (query_context->hasTraceCollector()) { @@ -242,7 +242,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as /// Track memory usage for all simultaneously running queries from single user. 
user_process_list.user_memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage_for_user); - user_process_list.user_memory_tracker.setSoftLimit(settings.max_guaranteed_memory_usage_for_user); + user_process_list.user_memory_tracker.setSoftLimit(settings.memory_overcommit_ratio_denominator_for_user); user_process_list.user_memory_tracker.setDescription("(for user)"); user_process_list.user_overcommit_tracker.setMaxWaitTime(settings.memory_usage_overcommit_max_wait_microseconds); diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index 19041a19aa4..b8a31dd968b 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -210,10 +210,22 @@ GroupByKeysInfo getGroupByKeysInfo(const ASTs & group_by_keys) /// filling set with short names of keys for (const auto & group_key : group_by_keys) { - if (group_key->as()) - data.has_function = true; + /// for grouping sets case + if (group_key->as()) + { + const auto express_list_ast = group_key->as(); + for (const auto & group_elem : express_list_ast.children) + { + data.key_names.insert(group_elem->getColumnName()); + } + } + else + { + if (group_key->as()) + data.has_function = true; - data.key_names.insert(group_key->getColumnName()); + data.key_names.insert(group_key->getColumnName()); + } } return data; @@ -646,12 +658,6 @@ void optimizeSumIfFunctions(ASTPtr & query) RewriteSumIfFunctionVisitor(data).visit(query); } -void optimizeCountConstantAndSumOne(ASTPtr & query) -{ - RewriteCountVariantsVisitor::visit(query); -} - - void optimizeInjectiveFunctionsInsideUniq(ASTPtr & query, ContextPtr context) { RemoveInjectiveFunctionsVisitor::Data data(context); @@ -740,6 +746,11 @@ void TreeOptimizer::optimizeIf(ASTPtr & query, Aliases & aliases, bool if_chain_ OptimizeIfChainsVisitor().visit(query); } +void TreeOptimizer::optimizeCountConstantAndSumOne(ASTPtr & query) +{ + RewriteCountVariantsVisitor::visit(query); +} + void TreeOptimizer::apply(ASTPtr & query, TreeRewriterResult & result, const std::vector & tables_with_columns, ContextPtr context) { diff --git a/src/Interpreters/TreeOptimizer.h b/src/Interpreters/TreeOptimizer.h index 1d9a29bd0ac..ced185373cc 100644 --- a/src/Interpreters/TreeOptimizer.h +++ b/src/Interpreters/TreeOptimizer.h @@ -24,6 +24,7 @@ public: ContextPtr context); static void optimizeIf(ASTPtr & query, Aliases & aliases, bool if_chain_to_multiif); + static void optimizeCountConstantAndSumOne(ASTPtr & query); }; } diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 9a00c0e7135..c90421d6f4f 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -1151,6 +1151,10 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( if (remove_duplicates) renameDuplicatedColumns(select_query); + /// Perform it before analyzing JOINs, because it may change number of columns with names unique and break some login inside JOINs + if (settings.optimize_normalize_count_variants) + TreeOptimizer::optimizeCountConstantAndSumOne(query); + if (tables_with_columns.size() > 1) { const auto & right_table = tables_with_columns[1]; diff --git a/src/Interpreters/join_common.cpp b/src/Interpreters/join_common.cpp index 522244d76f0..37f46d6a65e 100644 --- a/src/Interpreters/join_common.cpp +++ b/src/Interpreters/join_common.cpp @@ -597,41 +597,29 @@ NotJoinedBlocks::NotJoinedBlocks(std::unique_ptr filler_, column_indices_left.emplace_back(left_pos); } - /// `sample_block_names` may contain non unique column names + /// 
`saved_block_sample` may contains non unique column names, get any of them /// (e.g. in case of `... JOIN (SELECT a, a, b FROM table) as t2`) - /// proper fix is to get rid of it - std::unordered_set sample_block_names; - for (size_t right_pos = 0; right_pos < saved_block_sample.columns(); ++right_pos) + for (const auto & [name, right_pos] : saved_block_sample.getNamesToIndexesMap()) { - const String & name = saved_block_sample.getByPosition(right_pos).name; - - auto [_, inserted] = sample_block_names.insert(name); - /// skip columns with same names - if (!inserted) - continue; - - if (!result_sample_block.has(name)) - continue; - - size_t result_position = result_sample_block.getPositionByName(name); - - /// Don't remap left keys twice. We need only qualified right keys here - if (result_position < left_columns_count) - continue; - - setRightIndex(right_pos, result_position); + /// Start from left_columns_count to don't remap left keys twice. We need only qualified right keys here + /// `result_sample_block` may contains non unique column names, need to set index for all of them + for (size_t result_pos = left_columns_count; result_pos < result_sample_block.columns(); ++result_pos) + { + const auto & result_name = result_sample_block.getByPosition(result_pos).name; + if (result_name == name) + setRightIndex(right_pos, result_pos); + } } - /// `result_sample_block` also may contains non unique column names - const auto & result_names = result_sample_block.getNames(); - size_t unique_names_count = std::unordered_set(result_names.begin(), result_names.end()).size(); - if (column_indices_left.size() + column_indices_right.size() + same_result_keys.size() != unique_names_count) + if (column_indices_left.size() + column_indices_right.size() + same_result_keys.size() != result_sample_block.columns()) + { throw Exception( ErrorCodes::LOGICAL_ERROR, - "Error in columns mapping in JOIN. 
(assertion failed {} + {} + {} != {}) " + "Error in columns mapping in JOIN: assertion failed {} + {} + {} != {}; " "Result block [{}], Saved block [{}]", - column_indices_left.size(), column_indices_right.size(), same_result_keys.size(), unique_names_count, + column_indices_left.size(), column_indices_right.size(), same_result_keys.size(), result_sample_block.columns(), result_sample_block.dumpNames(), saved_block_sample.dumpNames()); + } } void NotJoinedBlocks::setRightIndex(size_t right_pos, size_t result_position) diff --git a/src/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp index 2724465537f..2590c6b2941 100644 --- a/src/Parsers/ASTExpressionList.cpp +++ b/src/Parsers/ASTExpressionList.cpp @@ -26,7 +26,15 @@ void ASTExpressionList::formatImpl(const FormatSettings & settings, FormatState settings.ostr << ' '; } - (*it)->formatImpl(settings, state, frame); + if (frame.surround_each_list_element_with_parens) + settings.ostr << "("; + + FormatStateStacked frame_nested = frame; + frame_nested.surround_each_list_element_with_parens = false; + (*it)->formatImpl(settings, state, frame_nested); + + if (frame.surround_each_list_element_with_parens) + settings.ostr << ")"; } } @@ -41,6 +49,7 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For } ++frame.indent; + for (ASTs::const_iterator it = children.begin(); it != children.end(); ++it) { if (it != children.begin()) @@ -54,7 +63,15 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For FormatStateStacked frame_nested = frame; frame_nested.expression_list_always_start_on_new_line = false; + frame_nested.surround_each_list_element_with_parens = false; + + if (frame.surround_each_list_element_with_parens) + settings.ostr << "("; + (*it)->formatImpl(settings, state, frame_nested); + + if (frame.surround_each_list_element_with_parens) + settings.ostr << ")"; } } diff --git a/src/Parsers/ASTSelectQuery.cpp b/src/Parsers/ASTSelectQuery.cpp index 43186056077..4408fd21465 100644 --- a/src/Parsers/ASTSelectQuery.cpp +++ b/src/Parsers/ASTSelectQuery.cpp @@ -96,9 +96,12 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F if (groupBy()) { s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "GROUP BY" << (s.hilite ? hilite_none : ""); - s.one_line + if (!group_by_with_grouping_sets) + { + s.one_line ? groupBy()->formatImpl(s, state, frame) : groupBy()->as().formatImplMultiline(s, state, frame); + } } if (group_by_with_rollup) @@ -107,6 +110,18 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F if (group_by_with_cube) s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << (s.one_line ? "" : " ") << "WITH CUBE" << (s.hilite ? hilite_none : ""); + if (group_by_with_grouping_sets) + { + frame.surround_each_list_element_with_parens = true; + s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << (s.one_line ? "" : " ") << "GROUPING SETS" << (s.hilite ? hilite_none : ""); + s.ostr << " ("; + s.one_line + ? groupBy()->formatImpl(s, state, frame) + : groupBy()->as().formatImplMultiline(s, state, frame); + s.ostr << ")"; + frame.surround_each_list_element_with_parens = false; + } + if (group_by_with_totals) s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << (s.one_line ? "" : " ") << "WITH TOTALS" << (s.hilite ? 
hilite_none : ""); diff --git a/src/Parsers/ASTSelectQuery.h b/src/Parsers/ASTSelectQuery.h index 4a30d6afee3..704aeeeea7c 100644 --- a/src/Parsers/ASTSelectQuery.h +++ b/src/Parsers/ASTSelectQuery.h @@ -86,6 +86,7 @@ public: bool group_by_with_rollup = false; bool group_by_with_cube = false; bool group_by_with_constant_keys = false; + bool group_by_with_grouping_sets = false; bool limit_with_ties = false; ASTPtr & refSelect() { return getExpression(Expression::SELECT); } diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index 8a7891edfe1..a6ff52b74b7 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -200,8 +200,6 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, { if (!filesystem_cache_path.empty()) settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_path; - if (force_removal) - settings.ostr << (settings.hilite ? hilite_keyword : "") << " FORCE"; } } diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 99be7b709bc..eff71a3a9a0 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -91,9 +91,7 @@ public: String disk; UInt64 seconds{}; - /// Values for `drop filesystem cache` system query. String filesystem_cache_path; - bool force_removal = false; String getID(char) const override { return "SYSTEM query"; } diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 8f5d1fa4dda..caf9be1fea6 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -755,13 +755,61 @@ bool ParserNotEmptyExpressionList::parseImpl(Pos & pos, ASTPtr & node, Expected return nested_parser.parse(pos, node, expected) && !node->children.empty(); } - bool ParserOrderByExpressionList::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { return ParserList(std::make_unique(), std::make_unique(TokenType::Comma), false) .parse(pos, node, expected); } +bool ParserGroupingSetsExpressionListElements::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + auto command_list = std::make_shared(); + node = command_list; + + ParserToken s_comma(TokenType::Comma); + ParserToken s_open(TokenType::OpeningRoundBracket); + ParserToken s_close(TokenType::ClosingRoundBracket); + ParserExpressionWithOptionalAlias p_expression(false); + ParserList p_command(std::make_unique(false), + std::make_unique(TokenType::Comma), true); + + do + { + Pos begin = pos; + ASTPtr command; + if (!s_open.ignore(pos, expected)) + { + pos = begin; + if (!p_expression.parse(pos, command, expected)) + { + return false; + } + auto list = std::make_shared(','); + list->children.push_back(command); + command = std::move(list); + } + else + { + if (!p_command.parse(pos, command, expected)) + return false; + + if (!s_close.ignore(pos, expected)) + break; + } + + command_list->children.push_back(command); + } + while (s_comma.ignore(pos, expected)); + + return true; +} + +bool ParserGroupingSetsExpressionList::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + ParserGroupingSetsExpressionListElements grouping_sets_elements; + return grouping_sets_elements.parse(pos, node, expected); + +} bool ParserInterpolateExpressionList::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 2325433a00a..2b127dc2607 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ 
-517,6 +517,20 @@ protected: bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; }; +class ParserGroupingSetsExpressionList : public IParserBase +{ +protected: + const char * getName() const override { return "grouping sets expression"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; +}; + +class ParserGroupingSetsExpressionListElements : public IParserBase +{ +protected: + const char * getName() const override { return "grouping sets expression elements"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; +}; + class ParserInterpolateExpressionList : public IParserBase { protected: diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index bd8167c64fe..b73919f4f36 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -224,6 +224,7 @@ public: bool need_parens = false; bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element. bool expression_list_prepend_whitespace = false; /// Prepend whitespace (if it is required) + bool surround_each_list_element_with_parens = false; const IAST * current_select = nullptr; }; diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp index 4e9d5c1d57d..66428b144bf 100644 --- a/src/Parsers/ParserSelectQuery.cpp +++ b/src/Parsers/ParserSelectQuery.cpp @@ -54,6 +54,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_by("BY"); ParserKeyword s_rollup("ROLLUP"); ParserKeyword s_cube("CUBE"); + ParserKeyword s_grouping_sets("GROUPING SETS"); ParserKeyword s_top("TOP"); ParserKeyword s_with_ties("WITH TIES"); ParserKeyword s_offset("OFFSET"); @@ -70,6 +71,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserNotEmptyExpressionList exp_list_for_select_clause(true); /// Allows aliases without AS keyword. 
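The list accepted after `GROUPING SETS` (see `ParserGroupingSetsExpressionListElements` above, wired into `ParserSelectQuery` below) is a comma-separated sequence in which each element is either a parenthesized key list or a bare expression, and a bare expression is wrapped into a one-element set. A toy model of that rule over bare identifiers; it ignores aliases, nesting and quoting, and `parseGroupingSets` is purely illustrative, not the actual parser.

``` cpp
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

// Splits "(a, b), c, ()" into {{a, b}, {c}, {}}: parenthesized lists become sets,
// a bare expression becomes a one-element set, "()" models the grand-total set.
std::vector<std::vector<std::string>> parseGroupingSets(const std::string & text)
{
    std::vector<std::vector<std::string>> sets;
    std::vector<std::string> current;
    std::string token;
    bool in_parens = false;

    auto flush_token = [&]
    {
        if (!token.empty())
        {
            current.push_back(token);
            token.clear();
        }
    };
    auto flush_set = [&]
    {
        flush_token();
        sets.push_back(current);
        current.clear();
    };

    for (char c : text)
    {
        if (c == '(')
            in_parens = true;
        else if (c == ')')
        {
            in_parens = false;
            flush_set();
        }
        else if (c == ',')
        {
            if (in_parens)
                flush_token();       // key separator inside one set
            else if (!token.empty()) // bare expression becomes a single-element set
                flush_set();
        }
        else if (!std::isspace(static_cast<unsigned char>(c)))
            token += c;
    }
    if (!token.empty())
        flush_set();
    return sets;
}

int main()
{
    for (const auto & set : parseGroupingSets("(a, b), c, ()"))
        std::cout << "set of " << set.size() << " key(s)\n"; // prints 2, 1, 0
    return 0;
}
```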
ParserExpressionWithOptionalAlias exp_elem(false); ParserOrderByExpressionList order_list; + ParserGroupingSetsExpressionList grouping_sets_list; ParserInterpolateExpressionList interpolate_list; ParserToken open_bracket(TokenType::OpeningRoundBracket); @@ -191,24 +193,39 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) select_query->group_by_with_rollup = true; else if (s_cube.ignore(pos, expected)) select_query->group_by_with_cube = true; + else if (s_grouping_sets.ignore(pos, expected)) + select_query->group_by_with_grouping_sets = true; - if ((select_query->group_by_with_rollup || select_query->group_by_with_cube) && !open_bracket.ignore(pos, expected)) + if ((select_query->group_by_with_rollup || select_query->group_by_with_cube || select_query->group_by_with_grouping_sets) && + !open_bracket.ignore(pos, expected)) return false; - if (!exp_list.parse(pos, group_expression_list, expected)) - return false; + if (select_query->group_by_with_grouping_sets) + { + if (!grouping_sets_list.parse(pos, group_expression_list, expected)) + return false; + } + else + { + if (!exp_list.parse(pos, group_expression_list, expected)) + return false; + } - if ((select_query->group_by_with_rollup || select_query->group_by_with_cube) && !close_bracket.ignore(pos, expected)) + + if ((select_query->group_by_with_rollup || select_query->group_by_with_cube || select_query->group_by_with_grouping_sets) && + !close_bracket.ignore(pos, expected)) return false; } - /// WITH ROLLUP, CUBE or TOTALS + /// WITH ROLLUP, CUBE, GROUPING SETS or TOTALS if (s_with.ignore(pos, expected)) { if (s_rollup.ignore(pos, expected)) select_query->group_by_with_rollup = true; else if (s_cube.ignore(pos, expected)) select_query->group_by_with_cube = true; + else if (s_grouping_sets.ignore(pos, expected)) + select_query->group_by_with_grouping_sets = true; else if (s_totals.ignore(pos, expected)) select_query->group_by_with_totals = true; else diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 599de6ec828..e18b0aa5e10 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -360,8 +360,6 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & ASTPtr ast; if (path_parser.parse(pos, ast, expected)) res->filesystem_cache_path = ast->as()->value.safeGet(); - if (ParserKeyword{"FORCE"}.ignore(pos, expected)) - res->force_removal = true; break; } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 1765615f9d1..c5a72bf66bc 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -1,6 +1,8 @@ #include #include #include +#include +#include namespace DB { @@ -22,11 +24,21 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( , merged_data(header.cloneEmptyColumns(), use_average_block_sizes, max_block_size) , description(std::move(description_)) , limit(limit_) - , has_collation(std::any_of(description.begin(), description.end(), [](const auto & descr) { return descr.collator != nullptr; })) , out_row_sources_buf(out_row_sources_buf_) , current_inputs(num_inputs) , cursors(num_inputs) { + DataTypes sort_description_types; + sort_description_types.reserve(description.size()); + + /// Replace column names in description to positions. 
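`compileSortDescription` above emits a fused comparator: for every sort column it compares the two values, multiplies the result by the column's direction, and either returns the first non-zero result or falls through to the next column; `MergingSortedAlgorithm` and `SortingTransform` opt into it through `compileSortDescriptionIfNeeded`. Below is an interpreted C++ equivalent of that comparator over plain int columns; it ignores nullable columns and collations, which the JIT version handles.

``` cpp
#include <iostream>
#include <vector>

struct SortColumnDescription
{
    int direction; // 1 ascending, -1 descending
};

// Interpreted equivalent of the compiled comparator: walk the sort columns and
// return the first non-zero, direction-adjusted comparison result.
int compareRows(
    const std::vector<std::vector<int>> & columns, // columns[c][row]
    const std::vector<SortColumnDescription> & description,
    size_t lhs_row,
    size_t rhs_row)
{
    for (size_t c = 0; c < description.size(); ++c)
    {
        int lhs = columns[c][lhs_row];
        int rhs = columns[c][rhs_row];
        int result = (lhs > rhs) - (lhs < rhs); // -1, 0 or 1
        result *= description[c].direction;
        if (result != 0)
            return result;
    }
    return 0; // rows are equal on every sort column
}

int main()
{
    // ORDER BY col0 ASC, col1 DESC over three rows
    std::vector<std::vector<int>> columns = {{1, 1, 2}, {5, 3, 0}};
    std::vector<SortColumnDescription> description = {{1}, {-1}};

    std::cout << compareRows(columns, description, 0, 1) << "\n"; // -1: equal col0, 5 > 3 with DESC
    std::cout << compareRows(columns, description, 0, 2) << "\n"; // -1: 1 < 2 on col0
    return 0;
}
```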
+ for (auto & column_description : description) + { + has_collation |= column_description.collator != nullptr; + sort_description_types.emplace_back(header.getByName(column_description.column_name).type); + } + + compileSortDescriptionIfNeeded(description, sort_description_types, true /*increase_compile_attemps*/); } void MergingSortedAlgorithm::addInput() diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index e617f9be72c..d7d62d07d92 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -1,10 +1,18 @@ +#include +#include +#include #include #include +#include #include #include #include +#include #include #include +#include +#include +#include namespace DB { @@ -25,9 +33,28 @@ static ITransformingStep::Traits getTraits() }; } +static Block appendGroupingColumn(Block block, const GroupingSetsParamsList & params) +{ + if (params.empty()) + return block; + + Block res; + + size_t rows = block.rows(); + auto column = ColumnUInt64::create(rows); + + res.insert({ColumnPtr(std::move(column)), std::make_shared(), "__grouping_set"}); + + for (auto & col : block) + res.insert(std::move(col)); + + return res; +} + AggregatingStep::AggregatingStep( const DataStream & input_stream_, Aggregator::Params params_, + GroupingSetsParamsList grouping_sets_params_, bool final_, size_t max_block_size_, size_t aggregation_in_order_max_block_bytes_, @@ -36,8 +63,9 @@ AggregatingStep::AggregatingStep( bool storage_has_evenly_distributed_read_, InputOrderInfoPtr group_by_info_, SortDescription group_by_sort_description_) - : ITransformingStep(input_stream_, params_.getHeader(final_), getTraits(), false) + : ITransformingStep(input_stream_, appendGroupingColumn(params_.getHeader(final_), grouping_sets_params_), getTraits(), false) , params(std::move(params_)) + , grouping_sets_params(std::move(grouping_sets_params_)) , final(std::move(final_)) , max_block_size(max_block_size_) , aggregation_in_order_max_block_bytes(aggregation_in_order_max_block_bytes_) @@ -49,7 +77,7 @@ AggregatingStep::AggregatingStep( { } -void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { QueryPipelineProcessorsCollector collector(pipeline, this); @@ -80,6 +108,150 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B */ auto transform_params = std::make_shared(std::move(params), final); + if (!grouping_sets_params.empty()) + { + const size_t grouping_sets_size = grouping_sets_params.size(); + + const size_t streams = pipeline.getNumStreams(); + + auto input_header = pipeline.getHeader(); + pipeline.transform([&](OutputPortRawPtrs ports) + { + Processors copiers; + copiers.reserve(ports.size()); + + for (auto * port : ports) + { + auto copier = std::make_shared(input_header, grouping_sets_size); + connect(*port, copier->getInputPort()); + copiers.push_back(copier); + } + + return copiers; + }); + + pipeline.transform([&](OutputPortRawPtrs ports) + { + assert(streams * grouping_sets_size == ports.size()); + Processors processors; + for (size_t i = 0; i < grouping_sets_size; ++i) + { + Aggregator::Params params_for_set + { + transform_params->params.src_header, + grouping_sets_params[i].used_keys, + transform_params->params.aggregates, + transform_params->params.overflow_row, + transform_params->params.max_rows_to_group_by, + 
transform_params->params.group_by_overflow_mode, + transform_params->params.group_by_two_level_threshold, + transform_params->params.group_by_two_level_threshold_bytes, + transform_params->params.max_bytes_before_external_group_by, + transform_params->params.empty_result_for_aggregation_by_empty_set, + transform_params->params.tmp_volume, + transform_params->params.max_threads, + transform_params->params.min_free_disk_space, + transform_params->params.compile_aggregate_expressions, + transform_params->params.min_count_to_compile_aggregate_expression, + transform_params->params.intermediate_header, + transform_params->params.stats_collecting_params + }; + auto transform_params_for_set = std::make_shared(std::move(params_for_set), final); + + if (streams > 1) + { + auto many_data = std::make_shared(streams); + for (size_t j = 0; j < streams; ++j) + { + auto aggregation_for_set = std::make_shared(input_header, transform_params_for_set, many_data, j, merge_threads, temporary_data_merge_threads); + // For each input stream we have `grouping_sets_size` copies, so port index + // for transform #j should skip ports of first (j-1) streams. + connect(*ports[i + grouping_sets_size * j], aggregation_for_set->getInputs().front()); + ports[i + grouping_sets_size * j] = &aggregation_for_set->getOutputs().front(); + processors.push_back(aggregation_for_set); + } + } + else + { + auto aggregation_for_set = std::make_shared(input_header, transform_params_for_set); + connect(*ports[i], aggregation_for_set->getInputs().front()); + ports[i] = &aggregation_for_set->getOutputs().front(); + processors.push_back(aggregation_for_set); + } + } + + if (streams > 1) + { + OutputPortRawPtrs new_ports; + new_ports.reserve(grouping_sets_size); + + for (size_t i = 0; i < grouping_sets_size; ++i) + { + size_t output_it = i; + auto resize = std::make_shared(ports[output_it]->getHeader(), streams, 1); + auto & inputs = resize->getInputs(); + + for (auto input_it = inputs.begin(); input_it != inputs.end(); output_it += grouping_sets_size, ++input_it) + connect(*ports[output_it], *input_it); + new_ports.push_back(&resize->getOutputs().front()); + processors.push_back(resize); + } + + ports.swap(new_ports); + } + + assert(ports.size() == grouping_sets_size); + auto output_header = transform_params->getHeader(); + + for (size_t set_counter = 0; set_counter < grouping_sets_size; ++set_counter) + { + auto & header = ports[set_counter]->getHeader(); + + /// Here we create a DAG which fills missing keys and adds `__grouping_set` column + auto dag = std::make_shared(header.getColumnsWithTypeAndName()); + ActionsDAG::NodeRawConstPtrs index; + index.reserve(output_header.columns() + 1); + + auto grouping_col = ColumnConst::create(ColumnUInt64::create(1, set_counter), 0); + const auto * grouping_node = &dag->addColumn( + {ColumnPtr(std::move(grouping_col)), std::make_shared(), "__grouping_set"}); + + grouping_node = &dag->materializeNode(*grouping_node); + index.push_back(grouping_node); + + size_t missign_column_index = 0; + const auto & missing_columns = grouping_sets_params[set_counter].missing_keys; + + for (size_t i = 0; i < output_header.columns(); ++i) + { + auto & col = output_header.getByPosition(i); + if (missign_column_index < missing_columns.size() && missing_columns[missign_column_index] == i) + { + ++missign_column_index; + auto column = ColumnConst::create(col.column->cloneResized(1), 0); + const auto * node = &dag->addColumn({ColumnPtr(std::move(column)), col.type, col.name}); + node = &dag->materializeNode(*node); + 
index.push_back(node); + } + else + index.push_back(dag->getIndex()[header.getPositionByName(col.name)]); + } + + dag->getIndex().swap(index); + auto expression = std::make_shared(dag, settings.getActionsSettings()); + auto transform = std::make_shared(header, expression); + + connect(*ports[set_counter], transform->getInputPort()); + processors.emplace_back(std::move(transform)); + } + + return processors; + }); + + aggregating = collector.detachProcessors(0); + return; + } + if (group_by_info) { if (pipeline.getNumStreams() > 1) diff --git a/src/Processors/QueryPlan/AggregatingStep.h b/src/Processors/QueryPlan/AggregatingStep.h index 154ff0abc0a..b933daaa474 100644 --- a/src/Processors/QueryPlan/AggregatingStep.h +++ b/src/Processors/QueryPlan/AggregatingStep.h @@ -10,6 +10,21 @@ namespace DB struct AggregatingTransformParams; using AggregatingTransformParamsPtr = std::shared_ptr; +struct GroupingSetsParams +{ + GroupingSetsParams() = default; + + GroupingSetsParams(ColumnNumbers used_keys_, ColumnNumbers missing_keys_) + : used_keys(std::move(used_keys_)) + , missing_keys(std::move(missing_keys_)) + {} + + ColumnNumbers used_keys; + ColumnNumbers missing_keys; +}; + +using GroupingSetsParamsList = std::vector; + /// Aggregation. See AggregatingTransform. class AggregatingStep : public ITransformingStep { @@ -17,6 +32,7 @@ public: AggregatingStep( const DataStream & input_stream_, Aggregator::Params params_, + GroupingSetsParamsList grouping_sets_params_, bool final_, size_t max_block_size_, size_t aggregation_in_order_max_block_bytes_, @@ -39,6 +55,7 @@ public: private: Aggregator::Params params; + GroupingSetsParamsList grouping_sets_params; bool final; size_t max_block_size; size_t aggregation_in_order_max_block_bytes; @@ -55,7 +72,6 @@ private: Processors finalizing; Processors aggregating; - }; } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 0d919c6431c..feb4a7d50f3 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -524,6 +524,9 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( const auto & sorting_columns = metadata_for_reading->getSortingKey().column_names; SortDescription sort_description; + sort_description.compile_sort_description = settings.compile_sort_description; + sort_description.min_count_to_compile_sort_description = settings.min_count_to_compile_sort_description; + for (size_t j = 0; j < prefix_size; ++j) sort_description.emplace_back(sorting_columns[j], input_order_info->direction); @@ -763,6 +766,9 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( Names sort_columns = metadata_for_reading->getSortingKeyColumns(); SortDescription sort_description; + sort_description.compile_sort_description = settings.compile_sort_description; + sort_description.min_count_to_compile_sort_description = settings.min_count_to_compile_sort_description; + size_t sort_columns_size = sort_columns.size(); sort_description.reserve(sort_columns_size); diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 255beebbdf1..2d8fcf3d6cc 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -140,11 +140,21 @@ void SortingStep::transformPipeline(QueryPipelineBuilder & pipeline, const Build return std::make_shared(header, result_description, limit); }); + bool increase_sort_description_compile_attempts = true; + /// NOTE limits are not applied to 
the size of temporary sets in FinishSortingTransform - pipeline.addSimpleTransform([&](const Block & header) -> ProcessorPtr + pipeline.addSimpleTransform([&, increase_sort_description_compile_attempts](const Block & header) mutable -> ProcessorPtr { + /** For multiple FinishSortingTransform we need to count identical comparators only once per QueryPlan + * To property support min_count_to_compile_sort_description. + */ + bool increase_sort_description_compile_attempts_current = increase_sort_description_compile_attempts; + + if (increase_sort_description_compile_attempts) + increase_sort_description_compile_attempts = false; + return std::make_shared( - header, prefix_description, result_description, max_block_size, limit); + header, prefix_description, result_description, max_block_size, limit, increase_sort_description_compile_attempts_current); }); } } @@ -171,13 +181,23 @@ void SortingStep::transformPipeline(QueryPipelineBuilder & pipeline, const Build return transform; }); - pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) -> ProcessorPtr + bool increase_sort_description_compile_attempts = true; + + pipeline.addSimpleTransform([&, increase_sort_description_compile_attempts](const Block & header, QueryPipelineBuilder::StreamType stream_type) mutable -> ProcessorPtr { if (stream_type == QueryPipelineBuilder::StreamType::Totals) return nullptr; + /** For multiple FinishSortingTransform we need to count identical comparators only once per QueryPlan. + * To property support min_count_to_compile_sort_description. + */ + bool increase_sort_description_compile_attempts_current = increase_sort_description_compile_attempts; + + if (increase_sort_description_compile_attempts) + increase_sort_description_compile_attempts = false; + return std::make_shared( - header, result_description, max_block_size, limit, + header, result_description, max_block_size, limit, increase_sort_description_compile_attempts_current, max_bytes_before_remerge / pipeline.getNumStreams(), remerge_lowered_memory_bytes_ratio, max_bytes_before_external_sort, @@ -188,12 +208,12 @@ void SortingStep::transformPipeline(QueryPipelineBuilder & pipeline, const Build /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { - auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), result_description, - max_block_size, limit); + max_block_size, + limit); pipeline.addTransform(std::move(transform)); } @@ -202,12 +222,12 @@ void SortingStep::transformPipeline(QueryPipelineBuilder & pipeline, const Build { /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { - auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), result_description, - max_block_size, limit); + max_block_size, + limit); pipeline.addTransform(std::move(transform)); } diff --git a/src/Processors/Sources/SourceWithProgress.h b/src/Processors/Sources/SourceWithProgress.h index 912a548f977..57002006957 100644 --- a/src/Processors/Sources/SourceWithProgress.h +++ b/src/Processors/Sources/SourceWithProgress.h @@ -42,6 +42,7 @@ public: /// Set the approximate total number of rows to read. 
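`SortingStep` passes `increase_sort_description_compile_attempts` so that, when one plan step creates a sorting transform per stream, only the first transform bumps the compile-attempt counter for the (identical) comparator: the flag is captured by value into a mutable lambda and flipped after the first use. A minimal sketch of that capture-by-value trick; `Transform`, `make_transform` and `compile_attempts` are illustrative stand-ins.

``` cpp
#include <iostream>
#include <vector>

// Shared counter standing in for the per-comparator compile-attempt statistics.
static int compile_attempts = 0;

struct Transform
{
    bool counted_attempt; // whether this instance bumped the counter
};

int main()
{
    std::vector<Transform> transforms;

    // The flag is captured by value into the mutable lambda, so only the first
    // transform created for this plan increases the attempt counter.
    bool increase_compile_attempts = true;
    auto make_transform = [increase_compile_attempts]() mutable -> Transform
    {
        bool current = increase_compile_attempts;
        if (increase_compile_attempts)
            increase_compile_attempts = false;
        if (current)
            ++compile_attempts;
        return Transform{current};
    };

    for (int stream = 0; stream < 4; ++stream) // one transform per output stream
        transforms.push_back(make_transform());

    std::cout << "attempts counted: " << compile_attempts << "\n"; // 1
    return 0;
}
```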
virtual void addTotalRowsApprox(size_t value) = 0; + virtual void setTotalRowsApprox(size_t value) = 0; }; /// Implementation for ISourceWithProgress @@ -58,6 +59,7 @@ public: void setProcessListElement(QueryStatus * elem) final; void setProgressCallback(const ProgressCallback & callback) final { progress_callback = callback; } void addTotalRowsApprox(size_t value) final { total_rows_approx += value; } + void setTotalRowsApprox(size_t value) final { total_rows_approx = value; } protected: /// Call this method to provide information about progress. diff --git a/src/Processors/Transforms/FinishSortingTransform.cpp b/src/Processors/Transforms/FinishSortingTransform.cpp index abded9bd2f0..3842e034de7 100644 --- a/src/Processors/Transforms/FinishSortingTransform.cpp +++ b/src/Processors/Transforms/FinishSortingTransform.cpp @@ -25,8 +25,9 @@ FinishSortingTransform::FinishSortingTransform( const SortDescription & description_sorted_, const SortDescription & description_to_sort_, size_t max_merged_block_size_, - UInt64 limit_) - : SortingTransform(header, description_to_sort_, max_merged_block_size_, limit_) + UInt64 limit_, + bool increase_sort_description_compile_attempts) + : SortingTransform(header, description_to_sort_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts) { /// Check for sanity non-modified descriptions if (!isPrefix(description_sorted_, description_to_sort_)) diff --git a/src/Processors/Transforms/FinishSortingTransform.h b/src/Processors/Transforms/FinishSortingTransform.h index 3bebcc0a68f..99a293fc1b0 100644 --- a/src/Processors/Transforms/FinishSortingTransform.h +++ b/src/Processors/Transforms/FinishSortingTransform.h @@ -16,7 +16,8 @@ public: const SortDescription & description_sorted_, const SortDescription & description_to_sort_, size_t max_merged_block_size_, - UInt64 limit_); + UInt64 limit_, + bool increase_sort_description_compile_attempts); String getName() const override { return "FinishSortingTransform"; } diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index 1fe945cbbc9..0c4615e9273 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -92,12 +92,13 @@ MergeSortingTransform::MergeSortingTransform( const SortDescription & description_, size_t max_merged_block_size_, UInt64 limit_, + bool increase_sort_description_compile_attempts, size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, VolumePtr tmp_volume_, size_t min_free_disk_space_) - : SortingTransform(header, description_, max_merged_block_size_, limit_) + : SortingTransform(header, description_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts) , max_bytes_before_remerge(max_bytes_before_remerge_) , remerge_lowered_memory_bytes_ratio(remerge_lowered_memory_bytes_ratio_) , max_bytes_before_external_sort(max_bytes_before_external_sort_) diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h index 34a4842949b..e118a2a655b 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -23,6 +23,7 @@ public: const SortDescription & description_, size_t max_merged_block_size_, UInt64 limit_, + bool increase_sort_description_compile_attempts, size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t 
max_bytes_before_external_sort_, diff --git a/src/Processors/Transforms/SortingTransform.cpp b/src/Processors/Transforms/SortingTransform.cpp index c0f700070fa..37a0bf0b7d1 100644 --- a/src/Processors/Transforms/SortingTransform.cpp +++ b/src/Processors/Transforms/SortingTransform.cpp @@ -127,11 +127,12 @@ Chunk MergeSorter::mergeImpl(TSortingHeap & queue) return Chunk(std::move(merged_columns), merged_rows); } - SortingTransform::SortingTransform( const Block & header, const SortDescription & description_, - size_t max_merged_block_size_, UInt64 limit_) + size_t max_merged_block_size_, + UInt64 limit_, + bool increase_sort_description_compile_attempts) : IProcessor({header}, {header}) , description(description_) , max_merged_block_size(max_merged_block_size_) @@ -154,6 +155,9 @@ SortingTransform::SortingTransform( } } + DataTypes sort_description_types; + sort_description_types.reserve(description.size()); + /// Remove constants from column_description and remap positions. SortDescription description_without_constants; description_without_constants.reserve(description.size()); @@ -161,11 +165,17 @@ { auto old_pos = header.getPositionByName(column_description.column_name); auto new_pos = map[old_pos]; + if (new_pos < num_columns) + { + sort_description_types.emplace_back(sample.safeGetByPosition(old_pos).type); description_without_constants.push_back(column_description); + } } description.swap(description_without_constants); + + compileSortDescriptionIfNeeded(description, sort_description_types, increase_sort_description_compile_attempts /*increase_compile_attempts*/); } SortingTransform::~SortingTransform() = default; diff --git a/src/Processors/Transforms/SortingTransform.h b/src/Processors/Transforms/SortingTransform.h index 380ef4dff88..938fd68811b 100644 --- a/src/Processors/Transforms/SortingTransform.h +++ b/src/Processors/Transforms/SortingTransform.h @@ -68,7 +68,9 @@ public: /// limit - if not 0, allowed to return just first 'limit' rows in sorted order. SortingTransform(const Block & header, const SortDescription & description_, - size_t max_merged_block_size_, UInt64 limit_); + size_t max_merged_block_size_, + UInt64 limit_, + bool increase_sort_description_compile_attempts); ~SortingTransform() override; diff --git a/src/QueryPipeline/Pipe.cpp b/src/QueryPipeline/Pipe.cpp index 67faaefbf66..19009d9692a 100644 --- a/src/QueryPipeline/Pipe.cpp +++ b/src/QueryPipeline/Pipe.cpp @@ -509,7 +509,7 @@ void Pipe::addTransform(ProcessorPtr transform, OutputPort * totals, OutputPort for (size_t i = 1; i < output_ports.size(); ++i) assertBlocksHaveEqualStructure(header, output_ports[i]->getHeader(), "Pipes"); - // Temporarily skip this check. TotaslHavingTransform may return finalized totals but not finalized data. + // Temporarily skip this check. TotalsHavingTransform may return finalized totals but not finalized data. 
// if (totals_port) // assertBlocksHaveEqualStructure(header, totals_port->getHeader(), "Pipes"); diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 5e074861110..012a825a9d5 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -347,7 +347,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelines( /// ╞> FillingJoin ─> Resize ╣ ╞> Joining ─> (totals) /// (totals) ─────────┘ ╙─────┘ - auto num_streams = left->getNumStreams(); + size_t num_streams = left->getNumStreams(); if (join->supportParallelJoin() && !right->hasTotals()) { diff --git a/src/QueryPipeline/RemoteInserter.cpp b/src/QueryPipeline/RemoteInserter.cpp index aec7562e133..d5cef72b020 100644 --- a/src/QueryPipeline/RemoteInserter.cpp +++ b/src/QueryPipeline/RemoteInserter.cpp @@ -50,7 +50,7 @@ RemoteInserter::RemoteInserter( /** Send query and receive "header", that describes table structure. * Header is needed to know, what structure is required for blocks to be passed to 'write' method. */ - connection.sendQuery(timeouts, query, "", QueryProcessingStage::Complete, &settings_, &modified_client_info, false); + connection.sendQuery(timeouts, query, "", QueryProcessingStage::Complete, &settings_, &modified_client_info, false, {}); while (true) { diff --git a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp index e19d2c7114b..feae7127349 100644 --- a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp +++ b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp @@ -83,7 +83,7 @@ TEST(MergingSortedTest, SimpleBlockSizeTest) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - DEFAULT_MERGE_BLOCK_SIZE, 0, nullptr, false, true); + DEFAULT_MERGE_BLOCK_SIZE, false, nullptr, false, true); pipe.addTransform(std::move(transform)); @@ -130,7 +130,7 @@ TEST(MergingSortedTest, MoreInterestingBlockSizes) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - DEFAULT_MERGE_BLOCK_SIZE, 0, nullptr, false, true); + DEFAULT_MERGE_BLOCK_SIZE, false, nullptr, false, true); pipe.addTransform(std::move(transform)); diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 3942e701d47..414c596eb05 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1482,7 +1482,7 @@ namespace void Call::addProgressToResult() { - auto values = progress.fetchAndResetPiecewiseAtomically(); + auto values = progress.fetchValuesAndResetPiecewiseAtomically(); if (!values.read_rows && !values.read_bytes && !values.total_rows_to_read && !values.written_rows && !values.written_bytes) return; auto & grpc_progress = *result.mutable_progress(); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index a038dcb3e6c..c71b3834726 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1329,6 +1329,7 @@ void TCPHandler::receiveQuery() query_context->getIgnoredPartUUIDs()->add(*state.part_uuids_to_ignore); query_context->setProgressCallback([this] (const Progress & value) { return this->updateProgress(value); }); + query_context->setFileProgressCallback([this](const FileProgress & value) { this->updateProgress(Progress(value)); }); /// /// Settings @@ -1736,7 +1737,7 @@ void TCPHandler::updateProgress(const Progress & value) void TCPHandler::sendProgress() { 
writeVarUInt(Protocol::Server::Progress, *out); - auto increment = state.progress.fetchAndResetPiecewiseAtomically(); + auto increment = state.progress.fetchValuesAndResetPiecewiseAtomically(); increment.write(*out, client_tcp_protocol_version); out->next(); } diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index 11d0bc8c565..8722ddc5a82 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -87,7 +87,7 @@ MergeListElement::MergeListElement( /// thread_group::memory_tracker, but MemoryTrackerThreadSwitcher will reset parent). memory_tracker.setProfilerStep(settings.memory_profiler_step); memory_tracker.setSampleProbability(settings.memory_profiler_sample_probability); - memory_tracker.setSoftLimit(settings.max_guaranteed_memory_usage); + memory_tracker.setSoftLimit(settings.memory_overcommit_ratio_denominator); if (settings.memory_tracker_fault_probability) memory_tracker.setFaultProbability(settings.memory_tracker_fault_probability); diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index cb01fe3fcfc..58bffaab34b 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -778,6 +778,9 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() Names sort_columns = global_ctx->metadata_snapshot->getSortingKeyColumns(); SortDescription sort_description; + sort_description.compile_sort_description = global_ctx->data->getContext()->getSettingsRef().compile_sort_description; + sort_description.min_count_to_compile_sort_description = global_ctx->data->getContext()->getSettingsRef().min_count_to_compile_sort_description; + size_t sort_columns_size = sort_columns.size(); sort_description.reserve(sort_columns_size); diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 30b167f0d7b..5b191b37f5e 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -433,6 +433,8 @@ public: bool need_path_column = false; bool need_file_column = false; + + size_t total_bytes_to_read = 0; }; using FilesInfoPtr = std::shared_ptr<FilesInfo>; @@ -573,6 +575,25 @@ public: chunk.addColumn(column->convertToFullColumnIfConst()); } + if (num_rows) + { + auto bytes_per_row = std::ceil(static_cast<double>(chunk.bytes()) / num_rows); + size_t total_rows_approx = std::ceil(static_cast<double>(files_info->total_bytes_to_read) / bytes_per_row); + total_rows_approx_accumulated += total_rows_approx; + ++total_rows_count_times; + total_rows_approx = total_rows_approx_accumulated / total_rows_count_times; + + /// We need to add the diff, because total_rows_approx is an incremental value. + /// It would be more correct to send total_rows_approx as is (not a diff), + /// but total_rows_to_read can only be incremented, which does not allow that. + /// A new field could be introduced and sent to the client instead, but it is not worth it. 
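+                /// For example, if the averaged estimate grows from 900 to 1000 rows between chunks, only the difference of 100 is passed to addTotalRowsApprox() below.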
+ if (total_rows_approx > total_rows_approx_prev) + { + size_t diff = total_rows_approx - total_rows_approx_prev; + addTotalRowsApprox(diff); + total_rows_approx_prev = total_rows_approx; + } + } return chunk; } @@ -608,6 +629,10 @@ private: bool finished_generate = false; std::shared_lock shared_lock; + + UInt64 total_rows_approx_accumulated = 0; + size_t total_rows_count_times = 0; + UInt64 total_rows_approx_prev = 0; }; @@ -637,6 +662,7 @@ Pipe StorageFile::read( auto files_info = std::make_shared(); files_info->files = paths; + files_info->total_bytes_to_read = total_bytes_to_read; for (const auto & column : column_names) { @@ -656,9 +682,8 @@ Pipe StorageFile::read( /// Set total number of bytes to process. For progress bar. auto progress_callback = context->getFileProgressCallback(); - if ((context->getApplicationType() == Context::ApplicationType::LOCAL - || context->getApplicationType() == Context::ApplicationType::CLIENT) - && progress_callback) + + if (progress_callback) progress_callback(FileProgress(0, total_bytes_to_read)); for (size_t i = 0; i < num_streams; ++i) diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 1319c864b7b..13c6fca5163 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -37,6 +37,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #include @@ -927,6 +932,76 @@ void StorageWindowView::threadFuncFireEvent() } } +Pipe StorageWindowView::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr local_context, + QueryProcessingStage::Enum processed_stage, + const size_t max_block_size, + const unsigned num_streams) +{ + QueryPlan plan; + read(plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); + return plan.convertToPipe( + QueryPlanOptimizationSettings::fromContext(local_context), BuildQueryPipelineSettings::fromContext(local_context)); +} + +void StorageWindowView::read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr local_context, + QueryProcessingStage::Enum processed_stage, + const size_t max_block_size, + const unsigned num_streams) +{ + if (target_table_id.empty()) + return; + + auto storage = getTargetStorage(); + auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); + auto target_metadata_snapshot = storage->getInMemoryMetadataPtr(); + auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, local_context); + + if (query_info.order_optimizer) + query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, local_context); + + storage->read(query_plan, column_names, target_storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); + + if (query_plan.isInitialized()) + { + auto wv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, local_context, processed_stage); + auto target_header = query_plan.getCurrentDataStream().header; + + if (!blocksHaveEqualStructure(wv_header, target_header)) + { + auto converting_actions = ActionsDAG::makeConvertingActions( + target_header.getColumnsWithTypeAndName(), wv_header.getColumnsWithTypeAndName(), 
ActionsDAG::MatchColumnsMode::Name); + auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), converting_actions); + converting_step->setStepDescription("Convert Target table structure to WindowView structure"); + query_plan.addStep(std::move(converting_step)); + } + + StreamLocalLimits limits; + SizeLimits leaf_limits; + + /// Add table lock for target table. + auto adding_limits_and_quota = std::make_unique( + query_plan.getCurrentDataStream(), + storage, + std::move(lock), + limits, + leaf_limits, + nullptr, + nullptr); + + adding_limits_and_quota->setStepDescription("Lock target table for WindowView"); + query_plan.addStep(std::move(adding_limits_and_quota)); + } +} + Pipe StorageWindowView::watch( const Names & /*column_names*/, const SelectQueryInfo & query_info, @@ -1316,6 +1391,18 @@ void StorageWindowView::writeIntoWindowView( auto metadata_snapshot = inner_storage->getInMemoryMetadataPtr(); auto output = inner_storage->write(window_view.getMergeableQuery(), metadata_snapshot, local_context); + if (!blocksHaveEqualStructure(builder.getHeader(), output->getHeader())) + { + auto convert_actions_dag = ActionsDAG::makeConvertingActions( + builder.getHeader().getColumnsWithTypeAndName(), + output->getHeader().getColumnsWithTypeAndName(), + ActionsDAG::MatchColumnsMode::Name); + auto convert_actions = std::make_shared( + convert_actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); + + builder.addSimpleTransform([&](const Block & header) { return std::make_shared(header, convert_actions); }); + } + builder.addChain(Chain(std::move(output))); builder.setSinks([&](const Block & cur_header, Pipe::StreamType) { diff --git a/src/Storages/WindowView/StorageWindowView.h b/src/Storages/WindowView/StorageWindowView.h index 782e8f2b899..101d29d1ae7 100644 --- a/src/Storages/WindowView/StorageWindowView.h +++ b/src/Storages/WindowView/StorageWindowView.h @@ -137,6 +137,25 @@ public: void startup() override; void shutdown() override; + Pipe read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned num_streams) override; + + void read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + unsigned num_streams) override; + Pipe watch( const Names & column_names, const SelectQueryInfo & query_info, diff --git a/src/Storages/WindowView/WindowViewSource.h b/src/Storages/WindowView/WindowViewSource.h index a726cdc8712..7b914933035 100644 --- a/src/Storages/WindowView/WindowViewSource.h +++ b/src/Storages/WindowView/WindowViewSource.h @@ -51,6 +51,8 @@ protected: Block block; UInt32 watermark; std::tie(block, watermark) = generateImpl(); + if (!block) + return Chunk(); if (is_events) { return Chunk( diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py index 6a3615fc5db..b73bf057393 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -254,6 +254,9 @@ def main(): logging.info("Got version from repo %s", version.string) official_flag = pr_info.number == 0 + if "official" in build_config: + official_flag = build_config["official"] + version_type = "testing" if "release" in pr_info.labels or "release-lts" in pr_info.labels: version_type = "stable" diff --git a/tests/ci/ci_config.py 
b/tests/ci/ci_config.py index c9915c1c7f4..49ee3563722 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -37,6 +37,7 @@ CI_CONFIG = { "splitted": "unsplitted", "tidy": "disable", "with_coverage": False, + "official": False, }, # FIXME update to gcc-12 and turn on # "binary_gcc": { diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index d659245dad9..acc07ccfb23 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -84,7 +84,6 @@ if __name__ == "__main__": stopwatch = Stopwatch() temp_path = TEMP_PATH - caches_path = CACHES_PATH if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -110,7 +109,10 @@ if __name__ == "__main__": if not os.path.exists(output_path): os.makedirs(output_path) - cache_path = os.path.join(caches_path, "fasttest") + if not os.path.exists(CACHES_PATH): + os.makedirs(CACHES_PATH) + subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {CACHES_PATH}", shell=True) + cache_path = os.path.join(CACHES_PATH, "fasttest") logging.info("Will try to fetch cache for our build") ccache_for_pr = get_ccache_if_not_exists( diff --git a/tests/integration/test_global_overcommit_tracker/test.py b/tests/integration/test_global_overcommit_tracker/test.py index cacc447be1a..d3d56e82f38 100644 --- a/tests/integration/test_global_overcommit_tracker/test.py +++ b/tests/integration/test_global_overcommit_tracker/test.py @@ -18,8 +18,8 @@ def start_cluster(): cluster.shutdown() -TEST_QUERY_A = "SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage_for_user=1" -TEST_QUERY_B = "SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage_for_user=2" +TEST_QUERY_A = "SELECT number FROM numbers(1000) GROUP BY number SETTINGS memory_overcommit_ratio_denominator_for_user=1" +TEST_QUERY_B = "SELECT number FROM numbers(1000) GROUP BY number SETTINGS memory_overcommit_ratio_denominator_for_user=2" def test_overcommited_is_killed(): diff --git a/tests/performance/jit_sort.xml b/tests/performance/jit_sort.xml new file mode 100644 index 00000000000..f99124a360b --- /dev/null +++ b/tests/performance/jit_sort.xml @@ -0,0 +1,122 @@ + + + hits_100m_single + + + + CREATE TABLE jit_test_memory ( + key UInt64, + value_1 UInt64, + value_2 UInt64, + value_3 UInt64, + value_4 UInt64, + value_5 UInt64 + ) Engine = Memory + + + + CREATE TABLE jit_test_merge_tree ( + key UInt64, + value_1 UInt64, + value_2 UInt64, + value_3 UInt64, + value_4 UInt64, + value_5 UInt64 + ) Engine = MergeTree + ORDER BY key + + + + CREATE TABLE jit_test_memory_nullable ( + key UInt64, + value_1 Nullable(UInt64), + value_2 Nullable(UInt64), + value_3 Nullable(UInt64), + value_4 Nullable(UInt64), + value_5 Nullable(UInt64) + ) Engine = Memory + + + + CREATE TABLE jit_test_merge_tree_nullable ( + key UInt64, + value_1 Nullable(UInt64), + value_2 Nullable(UInt64), + value_3 Nullable(UInt64), + value_4 Nullable(UInt64), + value_5 Nullable(UInt64) + ) Engine = MergeTree + ORDER BY key + + + + + table + + jit_test_memory + jit_test_merge_tree + jit_test_memory_nullable + jit_test_merge_tree_nullable + + + + + + INSERT INTO {table} + SELECT + number, + rand64(0), + rand64(1), + rand64(2), + rand64(3), + rand64(4) + FROM + system.numbers_mt + LIMIT 10000000 + + + + SELECT + * + FROM {table} + ORDER BY value_1 ASC + FORMAT Null + + + + SELECT + * + FROM {table} + ORDER BY value_1 ASC, value_2 DESC, value_3 ASC + FORMAT Null + + + + SELECT + * + FROM {table} + ORDER BY value_1 ASC, value_2 DESC, value_3 ASC, value_4 DESC, value_5 
ASC + FORMAT Null + + + + SELECT + WatchID + FROM hits_100m_single + ORDER BY WatchID ASC, CounterID DESC, ClientIP ASC + LIMIT 2000000 + FORMAT Null + + + + SELECT + WatchID + FROM hits_100m_single + ORDER BY WatchID ASC, CounterID DESC, ClientIP ASC, IPNetworkID DESC, SearchEngineID ASC + LIMIT 2000000 + FORMAT Null + + + DROP TABLE IF EXISTS {table} + + diff --git a/tests/queries/0_stateless/01042_h3_k_ring.reference b/tests/queries/0_stateless/01042_h3_k_ring.reference index 24f4b9885ce..770468da350 100644 --- a/tests/queries/0_stateless/01042_h3_k_ring.reference +++ b/tests/queries/0_stateless/01042_h3_k_ring.reference @@ -1,5 +1,7 @@ [581250224954015743,581259021047037951,581267817140060159,581276613233082367,581500913605148671,581518505791193087,581764796395814911] [581276613233082367] +[581250224954015743,581259021047037951,581267817140060159,581276613233082367,581500913605148671,581518505791193087,581764796395814911] +[581276613233082367] [578466261512486911,578712552117108735,578888473977552895,579205133326352383,579275502070530047,579662530163507199,579768083279773695] [580995138256371711,581144671837749247,581162264023793663,581166662070304767,581171060116815871,581250224954015743,581254623000526847,581259021047037951,581263419093549055,581267817140060159,581272215186571263,581276613233082367,581531699930726399,581536097977237503,581549292116770815,581553690163281919,581558088209793023,581747204209770495,581764796395814911] [589624655266971647,589625205022785535,589626854290227199,589627404046041087,589642797208829951,589644996232085503,589708218150682623,589708767906496511,589709317662310399,589709867418124287,589710417173938175,589710966929752063,589711516685565951,589714815220449279,589715914732077055,589725810336727039,589726909848354815,589727459604168703,589728009359982591,589729108871610367,589734606429749247,589735156185563135,589735705941377023,589736255697190911,589736805453004799,589737355208818687,589737904964632575,589742303011143679,589744502034399231,589745051790213119,589752198615793663,589752748371607551,589753298127421439,589753847883235327,589754397639049215,589754947394863103,589755497150676991] diff --git a/tests/queries/0_stateless/01042_h3_k_ring.sql b/tests/queries/0_stateless/01042_h3_k_ring.sql index d450954ab7a..8931efc44c2 100644 --- a/tests/queries/0_stateless/01042_h3_k_ring.sql +++ b/tests/queries/0_stateless/01042_h3_k_ring.sql @@ -5,6 +5,9 @@ SELECT h3kRing(581276613233082367, toUInt16(0)); SELECT h3kRing(581276613233082367, -1); -- { serverError 43 } SELECT h3kRing(581276613233082367, toUInt16(-1)); -- { serverError 12 } +SELECT arraySort(h3kRing(581276613233082367, 1)); +SELECT h3kRing(581276613233082367, 0); +SELECT h3kRing(581276613233082367, -1); -- { serverError 43 } DROP TABLE IF EXISTS h3_indexes; diff --git a/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.reference b/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.reference new file mode 100644 index 00000000000..828667becf2 --- /dev/null +++ b/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.reference @@ -0,0 +1 @@ +test1 test2 diff --git a/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh b/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh new file mode 100755 index 00000000000..3e2eda96f93 --- /dev/null +++ b/tests/queries/0_stateless/01072_window_view_multiple_columns_groupby.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# 
shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --multiquery <= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('WITH%', currentDatabase(), '%AS `id_no` %') and + type = 'QueryFinish' +order by query; WITH _CAST(\'default\', \'Nullable(String)\') AS `id_no` SELECT `one`.`dummy`, ignore(`id_no`) FROM `system`.`one` WHERE `dummy` IN (0, 2) WITH _CAST(\'default\', \'Nullable(String)\') AS `id_no` SELECT `one`.`dummy`, ignore(`id_no`) FROM `system`.`one` WHERE `dummy` IN (0, 2) +-- +-- w/ optimize_skip_unused_shards_rewrite_in=1 +-- + +set optimize_skip_unused_shards_rewrite_in=1; +-- detailed coverage for realistic examples +select 'optimize_skip_unused_shards_rewrite_in(0, 2)'; optimize_skip_unused_shards_rewrite_in(0, 2) +with (select currentDatabase()) as id_02 select *, ignore(id_02) from dist_01756 where dummy in (0, 2); 0 0 +system flush logs; +select query from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('WITH%', currentDatabase(), '%AS `id_02` %') and + type = 'QueryFinish' +order by query; WITH _CAST(\'default\', \'Nullable(String)\') AS `id_02` SELECT `one`.`dummy`, ignore(`id_02`) FROM `system`.`one` WHERE `dummy` IN tuple(0) WITH _CAST(\'default\', \'Nullable(String)\') AS `id_02` SELECT `one`.`dummy`, ignore(`id_02`) FROM `system`.`one` WHERE `dummy` IN tuple(2) +select 'optimize_skip_unused_shards_rewrite_in(2,)'; optimize_skip_unused_shards_rewrite_in(2,) +with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2,); +system flush logs; +select query from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('WITH%', currentDatabase(), '%AS `id_2` %') and + type = 'QueryFinish' +order by query; WITH _CAST(\'default\', \'Nullable(String)\') AS `id_2` SELECT `one`.`dummy`, ignore(`id_2`) FROM `system`.`one` WHERE `dummy` IN tuple(2) +select 'optimize_skip_unused_shards_rewrite_in(0,)'; optimize_skip_unused_shards_rewrite_in(0,) +with (select currentDatabase()) as id_0 select *, ignore(id_0) from dist_01756 where dummy in (0,); 0 0 +system flush logs; +select query from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('WITH%', currentDatabase(), '%AS `id_0` %') and + type = 'QueryFinish' +order by query; WITH _CAST(\'default\', \'Nullable(String)\') AS `id_0` SELECT `one`.`dummy`, ignore(`id_0`) FROM `system`.`one` WHERE `dummy` IN tuple(0) +-- signed column +select 'signed column'; signed column +create table data_01756_signed (key Int) engine=Null; +with (select currentDatabase()) as key_signed select *, ignore(key_signed) from cluster(test_cluster_two_shards, currentDatabase(), data_01756_signed, key) where key in (-1, -2); +system flush logs; +select query from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('WITH%', currentDatabase(), '%AS `key_signed` %') and + type = 'QueryFinish' +order by query; WITH _CAST(\'default\', \'Nullable(String)\') AS 
`key_signed` SELECT `key`, ignore(`key_signed`) FROM `default`.`data_01756_signed` WHERE `key` IN tuple(-1) WITH _CAST(\'default\', \'Nullable(String)\') AS `key_signed` SELECT `key`, ignore(`key_signed`) FROM `default`.`data_01756_signed` WHERE `key` IN tuple(-2) +-- not tuple +select * from dist_01756 where dummy in (0); 0 +select * from dist_01756 where dummy in ('0'); 0 +-- +-- errors +-- +select 'errors'; errors +-- optimize_skip_unused_shards does not support non-constants +select * from dist_01756 where dummy in (select * from system.one); -- { serverError 507 } +select * from dist_01756 where dummy in (toUInt8(0)); -- { serverError 507 } +-- NOT IN does not supported +select * from dist_01756 where dummy not in (0, 2); -- { serverError 507 } +-- +-- others +-- +select 'others'; others +select * from dist_01756 where dummy not in (2, 3) and dummy in (0, 2); 0 +select * from dist_01756 where dummy in tuple(0, 2); 0 +select * from dist_01756 where dummy in tuple(0); 0 +select * from dist_01756 where dummy in tuple(2); +-- Identifier is NULL +select (2 IN (2,)), * from dist_01756 where dummy in (0, 2) format Null; +-- Literal is NULL +select (dummy IN (toUInt8(2),)), * from dist_01756 where dummy in (0, 2) format Null; +-- different type +select 'different types -- prohibited'; different types -- prohibited +create table data_01756_str (key String) engine=Memory(); +insert into data_01756_str values (0)(1); +-- SELECT +-- cityHash64(0) % 2, +-- cityHash64(2) % 2 +-- +-- ┌─modulo(cityHash64(0), 2)─┬─modulo(cityHash64(2), 2)─┐ +-- │ 0 │ 1 │ +-- └──────────────────────────┴──────────────────────────┘ +create table dist_01756_str as data_01756_str engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01756_str, cityHash64(key)); +select * from dist_01756_str where key in ('0', '2'); +0 +select * from dist_01756_str where key in (0, 2); +0 +select * from dist_01756_str where key in ('0', Null); -- { serverError 507 } +-- select * from dist_01756_str where key in (0, 2); -- { serverError 53 } +-- select * from dist_01756_str where key in (0, Null); -- { serverError 53 } + +-- different type #2 +select 'different types -- conversion'; different types -- conversion +create table dist_01756_column as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); +select * from dist_01756_column where dummy in (0, '255'); 0 +select * from dist_01756_column where dummy in (0, '255foo'); -- { serverError 53 } +-- intHash64 does not accept string, but implicit conversion should be done +select * from dist_01756 where dummy in ('0', '2'); 0 +-- optimize_skip_unused_shards_limit +select 'optimize_skip_unused_shards_limit'; optimize_skip_unused_shards_limit +select * from dist_01756 where dummy in (0, 2) settings optimize_skip_unused_shards_limit=1; -- { serverError 507 } +select * from dist_01756 where dummy in (0, 2) settings optimize_skip_unused_shards_limit=1, force_optimize_skip_unused_shards=0; 0 0 diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql index a5090551c89..59709a7ee2a 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql @@ -11,6 +11,15 @@ drop table if exists dist_01756_column; drop table if exists data_01756_str; drop table if exists data_01756_signed; +-- separate log entry for localhost queries +set prefer_localhost_replica=0; 
+set force_optimize_skip_unused_shards=2; +set optimize_skip_unused_shards=1; +set optimize_skip_unused_shards_rewrite_in=0; +set log_queries=1; + +-- { echoOn } + -- SELECT -- intHash64(0) % 2, -- intHash64(2) % 2 @@ -19,13 +28,6 @@ drop table if exists data_01756_signed; -- └─────────────────────────┴─────────────────────────┘ create table dist_01756 as system.one engine=Distributed(test_cluster_two_shards, system, one, intHash64(dummy)); --- separate log entry for localhost queries -set prefer_localhost_replica=0; -set force_optimize_skip_unused_shards=2; -set optimize_skip_unused_shards=1; -set optimize_skip_unused_shards_rewrite_in=0; -set log_queries=1; - -- -- w/o optimize_skip_unused_shards_rewrite_in=1 -- @@ -131,8 +133,17 @@ select (dummy IN (toUInt8(2),)), * from dist_01756 where dummy in (0, 2) format -- different type select 'different types -- prohibited'; create table data_01756_str (key String) engine=Memory(); +insert into data_01756_str values (0)(1); +-- SELECT +-- cityHash64(0) % 2, +-- cityHash64(2) % 2 +-- +-- ┌─modulo(cityHash64(0), 2)─┬─modulo(cityHash64(2), 2)─┐ +-- │ 0 │ 1 │ +-- └──────────────────────────┴──────────────────────────┘ create table dist_01756_str as data_01756_str engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01756_str, cityHash64(key)); select * from dist_01756_str where key in ('0', '2'); +select * from dist_01756_str where key in (0, 2); select * from dist_01756_str where key in ('0', Null); -- { serverError 507 } -- select * from dist_01756_str where key in (0, 2); -- { serverError 53 } -- select * from dist_01756_str where key in (0, Null); -- { serverError 53 } @@ -150,6 +161,8 @@ select 'optimize_skip_unused_shards_limit'; select * from dist_01756 where dummy in (0, 2) settings optimize_skip_unused_shards_limit=1; -- { serverError 507 } select * from dist_01756 where dummy in (0, 2) settings optimize_skip_unused_shards_limit=1, force_optimize_skip_unused_shards=0; +-- { echoOff } + drop table dist_01756; drop table dist_01756_str; drop table dist_01756_column; diff --git a/tests/queries/0_stateless/01883_grouping_sets_crash.reference b/tests/queries/0_stateless/01883_grouping_sets_crash.reference new file mode 100644 index 00000000000..4d9e967b766 --- /dev/null +++ b/tests/queries/0_stateless/01883_grouping_sets_crash.reference @@ -0,0 +1,209 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +SECOND QUERY: +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +THIRD QUERY: +\N 1 0 0 +\N 2 0 0 +\N 3 0 0 +\N 4 0 0 +\N 5 0 0 +\N 1 0 0 +\N 2 0 0 +\N 3 0 0 +\N 4 0 0 +\N 5 0 0 +\N 0 10 10 +\N 0 9 9 +\N 0 8 8 +\N 0 7 7 +\N 0 6 6 +\N 0 5 5 +\N 0 4 4 +\N 0 3 3 +\N 0 2 2 +\N 0 1 1 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 
0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +\N 0 0 0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +w\0\0ldworldwo\0l\0world +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 diff --git a/tests/queries/0_stateless/01883_grouping_sets_crash.sql b/tests/queries/0_stateless/01883_grouping_sets_crash.sql new file mode 100644 index 00000000000..cf56c8546ce --- /dev/null +++ b/tests/queries/0_stateless/01883_grouping_sets_crash.sql @@ -0,0 +1,99 @@ +DROP TABLE IF EXISTS grouping_sets; + +CREATE TABLE grouping_sets(fact_1_id Int32, fact_2_id Int32, fact_3_id Int32, fact_4_id Int32, sales_value Int32) ENGINE = Memory; + +INSERT INTO grouping_sets +SELECT + number % 2 + 1 AS fact_1_id, + number % 5 + 1 AS fact_2_id, + number % 10 + 1 AS fact_3_id, + number % 10 + 1 AS fact_4_id, + number % 100 AS sales_value +FROM system.numbers limit 1000; + +SELECT + fact_3_id, + fact_4_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ('wo\0ldworldwo\0ldworld'), + (fact_3_id, fact_4_id)) +ORDER BY + fact_3_id, fact_4_id; + +SELECT 'SECOND QUERY:'; + +SELECT + fact_3_id, + fact_4_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + (fact_1_id, fact_2_id), + ((-9223372036854775808, NULL, (tuple(1.), (tuple(1.), 1048576), 65535))), + ((tuple(3.4028234663852886e38), (tuple(1024), -2147483647), NULL)), + (fact_3_id, fact_4_id)) +ORDER BY + (NULL, ('256', (tuple(NULL), NULL), NULL, NULL), NULL) ASC, + fact_1_id DESC NULLS FIRST, + fact_2_id DESC NULLS FIRST, + fact_4_id ASC; + +SELECT 'THIRD QUERY:'; + +SELECT + extractAllGroups(NULL, 'worldworldworldwo\0ldworldworldworldwo\0ld'), + fact_2_id, + fact_3_id, + fact_4_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + (sales_value), + (fact_1_id, fact_2_id), + ('wo\0ldworldwo\0ldworld'), + (fact_3_id, fact_4_id)) +ORDER BY + fact_1_id DESC NULLS LAST, + fact_1_id DESC NULLS FIRST, + fact_2_id ASC, + fact_3_id DESC NULLS FIRST, + fact_4_id ASC; + +SELECT fact_3_id +FROM grouping_sets +GROUP BY + GROUPING SETS ((fact_3_id, fact_4_id)) +ORDER BY fact_3_id ASC; + +-- Following two queries were fuzzed +SELECT 'w\0\0ldworldwo\0l\0world' +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ( fact_4_id), + ( NULL), + ( fact_3_id, fact_4_id)) +ORDER BY + NULL ASC, + NULL DESC NULLS FIRST, + fact_3_id ASC, + fact_3_id ASC NULLS LAST, + 'wo\0ldworldwo\0ldworld' ASC NULLS LAST, + 'w\0\0ldworldwo\0l\0world' DESC NULLS FIRST, + 'wo\0ldworldwo\0ldworld' ASC, + NULL ASC NULLS FIRST, + fact_4_id DESC NULLS LAST; + +SELECT fact_3_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ( 'wo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworld'), + ( NULL), + ( fact_4_id), + ( fact_3_id, fact_4_id)) +ORDER BY fact_3_id ASC NULLS FIRST; + +DROP TABLE IF EXISTS grouping_sets; \ No newline at end of file diff --git a/tests/queries/0_stateless/01883_with_grouping_sets.reference 
b/tests/queries/0_stateless/01883_with_grouping_sets.reference new file mode 100644 index 00000000000..a036ccb0796 --- /dev/null +++ b/tests/queries/0_stateless/01883_with_grouping_sets.reference @@ -0,0 +1,208 @@ +(Expression) +ExpressionTransform + (Sorting) + MergingSortedTransform 2 → 1 + MergeSortingTransform × 2 + LimitsCheckingTransform × 2 + PartialSortingTransform × 2 + (Expression) + ExpressionTransform × 2 + (Aggregating) + ExpressionTransform × 2 + AggregatingTransform × 2 + Copy 1 → 2 + (Expression) + ExpressionTransform + (SettingQuotaAndLimits) + (ReadFromStorage) + Memory 0 → 1 +1 0 1 4500 +1 0 3 4700 +1 0 5 4900 +1 0 7 5100 +1 0 9 5300 +1 1 0 4500 +1 2 0 5100 +1 3 0 4700 +1 4 0 5300 +1 5 0 4900 +2 0 2 4600 +2 0 4 4800 +2 0 6 5000 +2 0 8 5200 +2 0 10 5400 +2 1 0 5000 +2 2 0 4600 +2 3 0 5200 +2 4 0 4800 +2 5 0 5400 +0 0 1 1 4500 +0 0 2 2 4600 +0 0 3 3 4700 +0 0 4 4 4800 +0 0 5 5 4900 +0 0 6 6 5000 +0 0 7 7 5100 +0 0 8 8 5200 +0 0 9 9 5300 +0 0 10 10 5400 +1 1 0 0 4500 +1 2 0 0 5100 +1 3 0 0 4700 +1 4 0 0 5300 +1 5 0 0 4900 +2 1 0 0 5000 +2 2 0 0 4600 +2 3 0 0 5200 +2 4 0 0 4800 +2 5 0 0 5400 +0 0 0 49500 +0 0 1 4500 +0 0 2 4600 +0 0 3 4700 +0 0 4 4800 +0 0 5 4900 +0 0 6 5000 +0 0 7 5100 +0 0 8 5200 +0 0 9 5300 +0 0 10 5400 +1 1 0 4500 +1 2 0 5100 +1 3 0 4700 +1 4 0 5300 +1 5 0 4900 +2 1 0 5000 +2 2 0 4600 +2 3 0 5200 +2 4 0 4800 +2 5 0 5400 +(Expression) +ExpressionTransform + (Sorting) + MergingSortedTransform 2 → 1 + MergeSortingTransform × 2 + LimitsCheckingTransform × 2 + PartialSortingTransform × 2 + (Expression) + ExpressionTransform × 2 + (Aggregating) + ExpressionTransform × 2 + Resize × 2 3 → 1 + AggregatingTransform × 6 + Copy × 3 1 → 2 + (Expression) + ExpressionTransform × 3 + (SettingQuotaAndLimits) + (ReadFromStorage) + NumbersMt × 3 0 → 1 +4999500000 10000 +4999510000 10000 +4999520000 10000 +4999530000 10000 +4999540000 10000 +4999550000 10000 +4999560000 10000 +4999570000 10000 +4999580000 10000 +4999590000 10000 +4999600000 10000 +4999610000 10000 +4999620000 10000 +4999630000 10000 +4999640000 10000 +4999650000 10000 +4999660000 10000 +4999670000 10000 +4999680000 10000 +4999690000 10000 +4999700000 10000 +4999710000 10000 +4999720000 10000 +4999730000 10000 +4999740000 10000 +4999750000 10000 +4999760000 10000 +4999770000 10000 +4999780000 10000 +4999790000 10000 +4999800000 10000 +4999810000 10000 +4999820000 10000 +4999830000 10000 +4999840000 10000 +4999850000 10000 +4999860000 10000 +4999870000 10000 +4999880000 10000 +4999890000 10000 +4999900000 10000 +4999910000 10000 +4999920000 10000 +4999930000 10000 +4999940000 10000 +4999950000 10000 +4999960000 10000 +4999970000 10000 +4999980000 10000 +4999990000 10000 +5000000000 10000 +5000010000 10000 +5000020000 10000 +5000030000 10000 +5000040000 10000 +5000050000 10000 +5000060000 10000 +5000070000 10000 +5000080000 10000 +5000090000 10000 +5000100000 10000 +5000110000 10000 +5000120000 10000 +5000130000 10000 +5000140000 10000 +5000150000 10000 +5000160000 10000 +5000170000 10000 +5000180000 10000 +5000190000 10000 +5000200000 10000 +5000210000 10000 +5000220000 10000 +5000230000 10000 +5000240000 10000 +5000250000 10000 +5000260000 10000 +5000270000 10000 +5000280000 10000 +5000290000 10000 +5000300000 10000 +5000310000 10000 +5000320000 10000 +5000330000 10000 +5000340000 10000 +5000350000 10000 +5000360000 10000 +5000370000 10000 +5000380000 10000 +5000390000 10000 +5000400000 10000 +5000410000 10000 +5000420000 10000 +5000430000 10000 +5000440000 10000 +5000450000 10000 +5000460000 10000 
+5000470000 10000 +5000480000 10000 +5000490000 10000 +49999500000 100000 +49999600000 100000 +49999700000 100000 +49999800000 100000 +49999900000 100000 +50000000000 100000 +50000100000 100000 +50000200000 100000 +50000300000 100000 +50000400000 100000 diff --git a/tests/queries/0_stateless/01883_with_grouping_sets.sql b/tests/queries/0_stateless/01883_with_grouping_sets.sql new file mode 100644 index 00000000000..bf96248e10e --- /dev/null +++ b/tests/queries/0_stateless/01883_with_grouping_sets.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS grouping_sets; + +CREATE TABLE grouping_sets(fact_1_id Int32, fact_2_id Int32, fact_3_id Int32, fact_4_id Int32, sales_value Int32) ENGINE = Memory; + +SELECT fact_1_id, fact_3_id, sum(sales_value), count() from grouping_sets GROUP BY GROUPING SETS(fact_1_id, fact_3_id) ORDER BY fact_1_id, fact_3_id; + +INSERT INTO grouping_sets +SELECT + number % 2 + 1 AS fact_1_id, + number % 5 + 1 AS fact_2_id, + number % 10 + 1 AS fact_3_id, + number % 10 + 1 AS fact_4_id, + number % 100 AS sales_value +FROM system.numbers limit 1000; + +EXPLAIN PIPELINE +SELECT fact_1_id, fact_2_id, fact_3_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_1_id, fact_3_id)) +ORDER BY fact_1_id, fact_2_id, fact_3_id; + +SELECT fact_1_id, fact_2_id, fact_3_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_1_id, fact_3_id)) +ORDER BY fact_1_id, fact_2_id, fact_3_id; + +SELECT fact_1_id, fact_2_id, fact_3_id, fact_4_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_3_id, fact_4_id)) +ORDER BY fact_1_id, fact_2_id, fact_3_id, fact_4_id; + +SELECT fact_1_id, fact_2_id, fact_3_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_3_id), ()) +ORDER BY fact_1_id, fact_2_id, fact_3_id; + +SELECT + fact_1_id, + fact_3_id, + SUM(sales_value) AS sales_value +FROM grouping_sets +GROUP BY grouping sets ((fact_1_id), (fact_1_id, fact_3_id)) WITH TOTALS +ORDER BY fact_1_id, fact_3_id; -- { serverError NOT_IMPLEMENTED } + +SELECT + fact_1_id, + fact_3_id, + SUM(sales_value) AS sales_value +FROM grouping_sets +GROUP BY grouping sets (fact_1_id, (fact_1_id, fact_3_id)) WITH TOTALS +ORDER BY fact_1_id, fact_3_id; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE grouping_sets; + +EXPLAIN PIPELINE +SELECT SUM(number) as sum_value, count() AS count_value from numbers_mt(1000000) +GROUP BY GROUPING SETS ((number % 10), (number % 100)) +ORDER BY sum_value, count_value SETTINGS max_threads=3; + +SELECT SUM(number) as sum_value, count() AS count_value from numbers_mt(1000000) +GROUP BY GROUPING SETS ((number % 10), (number % 100)) +ORDER BY sum_value, count_value SETTINGS max_threads=3; diff --git a/tests/queries/0_stateless/02104_overcommit_memory.sh b/tests/queries/0_stateless/02104_overcommit_memory.sh index 7fdf74a30bf..f2016dbc0c1 100755 --- a/tests/queries/0_stateless/02104_overcommit_memory.sh +++ b/tests/queries/0_stateless/02104_overcommit_memory.sh @@ -11,13 +11,13 @@ $CLICKHOUSE_CLIENT -q 'GRANT ALL ON *.* TO u02104' function overcommited() { - $CLICKHOUSE_CLIENT -u u02104 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_guaranteed_memory_usage=1,memory_usage_overcommit_max_wait_microseconds=500' 2>&1 \ + $CLICKHOUSE_CLIENT -u u02104 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS 
memory_overcommit_ratio_denominator=1,memory_usage_overcommit_max_wait_microseconds=500' 2>&1 \ | grep -F -q "MEMORY_LIMIT_EXCEEDED" && echo "OVERCOMMITED WITH USER LIMIT IS KILLED" } function expect_execution() { - $CLICKHOUSE_CLIENT -u u02104 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_memory_usage_for_user=5000000,max_guaranteed_memory_usage=2,memory_usage_overcommit_max_wait_microseconds=500' >/dev/null 2>/dev/null + $CLICKHOUSE_CLIENT -u u02104 -q 'SELECT number FROM numbers(130000) GROUP BY number SETTINGS max_memory_usage_for_user=5000000,memory_overcommit_ratio_denominator=2,memory_usage_overcommit_max_wait_microseconds=500' >/dev/null 2>/dev/null } export -f overcommited diff --git a/tests/queries/0_stateless/02165_replicated_grouping_sets.reference b/tests/queries/0_stateless/02165_replicated_grouping_sets.reference new file mode 100644 index 00000000000..659cd98368d --- /dev/null +++ b/tests/queries/0_stateless/02165_replicated_grouping_sets.reference @@ -0,0 +1,13 @@ +0 0 3 2 +0 1 5 2 +0 0 6 3 +0 2 7 2 +1 0 9 3 +0 0 6 4 +0 1 10 4 +0 0 12 6 +0 2 14 4 +1 0 18 6 +0 6 4 +1 10 4 +2 14 4 diff --git a/tests/queries/0_stateless/02165_replicated_grouping_sets.sql b/tests/queries/0_stateless/02165_replicated_grouping_sets.sql new file mode 100644 index 00000000000..d92d92c3e72 --- /dev/null +++ b/tests/queries/0_stateless/02165_replicated_grouping_sets.sql @@ -0,0 +1,45 @@ +SELECT + k1, + k2, + SUM(number) AS sum_value, + count() AS count_value +FROM numbers(6) +GROUP BY + GROUPING SETS + ( + (number % 2 AS k1), + (number % 3 AS k2) + ) +ORDER BY + sum_value ASC, + count_value ASC; + +SELECT + k1, + k2, + SUM(number) AS sum_value, + count() AS count_value +FROM remote('127.0.0.{2,3}', numbers(6)) +GROUP BY + GROUPING SETS + ( + (number % 2 AS k1), + (number % 3 AS k2) + ) +ORDER BY + sum_value ASC, + count_value ASC; + +SELECT + k2, + SUM(number) AS sum_value, + count() AS count_value +FROM remote('127.0.0.{2,3}', numbers(6)) +GROUP BY + GROUPING SETS + ( + (number % 3 AS k2) + ) +ORDER BY + sum_value ASC, + count_value ASC; diff --git a/tests/queries/0_stateless/02226_s3_with_cache.reference b/tests/queries/0_stateless/02226_s3_with_cache.reference index 4041f51b3f9..596e6e5345a 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.reference +++ b/tests/queries/0_stateless/02226_s3_with_cache.reference @@ -1,4 +1,4 @@ SELECT 1, * FROM test LIMIT 10 FORMAT Null; 1 0 1 SELECT 2, * FROM test LIMIT 10 FORMAT Null; 0 1 0 0 -SELECT 3, * FROM test LIMIT 10 FORMAT Null; 1 1 0 +SELECT 3, * FROM test LIMIT 10 FORMAT Null; 0 1 0 diff --git a/tests/queries/0_stateless/02226_s3_with_cache.sql b/tests/queries/0_stateless/02226_s3_with_cache.sql index d470f2ef140..e62e63b7f97 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.sql +++ b/tests/queries/0_stateless/02226_s3_with_cache.sql @@ -13,9 +13,9 @@ SELECT 1, * FROM test LIMIT 10 FORMAT Null; SYSTEM FLUSH LOGS; SELECT query, - ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read, - ProfileEvents['RemoteFSCacheReadBytes'] > 0 as remote_fs_cache_read, - ProfileEvents['RemoteFSCacheDownloadBytes'] > 0 as remote_fs_read_and_download + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, + ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log WHERE query LIKE 'SELECT 1, * FROM test LIMIT%' AND type = 'QueryFinish' @@ -29,9 +29,9 @@ SELECT 2, * FROM test 
LIMIT 10 FORMAT Null; SYSTEM FLUSH LOGS; SELECT query, - ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read, - ProfileEvents['RemoteFSCacheReadBytes'] > 0 as remote_fs_cache_read, - ProfileEvents['RemoteFSCacheDownloadBytes'] > 0 as remote_fs_read_and_download + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, + ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log WHERE query LIKE 'SELECT 2, * FROM test LIMIT%' AND type = 'QueryFinish' @@ -56,9 +56,9 @@ SELECT 3, * FROM test LIMIT 10 FORMAT Null; SYSTEM FLUSH LOGS; SELECT query, - ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read, - ProfileEvents['RemoteFSCacheReadBytes'] > 0 as remote_fs_cache_read, - ProfileEvents['RemoteFSCacheDownloadBytes'] > 0 as remote_fs_read_and_download + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, + ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log WHERE query LIKE 'SELECT 3, * FROM test LIMIT%' AND type = 'QueryFinish' diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.reference b/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.reference index 9b0d13a1625..0a797d38d7e 100644 --- a/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.reference +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.reference @@ -7,12 +7,11 @@ DROP TABLE IF EXISTS test; DROP TABLE IF EXISTS system.filesystem_cache_log; CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; INSERT INTO test SELECT number, toString(number) FROM numbers(100000); -SELECT * FROM test FORMAT Null; +SELECT 2240, * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_FS_AND_DOWNLOADED_TO_CACHE'; -(0,519) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_FS_AND_DOWNLOADED_TO_CACHE' AND query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%' AND current_database = currentDatabase() ORDER BY event_time desc LIMIT 1); (0,808110) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE -SELECT * FROM test FORMAT Null; +SELECT 2241, * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_CACHE'; +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_CACHE' AND query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%' AND current_database = currentDatabase() ORDER BY event_time desc LIMIT 1); (0,808110) READ_FROM_CACHE diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql b/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql index 59e164d6f9f..871f9305c55 100644 --- a/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql @@ -11,10 +11,10 @@ DROP TABLE IF EXISTS system.filesystem_cache_log; CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS 
storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; INSERT INTO test SELECT number, toString(number) FROM numbers(100000); -SELECT * FROM test FORMAT Null; +SELECT 2240, * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_FS_AND_DOWNLOADED_TO_CACHE'; +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_FS_AND_DOWNLOADED_TO_CACHE' AND query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2240%' AND current_database = currentDatabase() ORDER BY event_time desc LIMIT 1); -SELECT * FROM test FORMAT Null; +SELECT 2241, * FROM test FORMAT Null; SYSTEM FLUSH LOGS; -SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_CACHE'; +SELECT file_segment_range, read_type FROM system.filesystem_cache_log WHERE read_type='READ_FROM_CACHE' AND query_id = (SELECT query_id from system.query_log where query LIKE '%SELECT 2241%' AND current_database = currentDatabase() ORDER BY event_time desc LIMIT 1); diff --git a/tests/queries/0_stateless/02267_join_dup_columns_issue36199.reference b/tests/queries/0_stateless/02267_join_dup_columns_issue36199.reference index 47e37dc8fc8..c075b08e533 100644 --- a/tests/queries/0_stateless/02267_join_dup_columns_issue36199.reference +++ b/tests/queries/0_stateless/02267_join_dup_columns_issue36199.reference @@ -1,2 +1,14 @@ 2 1 2 +0 1 0 +0 1 1 0 +0 1 0 +0 0 1 0 +y 0 0 1 3 +\N 0 \N 10000000000 \N 2 1 2 +0 1 0 +0 1 1 0 +0 1 0 +0 0 1 0 +y 0 0 1 3 +\N 0 \N 10000000000 \N diff --git a/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql b/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql index 118d475207a..b51b3cc22e2 100644 --- a/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql +++ b/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql @@ -1,13 +1,22 @@ -set join_algorithm = 'hash'; +SET join_algorithm = 'hash'; -SELECT * -FROM ( SELECT 2 AS x ) AS t1 -RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 -ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) AS t1 RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y') :: Nullable(Int32), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT count('a'), count('b'), count('c'), 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; -set join_algorithm = 'partial_merge'; +SELECT 'y', * FROM (SELECT count('y'), count('y'), 2 AS x) AS t1 RIGHT JOIN (SELECT count('x'), count('y'), 3 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT arrayJoin([NULL]), 9223372036854775806, arrayJoin([NULL]), NULL AS x) AS t1 RIGHT JOIN (SELECT arrayJoin([arrayJoin([10000000000.])]), NULL AS x) AS t2 ON t1.x = t2.x; -SELECT * -FROM ( SELECT 2 AS x ) AS t1 -RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 -ON t1.x = t2.x; +SET join_algorithm = 'partial_merge'; + +SELECT * FROM ( SELECT 2 AS x ) AS t1 RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; 
+SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x;
+SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y') :: Nullable(Int32), 0 AS x ) AS t2 ON t1.x = t2.x;
+
+SELECT * FROM ( SELECT count('a'), count('b'), count('c'), 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x;
+
+SELECT 'y', * FROM (SELECT count('y'), count('y'), 2 AS x) AS t1 RIGHT JOIN (SELECT count('x'), count('y'), 3 AS x) AS t2 ON t1.x = t2.x;
+SELECT * FROM (SELECT arrayJoin([NULL]), 9223372036854775806, arrayJoin([NULL]), NULL AS x) AS t1 RIGHT JOIN (SELECT arrayJoin([arrayJoin([10000000000.])]), NULL AS x) AS t2 ON t1.x = t2.x;
diff --git a/tests/queries/0_stateless/02282_array_distance.reference b/tests/queries/0_stateless/02282_array_distance.reference
new file mode 100644
index 00000000000..158df656403
--- /dev/null
+++ b/tests/queries/0_stateless/02282_array_distance.reference
@@ -0,0 +1,42 @@
+6
+3.7416575
+3
+0.0025851727
+\N
+nan
+12
+14
+21
+7.071068
+9.165152
+12.124355
+2
+5
+4
+0.16847819
+0.35846698
+0.07417989
+6
+8
+9
+0.020204102886728692
+0.11808289631180302
+0
+1 1 218.74642854227358
+1 2 1348.2117786164013
+2 1 219.28064210048274
+2 2 1347.4008312302617
+3 1 214.35251339790725
+3 2 1342.8856987845243
+1 1 218.74643
+1 2 1348.2118
+2 1 219.28064
+2 2 1347.4009
+3 1 214.35251
+3 2 1342.8857
+1 1 218.74642854227358
+1 2 1348.2117786164013
+2 1 219.28064210048274
+2 2 1347.4008312302617
+3 1 214.35251339790725
+3 2 1342.8856987845243
diff --git a/tests/queries/0_stateless/02282_array_distance.sql b/tests/queries/0_stateless/02282_array_distance.sql
new file mode 100644
index 00000000000..04de01d7d66
--- /dev/null
+++ b/tests/queries/0_stateless/02282_array_distance.sql
@@ -0,0 +1,41 @@
+SELECT arrayL1Distance([0, 0, 0], [1, 2, 3]);
+SELECT arrayL2Distance([1, 2, 3], [0, 0, 0]);
+SELECT arrayLinfDistance([1, 2, 3], [0, 0, 0]);
+SELECT arrayCosineDistance([1, 2, 3], [3, 5, 7]);
+
+SELECT arrayL2Distance([1, 2, 3], NULL);
+SELECT arrayCosineDistance([1, 2, 3], [0, 0, 0]);
+
+DROP TABLE IF EXISTS vec1;
+DROP TABLE IF EXISTS vec2;
+DROP TABLE IF EXISTS vec2f;
+DROP TABLE IF EXISTS vec2d;
+CREATE TABLE vec1 (id UInt64, v Array(UInt8)) ENGINE = Memory;
+CREATE TABLE vec2 (id UInt64, v Array(Int64)) ENGINE = Memory;
+CREATE TABLE vec2f (id UInt64, v Array(Float32)) ENGINE = Memory;
+CREATE TABLE vec2d (id UInt64, v Array(Float64)) ENGINE = Memory;
+
+INSERT INTO vec1 VALUES (1, [3, 4, 5]), (2, [2, 4, 8]), (3, [7, 7, 7]);
+SELECT arrayL1Distance(v, [0, 0, 0]) FROM vec1;
+SELECT arrayL2Distance(v, [0, 0, 0]) FROM vec1;
+SELECT arrayLinfDistance([5, 4, 3], v) FROM vec1;
+SELECT arrayCosineDistance([3, 2, 1], v) FROM vec1;
+SELECT arrayLinfDistance(v, materialize([0, -2, 0])) FROM vec1;
+SELECT arrayCosineDistance(v, materialize([1., 1., 1.])) FROM vec1;
+
+INSERT INTO vec2 VALUES (1, [100, 200, 0]), (2, [888, 777, 666]);
+SELECT v1.id, v2.id, arrayL2Distance(v1.v, v2.v) as dist FROM vec1 v1, vec2 v2;
+
+INSERT INTO vec2f VALUES (1, [100, 200, 0]), (2, [888, 777, 666]);
+SELECT v1.id, v2.id, arrayL2Distance(v1.v, v2.v) as dist FROM vec1 v1, vec2f v2;
+
+INSERT INTO vec2d VALUES (1, [100, 200, 0]), (2, [888, 777, 666]);
+SELECT v1.id, v2.id, arrayL2Distance(v1.v, v2.v) as dist FROM vec1 v1, vec2d v2;
+
+SELECT arrayL1Distance([0, 0], [1]); -- { serverError 190 }
+SELECT arrayL2Distance((1, 2), (3,4)); -- { serverError 43 }
+
+DROP TABLE vec1;
+DROP TABLE vec2;
+DROP TABLE vec2f;
+DROP TABLE vec2d;
diff --git a/tests/queries/0_stateless/02283_array_norm.reference b/tests/queries/0_stateless/02283_array_norm.reference
new file mode 100644
index 00000000000..6dd6b79e6d9
--- /dev/null
+++ b/tests/queries/0_stateless/02283_array_norm.reference
@@ -0,0 +1,27 @@
+6
+7.0710678118654755
+2
+1 5
+2 2
+3 5.196152
+4 0
+1 11
+2 11
+3 11
+4 11
+1 5
+2 2
+3 5.196152
+4 0
+1 11
+2 11
+3 11
+4 11
+1 5
+2 2
+3 5.196152422706632
+4 0
+1 11
+2 11
+3 11
+4 11
diff --git a/tests/queries/0_stateless/02283_array_norm.sql b/tests/queries/0_stateless/02283_array_norm.sql
new file mode 100644
index 00000000000..e11caea7cc1
--- /dev/null
+++ b/tests/queries/0_stateless/02283_array_norm.sql
@@ -0,0 +1,28 @@
+SELECT arrayL1Norm([1, 2, 3]);
+SELECT arrayL2Norm([3., 4., 5.]);
+SELECT arrayLinfNorm([0, 0, 2]);
+
+DROP TABLE IF EXISTS vec1;
+DROP TABLE IF EXISTS vec1f;
+DROP TABLE IF EXISTS vec1d;
+CREATE TABLE vec1 (id UInt64, v Array(UInt8)) ENGINE = Memory;
+CREATE TABLE vec1f (id UInt64, v Array(Float32)) ENGINE = Memory;
+CREATE TABLE vec1d (id UInt64, v Array(Float64)) ENGINE = Memory;
+INSERT INTO vec1 VALUES (1, [3, 4]), (2, [2]), (3, [3, 3, 3]), (4, NULL);
+INSERT INTO vec1f VALUES (1, [3, 4]), (2, [2]), (3, [3, 3, 3]), (4, NULL);
+INSERT INTO vec1d VALUES (1, [3, 4]), (2, [2]), (3, [3, 3, 3]), (4, NULL);
+
+SELECT id, arrayL2Norm(v) FROM vec1;
+SELECT id, arrayL1Norm(materialize([5., 6.])) FROM vec1;
+
+SELECT id, arrayL2Norm(v) FROM vec1f;
+SELECT id, arrayL1Norm(materialize([5., 6.])) FROM vec1f;
+
+SELECT id, arrayL2Norm(v) FROM vec1d;
+SELECT id, arrayL1Norm(materialize([5., 6.])) FROM vec1d;
+
+SELECT arrayL1Norm((1, 2,)); -- { serverError 43 }
+
+DROP TABLE vec1;
+DROP TABLE vec1f;
+DROP TABLE vec1d;
diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.reference b/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
index fb18ad12c4d..30026f943a1 100644
--- a/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
+++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
@@ -28,5 +28,3 @@ SELECT count() FROM system.filesystem_cache;
 SYSTEM DROP FILESYSTEM CACHE './s3_cache/';
 SELECT count() FROM system.filesystem_cache;
 2
-EXPLAIN SYNTAX SYSTEM DROP FILESYSTEM CACHE './s3_cache/' FORCE;
-SYSTEM DROP FILESYSTEM CACHE ./s3_cache/ FORCE
diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.sql b/tests/queries/0_stateless/02286_drop_filesystem_cache.sql
index ee93b165637..85bfb6ca1c7 100644
--- a/tests/queries/0_stateless/02286_drop_filesystem_cache.sql
+++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.sql
@@ -31,6 +31,4 @@ SELECT * FROM test2 FORMAT Null;
 SELECT count() FROM system.filesystem_cache;
 
 SYSTEM DROP FILESYSTEM CACHE './s3_cache/';
-SELECT count() FROM system.filesystem_cache;
-
-EXPLAIN SYNTAX SYSTEM DROP FILESYSTEM CACHE './s3_cache/' FORCE;
+SELECT count() FROM system.filesystem_cache;
\ No newline at end of file
diff --git a/tests/queries/0_stateless/02293_ilike_on_fixed_strings.reference b/tests/queries/0_stateless/02293_ilike_on_fixed_strings.reference
new file mode 100644
index 00000000000..5489ab3d7ce
--- /dev/null
+++ b/tests/queries/0_stateless/02293_ilike_on_fixed_strings.reference
@@ -0,0 +1,2 @@
+AA 0 1
+Aa 1 1
diff --git a/tests/queries/0_stateless/02293_ilike_on_fixed_strings.sql b/tests/queries/0_stateless/02293_ilike_on_fixed_strings.sql
new file mode 100644
index 00000000000..3838e372e24
--- /dev/null
+++ b/tests/queries/0_stateless/02293_ilike_on_fixed_strings.sql
@@ -0,0 +1,10 @@
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab (col FixedString(2)) engine = MergeTree() ORDER BY col;
+
+INSERT INTO tab VALUES ('AA') ('Aa');
+
+SELECT col, col LIKE '%a', col ILIKE '%a' FROM tab WHERE col = 'AA';
+SELECT col, col LIKE '%a', col ILIKE '%a' FROM tab WHERE col = 'Aa';
+
+DROP TABLE IF EXISTS tab;
diff --git a/tests/queries/1_stateful/00172_parallel_join.sql b/tests/queries/1_stateful/00172_parallel_join.sql
index fce41d7a761..36b12a43b88 100644
--- a/tests/queries/1_stateful/00172_parallel_join.sql
+++ b/tests/queries/1_stateful/00172_parallel_join.sql
@@ -1,4 +1,5 @@
-set join_algorithm='parallel_hash';
+SET join_algorithm='parallel_hash';
+
 SELECT
     EventDate,
     hits,