Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 08:32:02 +00:00)

Merge branch 'master' into hashid

commit 8bff9b8ce9

.gitmodules (vendored) — 5 lines changed
@@ -265,6 +265,9 @@
[submodule "contrib/wyhash"]
	path = contrib/wyhash
	url = https://github.com/wangyi-fudan/wyhash.git
[submodule "contrib/eigen"]
	path = contrib/eigen
	url = https://github.com/eigen-mirror/eigen
[submodule "contrib/hashidsxx"]
	path = contrib/hashidsxx
	url = https://github.com/schoentoon/hashidsxx.git
@@ -61,7 +61,7 @@ else ()
endif ()

if (ARCH_PPC64LE)
    set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
    set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -DNO_WARN_X86_INTRINSICS")
endif ()

set (TEST_FLAG "-msse4.2")
contrib/CMakeLists.txt (vendored) — 1 line changed

@@ -153,6 +153,7 @@ endif()

add_contrib (sqlite-cmake sqlite-amalgamation)
add_contrib (s2geometry-cmake s2geometry)
add_contrib (eigen-cmake eigen)

# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
contrib/eigen (vendored submodule) — 1 line changed

@@ -0,0 +1 @@
Subproject commit 3147391d946bb4b6c68edd901f2add6ac1f31f8c
contrib/eigen-cmake/CMakeLists.txt (new file) — 23 lines

@@ -0,0 +1,23 @@
set(EIGEN_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/eigen")

add_library (_eigen INTERFACE)

option (ENABLE_MKL "Build Eigen with Intel MKL" OFF)
if (ENABLE_MKL)
    set(MKL_THREADING sequential)
    set(MKL_INTERFACE lp64)
    find_package(MKL REQUIRED)
    if (MKL_FOUND)
        message("MKL INCLUDE: ${MKL_INCLUDE}")
        message("MKL LIBRARIES: ${MKL_LIBRARIES}")
        target_compile_definitions(_eigen INTERFACE EIGEN_USE_MKL_ALL)
        target_include_directories(_eigen INTERFACE ${MKL_INCLUDE})
        target_link_libraries(_eigen INTERFACE ${MKL_LIBRARIES})
    endif()
endif()

# Only include MPL2 code from Eigen library
target_compile_definitions(_eigen INTERFACE EIGEN_MPL2_ONLY)

target_include_directories (_eigen SYSTEM INTERFACE ${EIGEN_LIBRARY_DIR})
add_library(ch_contrib::eigen ALIAS _eigen)
contrib/poco (vendored submodule) — 2 lines changed

@@ -1 +1 @@
-Subproject commit 6c1a233744d13414e8e8db396c75177b857b2c22
+Subproject commit 5d11f0aa6fd2faad0a7b34aa355cd50c4deb27e6
@@ -177,6 +177,7 @@ function clone_submodules
    contrib/jemalloc
    contrib/replxx
    contrib/wyhash
    contrib/eigen
)

git submodule sync
@@ -10,21 +10,17 @@ description: How to build ClickHouse on Mac OS X
You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). Follow **macOS (Intel)** or **macOS (Apple silicon)** installation instructions.
:::

Build should work on x86_64 (Intel) and arm64 (Apple silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang.
It is always recommended to use vanilla `clang` compiler.
The build works on x86_64 (Intel) and arm64 (Apple Silicon) based on macOS 10.15 (Catalina) or higher with Homebrew's vanilla Clang.

:::note
It is possible to use XCode's `apple-clang` or `gcc`, but it's strongly discouraged.
It is also possible to compile with Apple's XCode `apple-clang` or Homebrew's `gcc`, but it's strongly discouraged.
:::

## Install Homebrew {#install-homebrew}

``` bash
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# ...and follow the printed instructions on any additional steps required to complete the installation.
```
First install [Homebrew](https://brew.sh/)

## Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools}
## For Apple's Clang (discouraged): Install Xcode and Command Line Tools {#install-xcode-and-command-line-tools}

Install the latest [Xcode](https://apps.apple.com/am/app/xcode/id497799835?mt=12) from App Store.
@@ -57,12 +53,12 @@ To build using Homebrew's vanilla Clang compiler (the only **recommended** way):

``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -DCMAKE_AR=$(brew --prefix llvm)/bin/llvm-ar -DCMAKE_RANLIB=$(brew --prefix llvm)/bin/llvm-ranlib -DOBJCOPY_PATH=$(brew --prefix llvm)/bin/llvm-objcopy -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
# The resulting binary will be created at: ./programs/clickhouse
export CC=$(brew --prefix llvm)/bin/clang
export CXX=$(brew --prefix llvm)/bin/clang++
cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . -B build
cmake --build build
# The resulting binary will be created at: build/programs/clickhouse
```

To build using Xcode's native AppleClang compiler in Xcode IDE (this option is only for development builds and workflows, and is **not recommended** unless you know what you are doing):

@@ -82,12 +78,12 @@ To build using Homebrew's vanilla GCC compiler (this option is only for developm

``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_C_COMPILER=$(brew --prefix gcc)/bin/gcc-11 -DCMAKE_CXX_COMPILER=$(brew --prefix gcc)/bin/g++-11 -DCMAKE_AR=$(brew --prefix gcc)/bin/gcc-ar-11 -DCMAKE_RANLIB=$(brew --prefix gcc)/bin/gcc-ranlib-11 -DOBJCOPY_PATH=$(brew --prefix binutils)/bin/objcopy -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
# The resulting binary will be created at: ./programs/clickhouse
export CC=$(brew --prefix gcc)/bin/gcc-11
export CXX=$(brew --prefix gcc)/bin/g++-11
cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . -B build
cmake --build build
# The resulting binary will be created at: build/programs/clickhouse
```

## Caveats {#caveats}
@@ -1,5 +1,6 @@
---
sidebar_label: New York Taxi Data
sidebar_position: 2
description: Data for billions of taxi and for-hire vehicle (Uber, Lyft, etc.) trips originating in New York City since 2009
---

@@ -1,5 +1,6 @@
---
sidebar_label: UK Property Price Paid
sidebar_position: 1
---

# UK Property Price Paid

@@ -2,7 +2,7 @@

[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows an incoming connection to be uniquely authenticated. The `Common Name` field of the certificate is used to identify the connected user. This allows multiple certificates to be associated with the same user. Additionally, reissuing and revoking certificates does not affect the ClickHouse configuration.

To enable SSL certificate authentication, a list of `Common Name`'s for each ClickHouse user must be sspecified in the settings file `config.xml`:
To enable SSL certificate authentication, a list of `Common Name`'s for each ClickHouse user must be specified in the settings file `users.xml`:

**Example**
```xml

@@ -10,11 +10,11 @@ To enable SSL certificate authentication, a list of `Common Name`'s for each Cli
<!-- ... -->
<users>
    <user_name>
        <certificates>
        <ssl_certificates>
            <common_name>host.domain.com:example_user</common_name>
            <common_name>host.domain.com:example_user_dev</common_name>
            <!-- More names -->
        </certificates>
        </ssl_certificates>
        <!-- Other settings -->
    </user_name>
</users>
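Once a client connects with a certificate whose `Common Name` is listed for a user, the session runs as that user. As a minimal sketch (assuming a client has already connected using one of the certificates mapped above; the query itself is illustrative and not part of the configuration):

```sql
-- Assumes the connection was authenticated with a certificate whose
-- Common Name is mapped to <user_name> in the configuration above.
SELECT currentUser();
```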
@@ -1745,3 +1745,13 @@ Possible values:
- Positive integer.

Default value: `10000`.

## global_memory_usage_overcommit_max_wait_microseconds {#global_memory_usage_overcommit_max_wait_microseconds}

Sets the maximum waiting time for the global overcommit tracker.

Possible values:

- Positive integer.

Default value: `200`.
docs/en/operations/settings/memory-overcommit.md (new file) — 37 lines

@@ -0,0 +1,37 @@
# Memory overcommit

Memory overcommit is an experimental technique intended to allow setting more flexible memory limits for queries.

The idea of this technique is to introduce settings which represent the guaranteed amount of memory a query can use.
When memory overcommit is enabled and the memory limit is reached, ClickHouse selects the most overcommitted query and tries to free memory by killing it.

When the memory limit is reached, any query will wait for some time while attempting to allocate new memory.
If the timeout passes and memory has been freed, the query continues execution.
Otherwise, an exception is thrown and the query is killed.

The query to stop or kill is selected by either the global or the user overcommit tracker, depending on which memory limit was reached.
If the overcommit tracker cannot choose a query to stop, a MEMORY_LIMIT_EXCEEDED exception is thrown.

## User overcommit tracker

The user overcommit tracker finds the query with the biggest overcommit ratio in the user's query list.
The overcommit ratio for a query is computed as the number of allocated bytes divided by the value of the `memory_overcommit_ratio_denominator` setting.

If `memory_overcommit_ratio_denominator` for the query equals zero, the overcommit tracker won't choose this query.

The waiting timeout is set by the `memory_usage_overcommit_max_wait_microseconds` setting.

**Example**

```sql
SELECT number FROM numbers(1000) GROUP BY number SETTINGS memory_overcommit_ratio_denominator=4000, memory_usage_overcommit_max_wait_microseconds=500
```

## Global overcommit tracker

The global overcommit tracker finds the query with the biggest overcommit ratio in the list of all queries.
In this case the overcommit ratio is computed as the number of allocated bytes divided by the value of the `memory_overcommit_ratio_denominator_for_user` setting.

If `memory_overcommit_ratio_denominator_for_user` for the query equals zero, the overcommit tracker won't choose this query.

The waiting timeout is set by the `global_memory_usage_overcommit_max_wait_microseconds` parameter in the configuration file.
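For example, a query that should be among the first candidates for the global tracker to kill can declare a small guaranteed amount via `memory_overcommit_ratio_denominator_for_user` (the values below are illustrative assumptions, not recommendations):

```sql
-- Illustrative only: a low denominator yields a high overcommit ratio,
-- so this query is a likely victim when the global memory limit is hit.
SELECT number
FROM numbers(1000)
GROUP BY number
SETTINGS memory_overcommit_ratio_denominator_for_user = 4000
```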
@@ -4263,3 +4263,29 @@ Possible values:
- 1 — Enabled.

Default value: 1.

## memory_overcommit_ratio_denominator

It represents the soft memory limit when the hard limit is reached on the user level.
This value is used to compute the overcommit ratio for the query.
Zero means skip the query.
Read more about [memory overcommit](memory-overcommit.md).

Default value: `1GiB`.

## memory_usage_overcommit_max_wait_microseconds

The maximum time a thread will wait for memory to be freed in the case of memory overcommit on the user level.
If the timeout is reached and memory is not freed, an exception is thrown.
Read more about [memory overcommit](memory-overcommit.md).

Default value: `200`.

## memory_overcommit_ratio_denominator_for_user

It represents the soft memory limit when the hard limit is reached on the global level.
This value is used to compute the overcommit ratio for the query.
Zero means skip the query.
Read more about [memory overcommit](memory-overcommit.md).

Default value: `1GiB`.
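As a sketch of how these three settings combine per query (the numeric values are illustrative assumptions only):

```sql
-- Illustrative values: declare a per-user guarantee, a global-level guarantee,
-- and how long to wait for memory to be freed before failing the allocation.
SELECT number
FROM numbers(1000)
GROUP BY number
SETTINGS
    memory_overcommit_ratio_denominator = 4000,
    memory_overcommit_ratio_denominator_for_user = 4000,
    memory_usage_overcommit_max_wait_microseconds = 500
```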
@@ -13,7 +13,7 @@ Simhash is a hash function, which returns close hash values for close (similar)

[Interprets](../../sql-reference/functions/type-conversion-functions.md#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the [MD5](https://en.wikipedia.org/wiki/MD5) hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the resulting string, and interprets them as `UInt64` in big-endian byte order.

``` sql
```sql
halfMD5(par1, ...)
```

@@ -30,11 +30,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value.

**Example**

``` sql
```sql
SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type;
```

``` text
```response
┌────────halfMD5hash─┬─type───┐
│ 186182704141653334 │ UInt64 │
└────────────────────┴────────┘

@@ -54,7 +54,7 @@ If you want to get the same result as output by the md5sum utility, use lower(he

Produces a 64-bit [SipHash](https://131002.net/siphash/) hash value.

``` sql
```sql
sipHash64(par1,...)
```

@@ -77,11 +77,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value.

**Example**

``` sql
```sql
SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type;
```

``` text
```response
┌──────────────SipHash─┬─type───┐
│ 13726873534472839665 │ UInt64 │
└──────────────────────┴────────┘

@@ -93,7 +93,7 @@ Produces a 128-bit [SipHash](https://131002.net/siphash/) hash value. Differs fr

**Syntax**

``` sql
```sql
sipHash128(par1,...)
```

@@ -111,13 +111,13 @@ Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).

Query:

``` sql
```sql
SELECT hex(sipHash128('foo', '\x01', 3));
```

Result:

``` text
```response
┌─hex(sipHash128('foo', '', 3))────┐
│ 9DE516A64A414D4B1B609415E4523F24 │
└──────────────────────────────────┘

@@ -127,7 +127,7 @@ Result:

Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value.

``` sql
```sql
cityHash64(par1,...)
```

@@ -145,11 +145,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value.

Call example:

``` sql
```sql
SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type;
```

``` text
```response
┌─────────────CityHash─┬─type───┐
│ 12072650598913549138 │ UInt64 │
└──────────────────────┴────────┘

@@ -157,7 +157,7 @@ SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0

The following example shows how to compute the checksum of the entire table with accuracy up to the row order:

``` sql
```sql
SELECT groupBitXor(cityHash64(*)) FROM table
```

@@ -177,7 +177,7 @@ Calculates SHA-1, SHA-224, SHA-256, SHA-512 hash from a string and returns the r

**Syntax**

``` sql
```sql
SHA1('s')
...
SHA512('s')
@@ -203,24 +203,62 @@ Use the [hex](../functions/encoding-functions.md#hex) function to represent the

Query:

``` sql
```sql
SELECT hex(SHA1('abc'));
```

Result:

``` text
```response
┌─hex(SHA1('abc'))─────────────────────────┐
│ A9993E364706816ABA3E25717850C26C9CD0D89D │
└──────────────────────────────────────────┘
```

## BLAKE3 {#blake3}

Calculates a BLAKE3 hash of a string and returns the resulting set of bytes as [FixedString](../data-types/fixedstring.md).

**Syntax**

```sql
BLAKE3('s')
```

This cryptographic hash function is integrated into ClickHouse via the BLAKE3 Rust library. It is rather fast, showing roughly twice the performance of SHA-2 while generating hashes of the same length as SHA-256.

**Arguments**

- s - input string for BLAKE3 hash calculation. [String](../data-types/string.md).

**Return value**

- BLAKE3 hash as a byte array with type FixedString(32).

Type: [FixedString](../data-types/fixedstring.md).

**Example**

Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.

Query:
```sql
SELECT hex(BLAKE3('ABC'))
```

Result:
```response
┌─hex(BLAKE3('ABC'))───────────────────────────────────────────────┐
│ D1717274597CF0289694F75D96D444B992A096F1AFD8E7BBFA6EBB1D360FEDFC │
└──────────────────────────────────────────────────────────────────┘
```

## URLHash(url\[, N\]) {#urlhashurl-n}

A fast, decent-quality non-cryptographic hash function for a string obtained from a URL using some type of normalization.
`URLHash(s)` – Calculates a hash from a string without one of the trailing symbols `/`, `?` or `#` at the end, if present.
`URLHash(s, N)` – Calculates a hash from a string up to the N-th level in the URL hierarchy, without one of the trailing symbols `/`, `?` or `#` at the end, if present.
Levels are the same as in URLHierarchy.
Levels are the same as in URLHierarchy.
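For illustration, a minimal sketch of calling `URLHash` (the URL used here is an assumption for illustration and the returned hash values will vary; it is not taken from the original documentation):

```sql
-- Hash the whole normalized URL, and the same URL truncated to the
-- second level of its hierarchy.
SELECT
    URLHash('https://clickhouse.com/docs/en/sql-reference/') AS full_hash,
    URLHash('https://clickhouse.com/docs/en/sql-reference/', 2) AS level2_hash;
```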

## farmFingerprint64 {#farmfingerprint64}
@ -228,7 +266,7 @@ Levels are the same as in URLHierarchy.
|
||||
|
||||
Produces a 64-bit [FarmHash](https://github.com/google/farmhash) or Fingerprint value. `farmFingerprint64` is preferred for a stable and portable value.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
farmFingerprint64(par1, ...)
|
||||
farmHash64(par1, ...)
|
||||
```
|
||||
@ -245,11 +283,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value.
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────FarmHash─┬─type───┐
|
||||
│ 17790458267262532859 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -261,7 +299,7 @@ Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add97
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT javaHash('')
|
||||
```
|
||||
|
||||
@ -273,13 +311,13 @@ A `Int32` data type hash value.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT javaHash('Hello, world!');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─javaHash('Hello, world!')─┐
|
||||
│ -1880044555 │
|
||||
└───────────────────────────┘
|
||||
@ -291,7 +329,7 @@ Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add97
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
javaHashUTF16LE(stringUtf16le)
|
||||
```
|
||||
|
||||
@ -309,13 +347,13 @@ Correct query with UTF-16LE encoded string.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐
|
||||
│ 3556498 │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
@ -325,7 +363,7 @@ Result:
|
||||
|
||||
Calculates `HiveHash` from a string.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hiveHash('')
|
||||
```
|
||||
|
||||
@ -341,13 +379,13 @@ Type: `hiveHash`.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hiveHash('Hello, world!');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─hiveHash('Hello, world!')─┐
|
||||
│ 267439093 │
|
||||
└───────────────────────────┘
|
||||
@ -357,7 +395,7 @@ Result:
|
||||
|
||||
Produces a 64-bit [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/) hash value.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
metroHash64(par1, ...)
|
||||
```
|
||||
|
||||
@ -371,11 +409,11 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value.
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────────────MetroHash─┬─type───┐
|
||||
│ 14235658766382344533 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -391,7 +429,7 @@ For more information, see the link: [JumpConsistentHash](https://arxiv.org/pdf/1
|
||||
|
||||
Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
murmurHash2_32(par1, ...)
|
||||
murmurHash2_64(par1, ...)
|
||||
```
|
||||
@ -407,11 +445,11 @@ Both functions take a variable number of input parameters. Arguments can be any
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────────MurmurHash2─┬─type───┐
|
||||
│ 11832096901709403633 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -423,7 +461,7 @@ Calculates a 64-bit [MurmurHash2](https://github.com/aappleby/smhasher) hash val
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
gccMurmurHash(par1, ...)
|
||||
```
|
||||
|
||||
@ -441,7 +479,7 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT
|
||||
gccMurmurHash(1, 2, 3) AS res1,
|
||||
gccMurmurHash(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) AS res2
|
||||
@ -449,7 +487,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────────res1─┬────────────────res2─┐
|
||||
│ 12384823029245979431 │ 1188926775431157506 │
|
||||
└──────────────────────┴─────────────────────┘
|
||||
@ -459,7 +497,7 @@ Result:
|
||||
|
||||
Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
murmurHash3_32(par1, ...)
|
||||
murmurHash3_64(par1, ...)
|
||||
```
|
||||
@ -475,11 +513,11 @@ Both functions take a variable number of input parameters. Arguments can be any
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─MurmurHash3─┬─type───┐
|
||||
│ 2152717 │ UInt32 │
|
||||
└─────────────┴────────┘
|
||||
@ -491,7 +529,7 @@ Produces a 128-bit [MurmurHash3](https://github.com/aappleby/smhasher) hash valu
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
murmurHash3_128(expr)
|
||||
```
|
||||
|
||||
@ -509,13 +547,13 @@ Type: [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hex(murmurHash3_128('foo', 'foo', 'foo'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─hex(murmurHash3_128('foo', 'foo', 'foo'))─┐
|
||||
│ F8F7AD9B6CD4CF117A71E277E2EC2931 │
|
||||
└───────────────────────────────────────────┘
|
||||
@ -525,7 +563,7 @@ Result:
|
||||
|
||||
Calculates `xxHash` from a string. It is proposed in two flavors, 32 and 64 bits.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT xxHash32('')
|
||||
|
||||
OR
|
||||
@ -543,13 +581,13 @@ Type: `xxHash`.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT xxHash32('Hello, world!');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─xxHash32('Hello, world!')─┐
|
||||
│ 834093149 │
|
||||
└───────────────────────────┘
|
||||
@ -567,7 +605,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHash(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -586,13 +624,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHash('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 1627567969 │
|
||||
└────────────┘
|
||||
@ -606,7 +644,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHashCaseInsensitive(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -625,13 +663,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────Hash─┐
|
||||
│ 562180645 │
|
||||
└───────────┘
|
||||
@ -645,7 +683,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHashUTF8(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -664,13 +702,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHashUTF8('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 1628157797 │
|
||||
└────────────┘
|
||||
@ -684,7 +722,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHashCaseInsensitiveUTF8(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -703,13 +741,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 1636742693 │
|
||||
└────────────┘
|
||||
@ -723,7 +761,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHash(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -742,13 +780,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2328277067 │
|
||||
└────────────┘
|
||||
@ -762,7 +800,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHashCaseInsensitive(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -781,13 +819,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2194812424 │
|
||||
└────────────┘
|
||||
@ -801,7 +839,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHashUTF8(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -820,13 +858,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2328277067 │
|
||||
└────────────┘
|
||||
@ -840,7 +878,7 @@ Can be used for detection of semi-duplicate strings with [bitHammingDistance](..
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -859,13 +897,13 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2194812424 │
|
||||
└────────────┘
|
||||
@ -879,7 +917,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHash(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -899,13 +937,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHash('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (18333312859352735453,9054248444481805918) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -919,7 +957,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashCaseInsensitive(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -939,13 +977,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (2106263556442004574,13203602793651726206) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -959,7 +997,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashUTF8(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -979,13 +1017,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (18333312859352735453,6742163577938632877) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -999,7 +1037,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1019,13 +1057,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple───────────────────────────────────────┐
|
||||
│ (12493625717655877135,13203602793651726206) │
|
||||
└─────────────────────────────────────────────┘
|
||||
@ -1037,7 +1075,7 @@ Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-gram
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArg(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1057,13 +1095,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArg('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1075,7 +1113,7 @@ Splits a ASCII string into n-grams of `ngramsize` symbols and returns the n-gram
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1095,13 +1133,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1113,7 +1151,7 @@ Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArgUTF8(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1133,13 +1171,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1151,7 +1189,7 @@ Splits a UTF-8 string into n-grams of `ngramsize` symbols and returns the n-gram
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1171,13 +1209,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1191,7 +1229,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHash(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1211,13 +1249,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (16452112859864147620,5844417301642981317) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -1231,7 +1269,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1251,13 +1289,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────┐
|
||||
│ (3065874883688416519,1634050779997673240) │
|
||||
└───────────────────────────────────────────┘
|
||||
@ -1271,7 +1309,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1291,13 +1329,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (16452112859864147620,5844417301642981317) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -1311,7 +1349,7 @@ Can be used for detection of semi-duplicate strings with [tupleHammingDistance](
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1331,13 +1369,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([UInt64](../../sql-refere
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────┐
|
||||
│ (3065874883688416519,1634050779997673240) │
|
||||
└───────────────────────────────────────────┘
|
||||
@ -1349,7 +1387,7 @@ Splits a ASCII string into parts (shingles) of `shinglesize` words each and retu
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArg(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1369,13 +1407,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────┐
|
||||
│ (('OLAP','database','analytical'),('online','oriented','processing')) │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
@ -1387,7 +1425,7 @@ Splits a ASCII string into parts (shingles) of `shinglesize` words each and retu
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1407,13 +1445,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────────────────────────────────┐
|
||||
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
|
||||
└────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1425,7 +1463,7 @@ Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and retu
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArgUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1445,13 +1483,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────┐
|
||||
│ (('OLAP','database','analytical'),('online','oriented','processing')) │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
@ -1463,7 +1501,7 @@ Splits a UTF-8 string into parts (shingles) of `shinglesize` words each and retu
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1483,13 +1521,13 @@ Type: [Tuple](../../sql-reference/data-types/tuple.md)([Tuple](../../sql-referen
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────────────────────────────────┐
|
||||
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
|
||||
└────────────────────────────────────────────────────────────────────────┘
|
||||
|
@ -13,7 +13,7 @@ Simhash – это хеш-функция, которая для близких
|
||||
|
||||
[Интерпретирует](../../sql-reference/functions/hash-functions.md#type_conversion_functions-reinterpretAsString) все входные параметры как строки и вычисляет хэш [MD5](https://ru.wikipedia.org/wiki/MD5) для каждой из них. Затем объединяет хэши, берет первые 8 байт хэша результирующей строки и интерпретирует их как значение типа `UInt64` с big-endian порядком байтов.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
halfMD5(par1, ...)
|
||||
```
|
||||
|
||||
@ -30,11 +30,11 @@ halfMD5(par1, ...)
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────────halfMD5hash─┬─type───┐
|
||||
│ 186182704141653334 │ UInt64 │
|
||||
└────────────────────┴────────┘
|
||||
@ -54,7 +54,7 @@ SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')
|
||||
|
||||
Генерирует 64-х битное значение [SipHash](https://131002.net/siphash/).
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
sipHash64(par1,...)
|
||||
```
|
||||
|
||||
@ -77,11 +77,11 @@ sipHash64(par1,...)
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────────────SipHash─┬─type───┐
|
||||
│ 13726873534472839665 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -93,7 +93,7 @@ SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
sipHash128(par1,...)
|
||||
```
|
||||
|
||||
@ -111,13 +111,13 @@ sipHash128(par1,...)
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hex(sipHash128('foo', '\x01', 3));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─hex(sipHash128('foo', '', 3))────┐
|
||||
│ 9DE516A64A414D4B1B609415E4523F24 │
|
||||
└──────────────────────────────────┘
|
||||
@ -127,7 +127,7 @@ SELECT hex(sipHash128('foo', '\x01', 3));
|
||||
|
||||
Генерирует 64-х битное значение [CityHash](https://github.com/google/cityhash).
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
cityHash64(par1,...)
|
||||
```
|
||||
|
||||
@ -145,11 +145,11 @@ cityHash64(par1,...)
|
||||
|
||||
Пример вызова:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────CityHash─┬─type───┐
|
||||
│ 12072650598913549138 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -157,7 +157,7 @@ SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0
|
||||
|
||||
А вот так вы можете вычислить чексумму всей таблицы с точностью до порядка строк:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT groupBitXor(cityHash64(*)) FROM table
|
||||
```
|
||||
|
||||
@ -177,7 +177,7 @@ SELECT groupBitXor(cityHash64(*)) FROM table
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SHA1('s')
|
||||
...
|
||||
SHA512('s')
|
||||
@ -203,18 +203,56 @@ SHA512('s')
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hex(SHA1('abc'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─hex(SHA1('abc'))─────────────────────────┐
|
||||
│ A9993E364706816ABA3E25717850C26C9CD0D89D │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## BLAKE3 {#blake3}
|
||||
|
||||
Вычисляет BLAKE3 хеш строки и возвращает полученный набор байт в виде [FixedString](../data-types/fixedstring.md).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
```sql
|
||||
BLAKE3('s')
|
||||
```
|
||||
|
||||
Данная криптографическая функция интегрирована в ClickHouse из Rust-библиотеки. Функция работает сравнительно быстро, показывая в 2 раза более быстрые результаты по сравнению с SHA-2, генерируя хеши аналогичной SHA-256 длины.
|
||||
|
||||
**Параметры**
|
||||
|
||||
- s - входная строка для вычисления хеша BLAKE3. [String](../data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Хеш BLAKE3 в виде шестнадцатеричной строки, имеющей тип FixedString(32).
|
||||
|
||||
Тип: [FixedString](../data-types/fixedstring.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Используйте функцию [hex](../functions/encoding-functions.md#hex) для представления результата в виде строки с шестнадцатеричной кодировкой.
|
||||
|
||||
Запрос:
|
||||
```sql
|
||||
SELECT hex(BLAKE3('ABC'))
|
||||
```
|
||||
|
||||
Результат:
|
||||
```response
|
||||
┌─hex(BLAKE3('ABC'))───────────────────────────────────────────────┐
|
||||
│ D1717274597CF0289694F75D96D444B992A096F1AFD8E7BBFA6EBB1D360FEDFC │
|
||||
└──────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## URLHash(url\[, N\]) {#urlhashurl-n}
|
||||
|
||||
Быстрая не криптографическая хэш-функция неплохого качества для строки, полученной из URL путём некоторой нормализации.
|
||||
@ -228,7 +266,7 @@ SELECT hex(SHA1('abc'));
|
||||
|
||||
Создает 64-битное значение [FarmHash](https://github.com/google/farmhash), независимое от платформы (архитектуры сервера), что важно, если значения сохраняются или используются для разбиения данных на группы.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
farmFingerprint64(par1, ...)
|
||||
farmHash64(par1, ...)
|
||||
```
|
||||
@ -245,11 +283,11 @@ farmHash64(par1, ...)
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────FarmHash─┬─type───┐
|
||||
│ 17790458267262532859 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -259,7 +297,7 @@ SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0
|
||||
|
||||
Вычисляет [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) от строки. `JavaHash` не отличается ни скоростью, ни качеством, поэтому эту функцию следует считать устаревшей. Используйте эту функцию, если вам необходимо получить значение хэша по такому же алгоритму.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT javaHash('')
|
||||
```
|
||||
|
||||
@ -273,13 +311,13 @@ SELECT javaHash('')
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT javaHash('Hello, world!');
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─javaHash('Hello, world!')─┐
|
||||
│ -1880044555 │
|
||||
└───────────────────────────┘
|
||||
@ -291,7 +329,7 @@ SELECT javaHash('Hello, world!');
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
javaHashUTF16LE(stringUtf16le)
|
||||
```
|
||||
|
||||
@ -311,13 +349,13 @@ javaHashUTF16LE(stringUtf16le)
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐
|
||||
│ 3556498 │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
@ -327,7 +365,7 @@ SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'));
|
||||
|
||||
Вычисляет `HiveHash` от строки.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hiveHash('')
|
||||
```
|
||||
|
||||
@ -343,13 +381,13 @@ SELECT hiveHash('')
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hiveHash('Hello, world!');
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─hiveHash('Hello, world!')─┐
|
||||
│ 267439093 │
|
||||
└───────────────────────────┘
|
||||
@ -359,7 +397,7 @@ SELECT hiveHash('Hello, world!');
|
||||
|
||||
Генерирует 64-х битное значение [MetroHash](http://www.jandrewrogers.com/2015/05/27/metrohash/).
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
metroHash64(par1, ...)
|
||||
```
|
||||
|
||||
@ -373,11 +411,11 @@ metroHash64(par1, ...)
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────────────MetroHash─┬─type───┐
|
||||
│ 14235658766382344533 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -393,7 +431,7 @@ SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:
|
||||
|
||||
Генерирует значение [MurmurHash2](https://github.com/aappleby/smhasher).
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
murmurHash2_32(par1, ...)
|
||||
murmurHash2_64(par1, ...)
|
||||
```
|
||||
@ -409,11 +447,11 @@ murmurHash2_64(par1, ...)
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────────MurmurHash2─┬─type───┐
|
||||
│ 11832096901709403633 │ UInt64 │
|
||||
└──────────────────────┴────────┘
|
||||
@ -425,7 +463,7 @@ SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
gccMurmurHash(par1, ...);
|
||||
```
|
||||
|
||||
@ -443,7 +481,7 @@ gccMurmurHash(par1, ...);
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT
|
||||
gccMurmurHash(1, 2, 3) AS res1,
|
||||
gccMurmurHash(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) AS res2
|
||||
@ -451,7 +489,7 @@ SELECT
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────────res1─┬────────────────res2─┐
|
||||
│ 12384823029245979431 │ 1188926775431157506 │
|
||||
└──────────────────────┴─────────────────────┘
|
||||
@ -461,7 +499,7 @@ SELECT
|
||||
|
||||
Генерирует значение [MurmurHash3](https://github.com/aappleby/smhasher).
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
murmurHash3_32(par1, ...)
|
||||
murmurHash3_64(par1, ...)
|
||||
```
|
||||
@ -477,11 +515,11 @@ murmurHash3_64(par1, ...)
|
||||
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type;
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─MurmurHash3─┬─type───┐
|
||||
│ 2152717 │ UInt32 │
|
||||
└─────────────┴────────┘
|
||||
@ -493,7 +531,7 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
murmurHash3_128(expr)
|
||||
```
|
||||
|
||||
@ -511,13 +549,13 @@ murmurHash3_128(expr)
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT hex(murmurHash3_128('foo', 'foo', 'foo'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─hex(murmurHash3_128('foo', 'foo', 'foo'))─┐
|
||||
│ F8F7AD9B6CD4CF117A71E277E2EC2931 │
|
||||
└───────────────────────────────────────────┘
|
||||
@ -527,7 +565,7 @@ SELECT hex(murmurHash3_128('foo', 'foo', 'foo'));
|
||||
|
||||
Вычисляет `xxHash` от строки. Предлагается в двух вариантах: 32 и 64 бита.
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT xxHash32('')
|
||||
|
||||
OR
|
||||
@ -545,13 +583,13 @@ SELECT xxHash64('')
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT xxHash32('Hello, world!');
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─xxHash32('Hello, world!')─┐
|
||||
│ 834093149 │
|
||||
└───────────────────────────┘
|
||||
@ -569,7 +607,7 @@ SELECT xxHash32('Hello, world!');
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHash(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -588,13 +626,13 @@ ngramSimHash(string[, ngramsize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHash('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 1627567969 │
|
||||
└────────────┘
|
||||
@ -608,7 +646,7 @@ SELECT ngramSimHash('ClickHouse') AS Hash;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHashCaseInsensitive(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -627,13 +665,13 @@ ngramSimHashCaseInsensitive(string[, ngramsize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────Hash─┐
|
||||
│ 562180645 │
|
||||
└───────────┘
|
||||
@ -647,7 +685,7 @@ SELECT ngramSimHashCaseInsensitive('ClickHouse') AS Hash;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHashUTF8(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -666,13 +704,13 @@ ngramSimHashUTF8(string[, ngramsize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHashUTF8('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 1628157797 │
|
||||
└────────────┘
|
||||
@ -686,7 +724,7 @@ SELECT ngramSimHashUTF8('ClickHouse') AS Hash;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramSimHashCaseInsensitiveUTF8(string[, ngramsize])
|
||||
```
|
||||
|
||||
@ -705,13 +743,13 @@ ngramSimHashCaseInsensitiveUTF8(string[, ngramsize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 1636742693 │
|
||||
└────────────┘
|
||||
@ -725,7 +763,7 @@ SELECT ngramSimHashCaseInsensitiveUTF8('ClickHouse') AS Hash;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHash(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -744,13 +782,13 @@ wordShingleSimHash(string[, shinglesize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2328277067 │
|
||||
└────────────┘
|
||||
@ -764,7 +802,7 @@ SELECT wordShingleSimHash('ClickHouse® is a column-oriented database management
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHashCaseInsensitive(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -783,13 +821,13 @@ wordShingleSimHashCaseInsensitive(string[, shinglesize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2194812424 │
|
||||
└────────────┘
|
||||
@ -803,7 +841,7 @@ SELECT wordShingleSimHashCaseInsensitive('ClickHouse® is a column-oriented data
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHashUTF8(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -822,13 +860,13 @@ wordShingleSimHashUTF8(string[, shinglesize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2328277067 │
|
||||
└────────────┘
|
||||
@ -842,7 +880,7 @@ SELECT wordShingleSimHashUTF8('ClickHouse® is a column-oriented database manage
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize])
|
||||
```
|
||||
|
||||
@ -861,13 +899,13 @@ wordShingleSimHashCaseInsensitiveUTF8(string[, shinglesize])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Hash;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────Hash─┐
|
||||
│ 2194812424 │
|
||||
└────────────┘
|
||||
@ -881,7 +919,7 @@ SELECT wordShingleSimHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHash(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -901,13 +939,13 @@ ngramMinHash(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHash('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (18333312859352735453,9054248444481805918) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -921,7 +959,7 @@ SELECT ngramMinHash('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashCaseInsensitive(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -941,13 +979,13 @@ ngramMinHashCaseInsensitive(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (2106263556442004574,13203602793651726206) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -960,7 +998,7 @@ SELECT ngramMinHashCaseInsensitive('ClickHouse') AS Tuple;
|
||||
Может быть использована для проверки двух строк на схожесть вместе с функцией [tupleHammingDistance](../../sql-reference/functions/tuple-functions.md#tuplehammingdistance). Если для двух строк минимальные или максимальные хеши одинаковы, мы считаем, что эти строки совпадают.
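As a sketch of that check (query composed for illustration; the input strings and the resulting distance are not from the original page), the min-hash tuples of two strings can be passed directly to `tupleHammingDistance`:

```sql
-- A distance of 0 on either component of the tuple suggests the strings are near-duplicates.
SELECT tupleHammingDistance(
    ngramMinHash('ClickHouse is a column-oriented DBMS'),
    ngramMinHash('ClickHouse is a columnar DBMS')
) AS distance;
```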
|
||||
|
||||
**Синтаксис**
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashUTF8(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -980,13 +1018,13 @@ ngramMinHashUTF8(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (18333312859352735453,6742163577938632877) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -1000,7 +1038,7 @@ SELECT ngramMinHashUTF8('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1020,13 +1058,13 @@ ngramMinHashCaseInsensitiveUTF8(string [, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple───────────────────────────────────────┐
|
||||
│ (12493625717655877135,13203602793651726206) │
|
||||
└─────────────────────────────────────────────┘
|
||||
@ -1038,7 +1076,7 @@ SELECT ngramMinHashCaseInsensitiveUTF8('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArg(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1058,13 +1096,13 @@ ngramMinHashArg(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArg('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ous','ick','lic','Hou','kHo','use'),('Hou','lic','ick','ous','ckH','Cli')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1076,7 +1114,7 @@ SELECT ngramMinHashArg('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1096,13 +1134,13 @@ ngramMinHashArgCaseInsensitive(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ous','ick','lic','kHo','use','Cli'),('kHo','lic','ick','ous','ckH','Hou')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1114,7 +1152,7 @@ SELECT ngramMinHashArgCaseInsensitive('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArgUTF8(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1134,13 +1172,13 @@ ngramMinHashArgUTF8(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ous','ick','lic','Hou','kHo','use'),('kHo','Hou','lic','ick','ous','ckH')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1152,7 +1190,7 @@ SELECT ngramMinHashArgUTF8('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum])
|
||||
```
|
||||
|
||||
@ -1172,13 +1210,13 @@ ngramMinHashArgCaseInsensitiveUTF8(string[, ngramsize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────────────┐
|
||||
│ (('ckH','ous','ick','lic','kHo','use'),('kHo','lic','ick','ous','ckH','Hou')) │
|
||||
└───────────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1192,7 +1230,7 @@ SELECT ngramMinHashArgCaseInsensitiveUTF8('ClickHouse') AS Tuple;
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHash(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1212,13 +1250,13 @@ wordShingleMinHash(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (16452112859864147620,5844417301642981317) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -1232,7 +1270,7 @@ SELECT wordShingleMinHash('ClickHouse® is a column-oriented database management
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1252,13 +1290,13 @@ wordShingleMinHashCaseInsensitive(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────┐
|
||||
│ (3065874883688416519,1634050779997673240) │
|
||||
└───────────────────────────────────────────┘
|
||||
@ -1272,7 +1310,7 @@ SELECT wordShingleMinHashCaseInsensitive('ClickHouse® is a column-oriented data
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1292,13 +1330,13 @@ wordShingleMinHashUTF8(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────┐
|
||||
│ (16452112859864147620,5844417301642981317) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -1312,7 +1350,7 @@ SELECT wordShingleMinHashUTF8('ClickHouse® is a column-oriented database manage
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1332,13 +1370,13 @@ wordShingleMinHashCaseInsensitiveUTF8(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).') AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────┐
|
||||
│ (3065874883688416519,1634050779997673240) │
|
||||
└───────────────────────────────────────────┘
|
||||
@ -1350,7 +1388,7 @@ SELECT wordShingleMinHashCaseInsensitiveUTF8('ClickHouse® is a column-oriented
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArg(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1370,13 +1408,13 @@ wordShingleMinHashArg(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────┐
|
||||
│ (('OLAP','database','analytical'),('online','oriented','processing')) │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
@ -1388,7 +1426,7 @@ SELECT wordShingleMinHashArg('ClickHouse® is a column-oriented database managem
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1408,13 +1446,13 @@ wordShingleMinHashArgCaseInsensitive(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────────────────────────────────┐
|
||||
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
|
||||
└────────────────────────────────────────────────────────────────────────┘
|
||||
@ -1426,7 +1464,7 @@ SELECT wordShingleMinHashArgCaseInsensitive('ClickHouse® is a column-oriented d
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArgUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1446,13 +1484,13 @@ wordShingleMinHashArgUTF8(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple─────────────────────────────────────────────────────────────────┐
|
||||
│ (('OLAP','database','analytical'),('online','oriented','processing')) │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
@ -1464,7 +1502,7 @@ SELECT wordShingleMinHashArgUTF8('ClickHouse® is a column-oriented database man
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum])
|
||||
```
|
||||
|
||||
@ -1484,13 +1522,13 @@ wordShingleMinHashArgCaseInsensitiveUTF8(string[, shinglesize, hashnum])
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT wordShingleMinHashArgCaseInsensitiveUTF8('ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).', 1, 3) AS Tuple;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─Tuple──────────────────────────────────────────────────────────────────┐
|
||||
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
|
||||
└────────────────────────────────────────────────────────────────────────┘
|
||||
|
@ -80,4 +80,78 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0
|
||||
返回数值的最小公倍数。
|
||||
除以零或将最小负数除以-1时抛出异常。
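A minimal illustrative query (added as a sketch, not part of the original page):

```sql
SELECT lcm(6, 8); -- returns 24
```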
|
||||
|
||||
## max2 {#max2}
|
||||
|
||||
比较两个值并返回最大值。返回值转换为[Float64](../../sql-reference/data-types/float.md)。
|
||||
|
||||
**语法**
|
||||
|
||||
```sql
|
||||
max2(value1, value2)
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `value1` — 第一个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。
|
||||
- `value2` — 第二个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 两个值中的最大值。
|
||||
|
||||
类型: [Float](../../sql-reference/data-types/float.md)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
|
||||
```sql
|
||||
SELECT max2(-1, 2);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
```text
|
||||
┌─max2(-1, 2)─┐
|
||||
│ 2 │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
## min2 {#min2}
|
||||
|
||||
比较两个值并返回最小值。返回值类型转换为[Float64](../../sql-reference/data-types/float.md)。
|
||||
|
||||
**语法**
|
||||
|
||||
```sql
|
||||
min2(value1, value2)
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `value1` — 第一个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。
|
||||
- `value2` — 第二个值,类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)或[Float](../../sql-reference/data-types/float.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 两个值中的最小值。
|
||||
|
||||
类型: [Float](../../sql-reference/data-types/float.md)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
|
||||
```sql
|
||||
SELECT min2(-1, 2);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
```text
|
||||
┌─min2(-1, 2)─┐
|
||||
│ -1 │
|
||||
└─────────────┘
|
||||
```
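As a quick check of the Float64 promotion described for both functions (an illustrative query, not from the original page), `toTypeName` should report `Float64` for either result:

```sql
SELECT toTypeName(max2(1, 2)), toTypeName(min2(1, 2));
```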
|
||||
|
||||
[来源文章](https://clickhouse.com/docs/en/query_language/functions/arithmetic_functions/) <!--hide-->
|
||||
|
File diff suppressed because it is too large
@ -109,7 +109,7 @@ std::vector<String> Client::loadWarningMessages()
|
||||
connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */,
|
||||
QueryProcessingStage::Complete,
|
||||
&global_context->getSettingsRef(),
|
||||
&global_context->getClientInfo(), false);
|
||||
&global_context->getClientInfo(), false, {});
|
||||
while (true)
|
||||
{
|
||||
Packet packet = connection->receivePacket();
|
||||
|
@ -334,7 +334,12 @@ Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port
|
||||
return socket_address;
|
||||
}
|
||||
|
||||
Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
|
||||
Poco::Net::SocketAddress Server::socketBindListen(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
Poco::Net::ServerSocket & socket,
|
||||
const std::string & host,
|
||||
UInt16 port,
|
||||
[[maybe_unused]] bool secure) const
|
||||
{
|
||||
auto address = makeSocketAddress(host, port, &logger());
|
||||
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
|
||||
@ -347,7 +352,7 @@ Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & sock
|
||||
#if POCO_VERSION < 0x01080000
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
#else
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config.getBool("listen_reuse_port", false));
|
||||
#endif
|
||||
|
||||
/// If caller requests any available port from the OS, discover it after binding.
|
||||
@ -357,7 +362,7 @@ Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & sock
|
||||
LOG_DEBUG(&logger(), "Requested any available port (port == 0), actual port is {:d}", address.port());
|
||||
}
|
||||
|
||||
socket.listen(/* backlog = */ config().getUInt("listen_backlog", 4096));
|
||||
socket.listen(/* backlog = */ config.getUInt("listen_backlog", 4096));
|
||||
|
||||
return address;
|
||||
}
|
||||
@ -1237,7 +1242,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
[&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto address = socketBindListen(config(), socket, listen_host, port);
|
||||
socket.setReceiveTimeout(config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC));
|
||||
socket.setSendTimeout(config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC));
|
||||
return ProtocolServerAdapter(
|
||||
@ -1260,7 +1265,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(config(), socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC));
|
||||
socket.setSendTimeout(config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC));
|
||||
return ProtocolServerAdapter(
|
||||
@ -1797,7 +1802,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto address = socketBindListen(config, socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
|
||||
@ -1815,7 +1820,7 @@ void Server::createServers(
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1836,7 +1841,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto address = socketBindListen(config, socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1855,7 +1860,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto address = socketBindListen(config, socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1875,7 +1880,7 @@ void Server::createServers(
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1899,7 +1904,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto address = socketBindListen(config, socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1919,7 +1924,7 @@ void Server::createServers(
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1943,7 +1948,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(Poco::Timespan());
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1957,7 +1962,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(Poco::Timespan());
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
@ -1985,7 +1990,7 @@ void Server::createServers(
|
||||
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
auto address = socketBindListen(config, socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
return ProtocolServerAdapter(
|
||||
|
@ -67,7 +67,12 @@ protected:
|
||||
|
||||
private:
|
||||
ContextMutablePtr global_context;
|
||||
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;
|
||||
Poco::Net::SocketAddress socketBindListen(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
Poco::Net::ServerSocket & socket,
|
||||
const std::string & host,
|
||||
UInt16 port,
|
||||
[[maybe_unused]] bool secure = false) const;
|
||||
|
||||
using CreateServerFunc = std::function<ProtocolServerAdapter(UInt16)>;
|
||||
void createServer(
|
||||
|
@ -67,11 +67,11 @@ namespace
|
||||
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex + has_ldap + has_kerberos + has_certificates;
|
||||
|
||||
if (num_password_fields > 1)
|
||||
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'certificates' are used to specify authentication info for user " + user_name + ". Must be only one of them.",
|
||||
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap', 'kerberos', 'ssl_certificates' are used to specify authentication info for user " + user_name + ". Must be only one of them.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
if (num_password_fields < 1)
|
||||
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' or 'certificates' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' or 'kerberos' or 'ssl_certificates' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
if (has_password_plaintext)
|
||||
{
|
||||
|
@ -718,7 +718,8 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
|
||||
query_processing_stage,
|
||||
&global_context->getSettingsRef(),
|
||||
&global_context->getClientInfo(),
|
||||
true);
|
||||
true,
|
||||
[&](const Progress & progress) { onProgress(progress); });
|
||||
|
||||
if (send_external_tables)
|
||||
sendExternalTables(parsed_query);
|
||||
@ -1071,7 +1072,8 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
|
||||
query_processing_stage,
|
||||
&global_context->getSettingsRef(),
|
||||
&global_context->getClientInfo(),
|
||||
true);
|
||||
true,
|
||||
[&](const Progress & progress) { onProgress(progress); });
|
||||
|
||||
if (send_external_tables)
|
||||
sendExternalTables(parsed_query);
|
||||
@ -1103,7 +1105,9 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
|
||||
if (!parsed_insert_query)
|
||||
return;
|
||||
|
||||
if (need_render_progress)
|
||||
bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && !std_in.eof();
|
||||
|
||||
if (need_render_progress && have_data_in_stdin)
|
||||
{
|
||||
/// Set total_bytes_to_read for current fd.
|
||||
FileProgress file_progress(0, std_in.size());
|
||||
@ -1113,8 +1117,6 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
|
||||
progress_indication.setFileProgressCallback(global_context, true);
|
||||
}
|
||||
|
||||
bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && !std_in.eof();
|
||||
|
||||
/// If data fetched from file (maybe compressed file)
|
||||
if (parsed_insert_query->infile)
|
||||
{
|
||||
|
@ -451,7 +451,8 @@ void Connection::sendQuery(
|
||||
UInt64 stage,
|
||||
const Settings * settings,
|
||||
const ClientInfo * client_info,
|
||||
bool with_pending_data)
|
||||
bool with_pending_data,
|
||||
std::function<void(const Progress &)>)
|
||||
{
|
||||
if (!connected)
|
||||
connect(timeouts);
|
||||
|
@ -100,7 +100,8 @@ public:
|
||||
UInt64 stage/* = QueryProcessingStage::Complete */,
|
||||
const Settings * settings/* = nullptr */,
|
||||
const ClientInfo * client_info/* = nullptr */,
|
||||
bool with_pending_data/* = false */) override;
|
||||
bool with_pending_data/* = false */,
|
||||
std::function<void(const Progress &)> process_progress_callback) override;
|
||||
|
||||
void sendCancel() override;
|
||||
|
||||
|
@ -179,7 +179,7 @@ void HedgedConnections::sendQuery(
|
||||
modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
|
||||
}
|
||||
|
||||
replica.connection->sendQuery(timeouts, query, query_id, stage, &modified_settings, &client_info, with_pending_data);
|
||||
replica.connection->sendQuery(timeouts, query, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
|
||||
replica.change_replica_timeout.setRelative(timeouts.receive_data_timeout);
|
||||
replica.packet_receiver->setReceiveTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);
|
||||
};
|
||||
|
@ -90,7 +90,8 @@ public:
|
||||
UInt64 stage,
|
||||
const Settings * settings,
|
||||
const ClientInfo * client_info,
|
||||
bool with_pending_data) = 0;
|
||||
bool with_pending_data,
|
||||
std::function<void(const Progress &)> process_progress_callback) = 0;
|
||||
|
||||
virtual void sendCancel() = 0;
|
||||
|
||||
|
@ -74,13 +74,14 @@ void LocalConnection::sendQuery(
|
||||
UInt64 stage,
|
||||
const Settings *,
|
||||
const ClientInfo *,
|
||||
bool)
|
||||
bool,
|
||||
std::function<void(const Progress &)> process_progress_callback)
|
||||
{
|
||||
query_context = session.makeQueryContext();
|
||||
query_context->setCurrentQueryId(query_id);
|
||||
if (send_progress)
|
||||
{
|
||||
query_context->setProgressCallback([this] (const Progress & value) { return this->updateProgress(value); });
|
||||
query_context->setProgressCallback([this] (const Progress & value) { this->updateProgress(value); });
|
||||
query_context->setFileProgressCallback([this](const FileProgress & value) { this->updateProgress(Progress(value)); });
|
||||
}
|
||||
if (!current_database.empty())
|
||||
@ -143,6 +144,19 @@ void LocalConnection::sendQuery(
|
||||
else if (state->io.pipeline.completed())
|
||||
{
|
||||
CompletedPipelineExecutor executor(state->io.pipeline);
|
||||
if (process_progress_callback)
|
||||
{
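/// Periodically invoked by the executor: forwards accumulated progress to the client callback and returns true to cancel execution if the query was cancelled.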
|
||||
auto callback = [this, &process_progress_callback]()
|
||||
{
|
||||
if (state->is_cancelled)
|
||||
return true;
|
||||
|
||||
process_progress_callback(state->progress.fetchAndResetPiecewiseAtomically());
|
||||
return false;
|
||||
};
|
||||
|
||||
executor.setCancelCallback(callback, query_context->getSettingsRef().interactive_delay / 1000);
|
||||
}
|
||||
executor.execute();
|
||||
}
|
||||
|
||||
@ -185,6 +199,7 @@ void LocalConnection::sendData(const Block & block, const String &, bool)
|
||||
|
||||
void LocalConnection::sendCancel()
|
||||
{
|
||||
state->is_cancelled = true;
|
||||
if (state->executor)
|
||||
state->executor->cancel();
|
||||
}
|
||||
@ -440,7 +455,7 @@ Packet LocalConnection::receivePacket()
|
||||
}
|
||||
case Protocol::Server::Progress:
|
||||
{
|
||||
packet.progress = std::move(state->progress);
|
||||
packet.progress = state->progress.fetchAndResetPiecewiseAtomically();
|
||||
state->progress.reset();
|
||||
next_packet_type.reset();
|
||||
break;
|
||||
|
@ -98,7 +98,8 @@ public:
|
||||
UInt64 stage/* = QueryProcessingStage::Complete */,
|
||||
const Settings * settings/* = nullptr */,
|
||||
const ClientInfo * client_info/* = nullptr */,
|
||||
bool with_pending_data/* = false */) override;
|
||||
bool with_pending_data/* = false */,
|
||||
std::function<void(const Progress &)> process_progress_callback) override;
|
||||
|
||||
void sendCancel() override;
|
||||
|
||||
|
@ -161,14 +161,14 @@ void MultiplexedConnections::sendQuery(
|
||||
modified_settings.parallel_replica_offset = i;
|
||||
|
||||
replica_states[i].connection->sendQuery(timeouts, query, query_id,
|
||||
stage, &modified_settings, &client_info, with_pending_data);
|
||||
stage, &modified_settings, &client_info, with_pending_data, {});
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Use single replica.
|
||||
replica_states[0].connection->sendQuery(timeouts, query, query_id,
|
||||
stage, &modified_settings, &client_info, with_pending_data);
|
||||
stage, &modified_settings, &client_info, with_pending_data, {});
|
||||
}
|
||||
|
||||
sent_query = true;
|
||||
|
@ -132,7 +132,7 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p
|
||||
|
||||
void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query)
|
||||
{
|
||||
connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false);
|
||||
connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false, {});
|
||||
|
||||
while (true)
|
||||
{
|
||||
|
@ -10,6 +10,11 @@
|
||||
#include <Columns/ColumnCompressed.h>
|
||||
#include <Processors/Transforms/ColumnGathererTransform.h>
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
#include <DataTypes/Native.h>
|
||||
#include <llvm/IR/IRBuilder.h>
|
||||
#endif
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -241,6 +246,66 @@ ColumnPtr ColumnNullable::index(const IColumn & indexes, size_t limit) const
|
||||
return ColumnNullable::create(indexed_data, indexed_null_map);
|
||||
}
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
bool ColumnNullable::isComparatorCompilable() const
|
||||
{
|
||||
return nested_column->isComparatorCompilable();
|
||||
}
|
||||
|
||||
llvm::Value * ColumnNullable::compileComparator(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs,
|
||||
llvm::Value * nan_direction_hint) const
|
||||
{
|
||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||
auto * head = b.GetInsertBlock();
|
||||
|
||||
llvm::Value * lhs_unwrapped_value = b.CreateExtractValue(lhs, {0});
|
||||
llvm::Value * lhs_is_null_value = b.CreateExtractValue(lhs, {1});
|
||||
|
||||
llvm::Value * rhs_unwrapped_value = b.CreateExtractValue(rhs, {0});
|
||||
llvm::Value * rhs_is_null_value = b.CreateExtractValue(rhs, {1});
|
||||
|
||||
llvm::Value * lhs_or_rhs_are_null = b.CreateOr(lhs_is_null_value, rhs_is_null_value);
|
||||
|
||||
auto * lhs_or_rhs_are_null_block = llvm::BasicBlock::Create(head->getContext(), "lhs_or_rhs_are_null_block", head->getParent());
|
||||
auto * lhs_rhs_are_not_null_block = llvm::BasicBlock::Create(head->getContext(), "lhs_and_rhs_are_not_null_block", head->getParent());
|
||||
auto * join_block = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent());
|
||||
|
||||
b.CreateCondBr(lhs_or_rhs_are_null, lhs_or_rhs_are_null_block, lhs_rhs_are_not_null_block);
|
||||
|
||||
// if (unlikely(lval_is_null || rval_is_null))
|
||||
// {
|
||||
// if (lval_is_null && rval_is_null)
|
||||
// return 0;
|
||||
// else
|
||||
// return lval_is_null ? null_direction_hint : -null_direction_hint;
|
||||
// }
|
||||
|
||||
b.SetInsertPoint(lhs_or_rhs_are_null_block);
|
||||
auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0);
|
||||
llvm::Value * lhs_and_rhs_are_null = b.CreateAnd(lhs_is_null_value, rhs_is_null_value);
|
||||
llvm::Value * lhs_is_null_result = b.CreateSelect(lhs_is_null_value, nan_direction_hint, b.CreateNeg(nan_direction_hint));
|
||||
llvm::Value * lhs_or_rhs_are_null_block_result = b.CreateSelect(lhs_and_rhs_are_null, lhs_equals_rhs_result, lhs_is_null_result);
|
||||
b.CreateBr(join_block);
|
||||
|
||||
// getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint);
|
||||
|
||||
b.SetInsertPoint(lhs_rhs_are_not_null_block);
|
||||
llvm::Value * lhs_rhs_are_not_null_block_result
|
||||
= nested_column->compileComparator(builder, lhs_unwrapped_value, rhs_unwrapped_value, nan_direction_hint);
|
||||
b.CreateBr(join_block);
|
||||
|
||||
b.SetInsertPoint(join_block);
|
||||
|
||||
auto * result = b.CreatePHI(b.getInt8Ty(), 2);
|
||||
result->addIncoming(lhs_or_rhs_are_null_block_result, lhs_or_rhs_are_null_block);
|
||||
result->addIncoming(lhs_rhs_are_not_null_block_result, lhs_rhs_are_not_null_block);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint, const Collator * collator) const
|
||||
{
|
||||
/// NULL values share the properties of NaN values.
|
||||
|
@ -6,6 +6,9 @@
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/assert_cast.h>
|
||||
|
||||
#include "config_core.h"
|
||||
|
||||
|
||||
class Collator;
|
||||
|
||||
namespace DB
|
||||
@ -94,6 +97,15 @@ public:
|
||||
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
|
||||
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
|
||||
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
bool isComparatorCompilable() const override;
|
||||
|
||||
llvm::Value * compileComparator(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*lhs*/, llvm::Value * /*rhs*/, llvm::Value * /*nan_direction_hint*/) const override;
|
||||
|
||||
#endif
|
||||
|
||||
void compareColumn(const IColumn & rhs, size_t rhs_row_num,
|
||||
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
|
||||
int direction, int nan_direction_hint) const override;
|
||||
|
@ -25,6 +25,12 @@
|
||||
# include <emmintrin.h>
|
||||
#endif
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
#include <DataTypes/Native.h>
|
||||
#include <llvm/IR/IRBuilder.h>
|
||||
#endif
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -183,6 +189,43 @@ namespace
|
||||
};
|
||||
}
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
template <typename T>
|
||||
bool ColumnVector<T>::isComparatorCompilable() const
|
||||
{
|
||||
/// TODO: for std::is_floating_point_v<T> we need to implement is_nan in LLVM IR.
|
||||
return std::is_integral_v<T>;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
llvm::Value * ColumnVector<T>::compileComparator(llvm::IRBuilderBase & builder, llvm::Value * lhs, llvm::Value * rhs, llvm::Value *) const
|
||||
{
|
||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||
|
||||
if constexpr (std::is_integral_v<T>)
|
||||
{
|
||||
// a > b ? 1 : (a < b ? -1 : 0);
|
||||
|
||||
bool is_signed = std::is_signed_v<T>;
|
||||
|
||||
auto * lhs_greater_than_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 1);
|
||||
auto * lhs_less_than_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), -1);
|
||||
auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0);
|
||||
|
||||
auto * lhs_greater_than_rhs = is_signed ? b.CreateICmpSGT(lhs, rhs) : b.CreateICmpUGT(lhs, rhs);
|
||||
auto * lhs_less_than_rhs = is_signed ? b.CreateICmpSLT(lhs, rhs) : b.CreateICmpULT(lhs, rhs);
|
||||
auto * if_lhs_less_than_rhs_result = b.CreateSelect(lhs_less_than_rhs, lhs_less_than_rhs_result, lhs_equals_rhs_result);
|
||||
|
||||
return b.CreateSelect(lhs_greater_than_rhs, lhs_greater_than_rhs_result, if_lhs_less_than_rhs_result);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Method compileComparator is not supported for type {}", TypeName<T>);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
template <typename T>
|
||||
void ColumnVector<T>::getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
|
||||
|
@ -10,6 +10,8 @@
|
||||
#include <Core/TypeId.h>
|
||||
#include <base/TypeName.h>
|
||||
|
||||
#include "config_core.h"
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -217,6 +219,14 @@ public:
|
||||
return CompareHelper<T>::compare(data[n], assert_cast<const Self &>(rhs_).data[m], nan_direction_hint);
|
||||
}
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
bool isComparatorCompilable() const override;
|
||||
|
||||
llvm::Value * compileComparator(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*lhs*/, llvm::Value * /*rhs*/, llvm::Value * /*nan_direction_hint*/) const override;
|
||||
|
||||
#endif
|
||||
|
||||
void compareColumn(const IColumn & rhs, size_t rhs_row_num,
|
||||
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
|
||||
int direction, int nan_direction_hint) const override
|
||||
|
@ -7,10 +7,19 @@
|
||||
#include <base/StringRef.h>
|
||||
#include <Core/Types.h>
|
||||
|
||||
#include "config_core.h"
|
||||
|
||||
|
||||
class SipHash;
|
||||
class Collator;
|
||||
|
||||
namespace llvm
|
||||
{
|
||||
class LLVMContext;
|
||||
class Value;
|
||||
class IRBuilderBase;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -281,6 +290,17 @@ public:
|
||||
*/
|
||||
[[nodiscard]] virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
[[nodiscard]] virtual bool isComparatorCompilable() const { return false; }
|
||||
|
||||
[[nodiscard]] virtual llvm::Value * compileComparator(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*lhs*/, llvm::Value * /*rhs*/, llvm::Value * /*nan_direction_hint*/) const
|
||||
{
|
||||
throw Exception("Method compileComparator is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/// Equivalent to compareAt, but collator is used to compare values.
|
||||
[[nodiscard]] virtual int compareAtWithCollation(size_t, size_t, const IColumn &, int, const Collator &) const
|
||||
{
|
||||
|
@ -400,7 +400,7 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
|
||||
|
||||
if (files[key].contains(offset))
|
||||
throw Exception(
|
||||
ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Cache already exists for key: `{}`, offset: {}, size: {}.\nCurrent cache structure: {}",
|
||||
keyToStr(key), offset, size, dumpStructureUnlocked(key, cache_lock));
|
||||
|
||||
@ -609,7 +609,7 @@ void LRUFileCache::remove(const Key & key)
|
||||
#endif
|
||||
}
|
||||
|
||||
void LRUFileCache::remove(bool force_remove_unreleasable)
|
||||
void LRUFileCache::remove()
|
||||
{
|
||||
/// Try remove all cached files by cache_base_path.
|
||||
/// Only releasable file segments are evicted.
|
||||
@ -626,7 +626,7 @@ void LRUFileCache::remove(bool force_remove_unreleasable)
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Cache is in inconsistent state: LRU queue contains entries with no cache cell");
|
||||
|
||||
if (cell->releasable() || force_remove_unreleasable)
|
||||
if (cell->releasable())
|
||||
{
|
||||
auto file_segment = cell->file_segment;
|
||||
if (file_segment)
|
||||
@ -647,7 +647,7 @@ void LRUFileCache::remove(
|
||||
|
||||
auto * cell = getCell(key, offset, cache_lock);
|
||||
if (!cell)
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "No cache cell for key: {}, offset: {}", keyToStr(key), offset);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "No cache cell for key: {}, offset: {}", keyToStr(key), offset);
|
||||
|
||||
if (cell->queue_iterator)
|
||||
{
|
||||
|
@ -26,6 +26,7 @@ class IFileCache : private boost::noncopyable
|
||||
{
|
||||
friend class FileSegment;
|
||||
friend struct FileSegmentsHolder;
|
||||
friend class FileSegmentRangeWriter;
|
||||
|
||||
public:
|
||||
using Key = UInt128;
|
||||
@ -42,7 +43,7 @@ public:
|
||||
|
||||
virtual void remove(const Key & key) = 0;
|
||||
|
||||
virtual void remove(bool force_remove_unreleasable) = 0;
|
||||
virtual void remove() = 0;
|
||||
|
||||
static bool isReadOnly();
|
||||
|
||||
@ -143,13 +144,11 @@ public:
|
||||
|
||||
FileSegments getSnapshot() const override;
|
||||
|
||||
FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) override;
|
||||
|
||||
void initialize() override;
|
||||
|
||||
void remove(const Key & key) override;
|
||||
|
||||
void remove(bool force_remove_unreleasable) override;
|
||||
void remove() override;
|
||||
|
||||
std::vector<String> tryGetCachePaths(const Key & key) override;
|
||||
|
||||
@ -272,6 +271,8 @@ private:
|
||||
void fillHolesWithEmptyFileSegments(
|
||||
FileSegments & file_segments, const Key & key, const FileSegment::Range & range, bool fill_with_detached_file_segments, std::lock_guard<std::mutex> & cache_lock);
|
||||
|
||||
FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) override;
|
||||
|
||||
size_t getUsedCacheSizeUnlocked(std::lock_guard<std::mutex> & cache_lock) const;
|
||||
|
||||
size_t getAvailableCacheSizeUnlocked(std::lock_guard<std::mutex> & cache_lock) const;
|
||||
|
@ -107,8 +107,7 @@ String FileSegment::getOrSetDownloader()
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
if (detached)
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cannot set downloader for a detached file segment");
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
if (downloader_id.empty())
|
||||
{
|
||||
@ -132,6 +131,8 @@ void FileSegment::resetDownloader()
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
if (downloader_id.empty())
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "There is no downloader");
|
||||
|
||||
@ -209,7 +210,7 @@ void FileSegment::write(const char * from, size_t size, size_t offset_)
|
||||
"Not enough space is reserved. Available: {}, expected: {}", availableSize(), size);
|
||||
|
||||
if (!isDownloader())
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Only downloader can do the downloading. (CallerId: {}, DownloaderId: {})",
|
||||
getCallerId(), downloader_id);
|
||||
|
||||
@ -224,7 +225,10 @@ void FileSegment::write(const char * from, size_t size, size_t offset_)
|
||||
"Attempt to write {} bytes to offset: {}, but current download offset is {}",
|
||||
size, offset_, download_offset);
|
||||
|
||||
assertNotDetached();
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
assertNotDetached(segment_lock);
|
||||
}
|
||||
|
||||
if (!cache_writer)
|
||||
{
|
||||
@ -273,9 +277,8 @@ void FileSegment::writeInMemory(const char * from, size_t size)
|
||||
ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
||||
"Not enough space is reserved. Available: {}, expected: {}", availableSize(), size);
|
||||
|
||||
assertNotDetached();
|
||||
|
||||
std::lock_guard segment_lock(mutex);
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
if (cache_writer)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache writer already initialized");
|
||||
@ -311,7 +314,7 @@ size_t FileSegment::finalizeWrite()
|
||||
if (size == 0)
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Writing zero size is not allowed");
|
||||
|
||||
assertNotDetached();
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
try
|
||||
{
|
||||
@ -342,6 +345,11 @@ FileSegment::State FileSegment::wait()
|
||||
{
|
||||
std::unique_lock segment_lock(mutex);
|
||||
|
||||
if (is_detached)
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Cache file segment is in detached state, operation not allowed");
|
||||
|
||||
if (downloader_id.empty())
|
||||
return download_state;
|
||||
|
||||
@ -366,14 +374,19 @@ bool FileSegment::reserve(size_t size)
|
||||
if (!size)
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Zero space reservation is not allowed");
|
||||
|
||||
assertNotDetached();
|
||||
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
auto caller_id = getCallerId();
|
||||
if (downloader_id != caller_id)
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Space can be reserved only by downloader (current: {}, expected: {})", caller_id, downloader_id);
|
||||
bool is_downloader = caller_id == downloader_id;
|
||||
if (!is_downloader)
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Space can be reserved only by downloader (current: {}, expected: {})",
|
||||
caller_id, downloader_id);
|
||||
}
|
||||
|
||||
if (downloaded_size + size > range().size())
|
||||
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
|
||||
@ -392,6 +405,7 @@ bool FileSegment::reserve(size_t size)
|
||||
size_t size_to_reserve = size - free_space;
|
||||
|
||||
std::lock_guard cache_lock(cache->mutex);
|
||||
|
||||
bool reserved = cache->tryReserve(key(), offset(), size_to_reserve, cache_lock);
|
||||
|
||||
if (reserved)
|
||||
@ -437,6 +451,8 @@ void FileSegment::completeBatchAndResetDownloader()
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
if (!isDownloaderImpl(segment_lock))
|
||||
{
|
||||
cv.notify_all();
|
||||
@ -458,7 +474,7 @@ void FileSegment::complete(State state)
|
||||
std::lock_guard cache_lock(cache->mutex);
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
assertNotDetached();
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
bool is_downloader = isDownloaderImpl(segment_lock);
|
||||
if (!is_downloader)
|
||||
@ -501,12 +517,15 @@ void FileSegment::complete(State state)
|
||||
void FileSegment::complete(std::lock_guard<std::mutex> & cache_lock)
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
completeUnlocked(cache_lock, segment_lock);
|
||||
}
|
||||
|
||||
void FileSegment::completeUnlocked(std::lock_guard<std::mutex> & cache_lock, std::lock_guard<std::mutex> & segment_lock)
|
||||
{
|
||||
if (download_state == State::SKIP_CACHE || detached)
|
||||
if (download_state == State::SKIP_CACHE || is_detached)
|
||||
return;
|
||||
|
||||
if (isDownloaderImpl(segment_lock)
|
||||
@ -516,7 +535,7 @@ void FileSegment::completeUnlocked(std::lock_guard<std::mutex> & cache_lock, std
|
||||
setDownloaded(segment_lock);
|
||||
}
|
||||
|
||||
assertNotDetached();
|
||||
assertNotDetached(segment_lock);
|
||||
|
||||
if (download_state == State::DOWNLOADING || download_state == State::EMPTY)
|
||||
{
|
||||
@ -589,6 +608,7 @@ void FileSegment::completeImpl(std::lock_guard<std::mutex> & cache_lock, std::lo
|
||||
downloader_id.clear();
|
||||
}
|
||||
|
||||
LOG_TEST(log, "Completed file segment: {}", getInfoForLogImpl(segment_lock));
|
||||
assertCorrectnessImpl(segment_lock);
|
||||
}
|
||||
|
||||
@ -649,15 +669,40 @@ void FileSegment::assertCorrectnessImpl(std::lock_guard<std::mutex> & /* segment
|
||||
assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0);
|
||||
}
|
||||
|
||||
void FileSegment::assertNotDetached() const
|
||||
void FileSegment::throwIfDetached() const
|
||||
{
|
||||
if (detached)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Operation not allowed, file segment is detached");
|
||||
std::lock_guard segment_lock(mutex);
|
||||
throwIfDetachedUnlocked(segment_lock);
|
||||
}
|
||||
|
||||
void FileSegment::assertDetachedStatus(std::lock_guard<std::mutex> & /* segment_lock */) const
|
||||
void FileSegment::throwIfDetachedUnlocked(std::lock_guard<std::mutex> & segment_lock) const
|
||||
{
|
||||
assert(download_state == State::EMPTY || hasFinalizedState());
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Cache file segment is in detached state, operation not allowed. "
|
||||
"It can happen when cache was concurrently dropped with SYSTEM DROP FILESYSTEM CACHE FORCE. "
|
||||
"Please, retry. File segment info: {}", getInfoForLogImpl(segment_lock));
|
||||
}
|
||||
|
||||
|
||||
void FileSegment::assertNotDetached(std::lock_guard<std::mutex> & segment_lock) const
|
||||
{
|
||||
if (is_detached)
|
||||
throwIfDetachedUnlocked(segment_lock);
|
||||
}
|
||||
|
||||
void FileSegment::assertDetachedStatus(std::lock_guard<std::mutex> & segment_lock) const
|
||||
{
|
||||
/// Detached file segment is allowed to have only a certain subset of states.
|
||||
/// It should be either EMPTY or one of the finalized states.
|
||||
|
||||
if (download_state != State::EMPTY && !hasFinalizedState())
|
||||
{
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Detached file segment has incorrect state: {}",
|
||||
getInfoForLogImpl(segment_lock));
|
||||
}
|
||||
}
|
||||
|
||||
FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard<std::mutex> & /* cache_lock */)
|
||||
@ -684,29 +729,35 @@ bool FileSegment::hasFinalizedState() const
|
||||
|| download_state == State::SKIP_CACHE;
|
||||
}
|
||||
|
||||
void FileSegment::detach(std::lock_guard<std::mutex> & cache_lock, std::lock_guard<std::mutex> & segment_lock)
|
||||
void FileSegment::detach(
|
||||
std::lock_guard<std::mutex> & /* cache_lock */,
|
||||
std::lock_guard<std::mutex> & segment_lock)
|
||||
{
|
||||
if (detached)
|
||||
/// Currently the detached status arises in only 2 cases, neither of which requires any complex logic:
|
||||
/// 1. there is only 1 remaining file segment holder
|
||||
/// && it does not need this segment anymore
|
||||
/// && this file segment was in cache and needs to be removed
|
||||
/// 2. in read_from_cache_if_exists_otherwise_bypass_cache case
|
||||
if (is_detached)
|
||||
return;
|
||||
|
||||
markAsDetached(segment_lock);
|
||||
download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION;
|
||||
downloader_id.clear();
|
||||
|
||||
if (!hasFinalizedState())
|
||||
{
|
||||
completeUnlocked(cache_lock, segment_lock);
|
||||
}
|
||||
LOG_TEST(log, "Detached file segment: {}", getInfoForLogImpl(segment_lock));
|
||||
}
|
||||
|
||||
void FileSegment::markAsDetached(std::lock_guard<std::mutex> & /* segment_lock */)
|
||||
{
|
||||
detached = true;
|
||||
is_detached = true;
|
||||
CurrentMetrics::add(CurrentMetrics::CacheDetachedFileSegments);
|
||||
}
|
||||
|
||||
FileSegment::~FileSegment()
|
||||
{
|
||||
std::lock_guard segment_lock(mutex);
|
||||
if (detached)
|
||||
if (is_detached)
|
||||
CurrentMetrics::sub(CurrentMetrics::CacheDetachedFileSegments);
|
||||
}
|
||||
|
||||
@ -726,15 +777,18 @@ FileSegmentsHolder::~FileSegmentsHolder()
|
||||
if (!cache)
|
||||
cache = file_segment->cache;
|
||||
|
||||
try
|
||||
{
|
||||
bool detached = false;
|
||||
bool is_detached = false;
|
||||
|
||||
{
|
||||
std::lock_guard segment_lock(file_segment->mutex);
|
||||
detached = file_segment->isDetached(segment_lock);
|
||||
if (detached)
|
||||
is_detached = file_segment->isDetached(segment_lock);
|
||||
if (is_detached)
|
||||
file_segment->assertDetachedStatus(segment_lock);
|
||||
}
|
||||
if (detached)
|
||||
|
||||
if (is_detached)
|
||||
{
|
||||
/// This file segment is not owned by cache, so it will be destructed
|
||||
/// at this point, therefore no completion required.
|
||||
@ -742,10 +796,6 @@ FileSegmentsHolder::~FileSegmentsHolder()
|
||||
continue;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
/// File segment pointer must be reset right after calling complete() and
|
||||
/// under the same mutex, because complete() checks for segment pointers.
|
||||
std::lock_guard cache_lock(cache->mutex);
|
||||
@ -757,7 +807,6 @@ FileSegmentsHolder::~FileSegmentsHolder()
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
assert(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -774,5 +823,4 @@ String FileSegmentsHolder::toString()
|
||||
return ranges;
|
||||
}
|
||||
|
||||
|
||||
}
|
@ -25,8 +25,10 @@ using FileSegments = std::list<FileSegmentPtr>;

class FileSegment : boost::noncopyable
{

friend class LRUFileCache;
friend struct FileSegmentsHolder;
friend class FileSegmentRangeWriter;

public:
using Key = UInt128;
@ -149,9 +151,15 @@ public:

void assertCorrectness() const;

static FileSegmentPtr getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard<std::mutex> & cache_lock);
static FileSegmentPtr getSnapshot(
const FileSegmentPtr & file_segment,
std::lock_guard<std::mutex> & cache_lock);

void detach(std::lock_guard<std::mutex> & cache_lock, std::lock_guard<std::mutex> & segment_lock);
void detach(
std::lock_guard<std::mutex> & cache_lock,
std::lock_guard<std::mutex> & segment_lock);

[[noreturn]] void throwIfDetached() const;

private:
size_t availableSize() const { return reserved_size - downloaded_size; }
@ -159,11 +167,14 @@ private:
size_t getDownloadedSize(std::lock_guard<std::mutex> & segment_lock) const;
String getInfoForLogImpl(std::lock_guard<std::mutex> & segment_lock) const;
void assertCorrectnessImpl(std::lock_guard<std::mutex> & segment_lock) const;
void assertNotDetached() const;
void assertDetachedStatus(std::lock_guard<std::mutex> & segment_lock) const;
bool hasFinalizedState() const;
bool isDetached(std::lock_guard<std::mutex> & /* segment_lock */) const { return detached; }

bool isDetached(std::lock_guard<std::mutex> & /* segment_lock */) const { return is_detached; }
void markAsDetached(std::lock_guard<std::mutex> & segment_lock);
[[noreturn]] void throwIfDetachedUnlocked(std::lock_guard<std::mutex> & segment_lock) const;

void assertDetachedStatus(std::lock_guard<std::mutex> & segment_lock) const;
void assertNotDetached(std::lock_guard<std::mutex> & segment_lock) const;

void setDownloaded(std::lock_guard<std::mutex> & segment_lock);
void setDownloadFailed(std::lock_guard<std::mutex> & segment_lock);
@ -197,6 +208,10 @@ private:
size_t downloaded_size = 0;
size_t reserved_size = 0;

/// global locking order rule:
/// 1. cache lock
/// 2. segment lock

mutable std::mutex mutex;
std::condition_variable cv;

@ -215,7 +230,7 @@ private:

/// "detached" file segment means that it is not owned by cache ("detached" from cache).
/// In general case, all file segments are owned by cache.
bool detached = false;
bool is_detached = false;

std::atomic<bool> is_downloaded{false};
std::atomic<size_t> hits_count = 0; /// cache hits.
@ -227,6 +242,7 @@ private:
struct FileSegmentsHolder : private boost::noncopyable
{
explicit FileSegmentsHolder(FileSegments && file_segments_) : file_segments(std::move(file_segments_)) {}

FileSegmentsHolder(FileSegmentsHolder && other) noexcept : file_segments(std::move(other.file_segments)) {}

~FileSegmentsHolder();

@ -94,7 +94,7 @@ public:
template <typename Factory>
Pointer get(const Key & key, Factory && f)
{
std::unique_lock lock(mutex);
std::lock_guard lock(mutex);

auto it = container.find(key);
if (container.end() == it)

@ -260,10 +260,12 @@
\
M(QueryMemoryLimitExceeded, "Number of times when memory limit exceeded for query.") \
\
M(RemoteFSReadMicroseconds, "Time of reading from remote filesystem.") \
M(RemoteFSReadBytes, "Read bytes from remote filesystem.") \
M(RemoteFSCacheReadBytes, "Read bytes from cache of remote filesystem.") \
M(RemoteFSCacheDownloadBytes, "Bytes downloaded to cache from remote filesystem.") \
M(CachedReadBufferReadFromSourceMicroseconds, "Time reading from filesystem cache source (from remote filesystem, etc)") \
M(CachedReadBufferReadFromCacheMicroseconds, "Time reading from filesystem cache") \
M(CachedReadBufferReadFromSourceBytes, "Bytes read from filesystem cache source (from remote fs, etc)") \
M(CachedReadBufferReadFromCacheBytes, "Bytes read from filesystem cache") \
M(CachedReadBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
M(CachedReadBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
\
M(RemoteFSSeeks, "Total number of seeks for async buffer") \
M(RemoteFSPrefetches, "Number of prefetches made with asynchronous reading from remote filesystem") \
@ -275,6 +277,15 @@
M(RemoteFSSeeksWithReset, "Number of seeks which lead to a new connection") \
M(RemoteFSBuffers, "Number of buffers created for asynchronous reading from remote filesystem") \
\
M(ThreadpoolReaderTaskMicroseconds, "Time spent getting the data in asynchronous reading") \
M(ThreadpoolReaderReadBytes, "Bytes read from a threadpool task in asynchronous reading") \
\
M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \
M(FileSegmentReadMicroseconds, "Metric per file segment. Time spend reading from file") \
M(FileSegmentCacheWriteMicroseconds, "Metric per file segment. Time spend writing data to cache") \
M(FileSegmentPredownloadMicroseconds, "Metric per file segment. Time spent predownloading data to cache (predownloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \
M(FileSegmentUsedBytes, "Metric per file segment. How many bytes were actually used from current file segment") \
\
M(ReadBufferSeekCancelConnection, "Number of seeks which lead to new connection (s3, http)") \
\
M(SleepFunctionCalls, "Number of times a sleep function (sleep, sleepEachRow) has been called.") \

@ -165,18 +165,17 @@ void ProgressIndication::writeProgress()
message << '\r';

size_t prefix_size = message.count();
size_t read_bytes = progress.read_raw_bytes ? progress.read_raw_bytes : progress.read_bytes;

message << indicator << " Progress: ";
message
<< formatReadableQuantity(progress.read_rows) << " rows, "
<< formatReadableSizeWithDecimalSuffix(read_bytes);
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes);

auto elapsed_ns = watch.elapsed();
if (elapsed_ns)
message << " ("
<< formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(read_bytes * 1000000000.0 / elapsed_ns) << "/s.) ";
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) ";
else
message << ". ";

@ -206,7 +205,7 @@ void ProgressIndication::writeProgress()
int64_t remaining_space = static_cast<int64_t>(terminal_width) - written_progress_chars;

/// If the approximate number of rows to process is known, we can display a progress bar and percentage.
if (progress.total_rows_to_read || progress.total_raw_bytes_to_read)
if (progress.total_rows_to_read || progress.total_bytes_to_read)
{
size_t current_count, max_count;
if (progress.total_rows_to_read)
@ -216,8 +215,8 @@ void ProgressIndication::writeProgress()
}
else
{
current_count = progress.read_raw_bytes;
max_count = std::max(progress.read_raw_bytes, progress.total_raw_bytes_to_read);
current_count = progress.read_bytes;
max_count = std::max(progress.read_bytes, progress.total_bytes_to_read);
}

/// To avoid flicker, display progress bar only if .5 seconds have passed since query execution start

@ -119,9 +119,9 @@ TEST(LRUFileCache, get)
assertRange(1, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::EMPTY);

/// Exception because space not reserved.
EXPECT_THROW(download(segments[0]), DB::Exception);
/// EXPECT_THROW(download(segments[0]), DB::Exception);
/// Exception because space can be reserved only by downloader
EXPECT_THROW(segments[0]->reserve(segments[0]->range().size()), DB::Exception);
/// EXPECT_THROW(segments[0]->reserve(segments[0]->range().size()), DB::Exception);

ASSERT_TRUE(segments[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
ASSERT_TRUE(segments[0]->reserve(segments[0]->range().size()));

@ -498,6 +498,15 @@ Block Block::cloneWithColumns(MutableColumns && columns) const
Block res;

size_t num_columns = data.size();

if (num_columns != columns.size())
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Cannot clone block with columns because block has {} columns, but {} columns given",
num_columns, columns.size());
}

res.reserve(num_columns);

for (size_t i = 0; i < num_columns; ++i)
@ -514,8 +523,12 @@ Block Block::cloneWithColumns(const Columns & columns) const
size_t num_columns = data.size();

if (num_columns != columns.size())
throw Exception("Cannot clone block with columns because block has " + toString(num_columns) + " columns, "
"but " + toString(columns.size()) + " columns given.", ErrorCodes::LOGICAL_ERROR);
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Cannot clone block with columns because block has {} columns, but {} columns given",
num_columns, columns.size());
}

res.reserve(num_columns);

@ -8,5 +8,6 @@ namespace DB
{

using ColumnNumbers = std::vector<size_t>;
using ColumnNumbersList = std::vector<ColumnNumbers>;

}

@ -109,6 +109,8 @@ public:
std::optional<NameAndTypePair> tryGetByName(const std::string & name) const;
};

using NamesAndTypesLists = std::vector<NamesAndTypesList>;

}

namespace std

@ -22,7 +22,7 @@ namespace DB
{
class IColumn;

static constexpr UInt64 operator""_Gb(unsigned long long value)
static constexpr UInt64 operator""_GiB(unsigned long long value)
{
return value * 1024 * 1024 * 1024;
}
@ -120,6 +120,8 @@ static constexpr UInt64 operator""_Gb(unsigned long long value)
M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \
M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \
M(Bool, compile_sort_description, true, "Compile sort description to native code.", 0) \
M(UInt64, min_count_to_compile_sort_description, 3, "The number of identical sort descriptions before they are JIT-compiled", 0) \
M(UInt64, group_by_two_level_threshold, 100000, "From what number of keys, a two-level aggregation starts. 0 - the threshold is not set.", 0) \
M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \
M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
@ -360,14 +362,14 @@ static constexpr UInt64 operator""_Gb(unsigned long long value)
M(OverflowMode, distinct_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \
\
M(UInt64, max_memory_usage, 0, "Maximum memory usage for processing of single query. Zero means unlimited.", 0) \
M(UInt64, max_guaranteed_memory_usage, 10_Gb, "Maximum guaranteed memory usage for processing of single query. It represents soft limit. Zero means unlimited.", 0) \
M(UInt64, memory_overcommit_ratio_denominator, 1_GiB, "It represents soft memory limit on the user level. This value is used to compute query overcommit ratio.", 0) \
M(UInt64, max_memory_usage_for_user, 0, "Maximum memory usage for processing all concurrently running queries for the user. Zero means unlimited.", 0) \
M(UInt64, max_guaranteed_memory_usage_for_user, 10_Gb, "Maximum guaranteed memory usage for processing all concurrently running queries for the user. It represents soft limit. Zero means unlimited.", 0) \
M(UInt64, memory_overcommit_ratio_denominator_for_user, 1_GiB, "It represents soft memory limit on the global level. This value is used to compute query overcommit ratio.", 0) \
M(UInt64, max_untracked_memory, (4 * 1024 * 1024), "Small allocations and deallocations are grouped in thread local variable and tracked or profiled only when amount (in absolute value) becomes larger than specified value. If the value is higher than 'memory_profiler_step' it will be effectively lowered to 'memory_profiler_step'.", 0) \
M(UInt64, memory_profiler_step, (4 * 1024 * 1024), "Whenever query memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down query processing.", 0) \
M(Float, memory_profiler_sample_probability, 0., "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \
\
M(UInt64, memory_usage_overcommit_max_wait_microseconds, 0, "Maximum time thread will wait for memory to be freed in the case of memory overcommit. If timeout is reached and memory is not freed, exception is thrown", 0) \
M(UInt64, memory_usage_overcommit_max_wait_microseconds, 200, "Maximum time thread will wait for memory to be freed in the case of memory overcommit on user level. If timeout is reached and memory is not freed, exception is thrown.", 0) \
\
M(UInt64, max_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for a query. Zero means unlimited.", 0) \
M(UInt64, max_network_bytes, 0, "The maximum number of bytes (compressed) to receive or transmit over the network for execution of the query.", 0) \
@ -379,7 +381,7 @@ static constexpr UInt64 operator""_Gb(unsigned long long value)
\
M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \
M(Bool, log_query_settings, true, "Log query settings into the query_log.", 0) \
M(Bool, log_query_threads, true, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \
M(Bool, log_query_threads, false, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \
M(Bool, log_query_views, true, "Log query dependent views into system.query_views_log table. This setting have effect only when 'log_queries' is true.", 0) \
M(String, log_comment, "", "Log comment into system.query_log table and server log. It can be set to arbitrary string no longer than max_query_size.", 0) \
M(LogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \

@ -12,6 +12,11 @@
#include <Columns/IColumn.h>
#include <Columns/ColumnString.h>

#include "config_core.h"

#if USE_EMBEDDED_COMPILER
#include <Interpreters/JIT/compileFunction.h>
#endif

namespace DB
{
@ -49,6 +54,10 @@ struct SortCursorImpl
*/
IColumn::Permutation * permutation = nullptr;

#if USE_EMBEDDED_COMPILER
std::vector<ColumnData> raw_sort_columns_data;
#endif

SortCursorImpl() = default;

SortCursorImpl(const Block & block, const SortDescription & desc_, size_t order_ = 0, IColumn::Permutation * perm = nullptr)
@ -78,6 +87,9 @@ struct SortCursorImpl
{
all_columns.clear();
sort_columns.clear();
#if USE_EMBEDDED_COMPILER
raw_sort_columns_data.clear();
#endif

size_t num_columns = columns.size();

@ -90,6 +102,10 @@ struct SortCursorImpl
size_t column_number = block.getPositionByName(column_desc.column_name);
sort_columns.push_back(columns[column_number].get());

#if USE_EMBEDDED_COMPILER
if (desc.compiled_sort_description)
raw_sort_columns_data.emplace_back(getColumnData(sort_columns.back()));
#endif
need_collation[j] = desc[j].collator != nullptr && sort_columns.back()->isCollationSupported();
has_collation |= need_collation[j];
}
@ -164,17 +180,36 @@ struct SortCursor : SortCursorHelper<SortCursor>
/// The specified row of this cursor is greater than the specified row of another cursor.
bool ALWAYS_INLINE greaterAt(const SortCursor & rhs, size_t lhs_pos, size_t rhs_pos) const
{
#if USE_EMBEDDED_COMPILER
if (impl->desc.compiled_sort_description && rhs.impl->desc.compiled_sort_description)
{
assert(impl->raw_sort_columns_data.size() == rhs.impl->raw_sort_columns_data.size());

auto sort_description_func_typed = reinterpret_cast<JITSortDescriptionFunc>(impl->desc.compiled_sort_description);
int res = sort_description_func_typed(lhs_pos, rhs_pos, impl->raw_sort_columns_data.data(), rhs.impl->raw_sort_columns_data.data()); /// NOLINT

if (res > 0)
return true;
if (res < 0)
return false;

return impl->order > rhs.impl->order;
}
#endif

for (size_t i = 0; i < impl->sort_columns_size; ++i)
{
const auto & desc = impl->desc[i];
int direction = desc.direction;
int nulls_direction = desc.nulls_direction;
int res = direction * impl->sort_columns[i]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[i]), nulls_direction);

if (res > 0)
return true;
if (res < 0)
return false;
}

return impl->order > rhs.impl->order;
}
};
@ -190,8 +225,26 @@ struct SimpleSortCursor : SortCursorHelper<SimpleSortCursor>
const auto & desc = impl->desc[0];
int direction = desc.direction;
int nulls_direction = desc.nulls_direction;
int res = impl->sort_columns[0]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[0]), nulls_direction);
return res != 0 && ((res > 0) == (direction > 0));

bool result = false;

#if USE_EMBEDDED_COMPILER
if (impl->desc.compiled_sort_description && rhs.impl->desc.compiled_sort_description)
{
assert(impl->raw_sort_columns_data.size() == rhs.impl->raw_sort_columns_data.size());

auto sort_description_func_typed = reinterpret_cast<JITSortDescriptionFunc>(impl->desc.compiled_sort_description);
int jit_result = sort_description_func_typed(lhs_pos, rhs_pos, impl->raw_sort_columns_data.data(), rhs.impl->raw_sort_columns_data.data()); /// NOLINT
result = jit_result > 0;
}
else
#endif
{
int non_jit_result = impl->sort_columns[0]->compareAt(lhs_pos, rhs_pos, *(rhs.impl->sort_columns[0]), nulls_direction);
result = (non_jit_result != 0 && ((non_jit_result > 0) == (direction > 0)));
}

return result;
}
};

@ -2,6 +2,13 @@
#include <Core/SortDescription.h>
#include <IO/Operators.h>
#include <Common/JSONBuilder.h>
#include <Common/SipHash.h>

#if USE_EMBEDDED_COMPILER
#include <DataTypes/Native.h>
#include <Interpreters/JIT/compileFunction.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#endif

namespace DB
{
@ -35,6 +42,115 @@ void SortColumnDescription::explain(JSONBuilder::JSONMap & map) const
map.add("With Fill", with_fill);
}

#if USE_EMBEDDED_COMPILER

static CHJIT & getJITInstance()
{
static CHJIT jit;
return jit;
}

class CompiledSortDescriptionFunctionHolder final : public CompiledExpressionCacheEntry
{
public:
explicit CompiledSortDescriptionFunctionHolder(CompiledSortDescriptionFunction compiled_function_)
: CompiledExpressionCacheEntry(compiled_function_.compiled_module.size)
, compiled_sort_description_function(compiled_function_)
{}

~CompiledSortDescriptionFunctionHolder() override
{
getJITInstance().deleteCompiledModule(compiled_sort_description_function.compiled_module);
}

CompiledSortDescriptionFunction compiled_sort_description_function;
};

static std::string getSortDescriptionDump(const SortDescription & description, const DataTypes & header_types)
{
WriteBufferFromOwnString buffer;

for (size_t i = 0; i < description.size(); ++i)
buffer << header_types[i]->getName() << ' ' << description[i].direction << ' ' << description[i].nulls_direction;

return buffer.str();
}

static Poco::Logger * getLogger()
{
static Poco::Logger & logger = Poco::Logger::get("SortDescription");
return &logger;
}

void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attemps)
{
static std::unordered_map<UInt128, UInt64, UInt128Hash> counter;
static std::mutex mutex;

if (!description.compile_sort_description || sort_description_types.empty())
return;

for (const auto & type : sort_description_types)
{
if (!type->createColumn()->isComparatorCompilable() || !canBeNativeType(*type))
return;
}

auto description_dump = getSortDescriptionDump(description, sort_description_types);

SipHash sort_description_dump_hash;
sort_description_dump_hash.update(description_dump);

UInt128 sort_description_hash_key;
sort_description_dump_hash.get128(sort_description_hash_key);

{
std::lock_guard lock(mutex);
UInt64 & current_counter = counter[sort_description_hash_key];
if (current_counter < description.min_count_to_compile_sort_description)
{
current_counter += static_cast<UInt64>(increase_compile_attemps);
return;
}
}

std::shared_ptr<CompiledSortDescriptionFunctionHolder> compiled_sort_description_holder;

if (auto * compilation_cache = CompiledExpressionCacheFactory::instance().tryGetCache())
{
auto [compiled_function_cache_entry, _] = compilation_cache->getOrSet(sort_description_hash_key, [&] ()
{
LOG_TRACE(getLogger(), "Compile sort description {}", description_dump);

auto compiled_sort_description = compileSortDescription(getJITInstance(), description, sort_description_types, description_dump);
return std::make_shared<CompiledSortDescriptionFunctionHolder>(std::move(compiled_sort_description));
});

compiled_sort_description_holder = std::static_pointer_cast<CompiledSortDescriptionFunctionHolder>(compiled_function_cache_entry);
}
else
{
LOG_TRACE(getLogger(), "Compile sort description {}", description_dump);
auto compiled_sort_description = compileSortDescription(getJITInstance(), description, sort_description_types, description_dump);
compiled_sort_description_holder = std::make_shared<CompiledSortDescriptionFunctionHolder>(std::move(compiled_sort_description));
}

auto comparator_function = compiled_sort_description_holder->compiled_sort_description_function.comparator_function;
description.compiled_sort_description = reinterpret_cast<void *>(comparator_function);
description.compiled_sort_description_holder = std::move(compiled_sort_description_holder);
}

#else

void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attemps)
{
(void)(description);
(void)(sort_description_types);
(void)(increase_compile_attemps);
}

#endif

std::string dumpSortDescription(const SortDescription & description)
{
WriteBufferFromOwnString wb;

@ -89,10 +89,26 @@ struct SortColumnDescriptionWithColumnIndex
}
};

class CompiledSortDescriptionFunctionHolder;

/// Description of the sorting rule for several columns.
using SortDescription = std::vector<SortColumnDescription>;
using SortDescriptionWithPositions = std::vector<SortColumnDescriptionWithColumnIndex>;

class SortDescription : public std::vector<SortColumnDescription>
{
public:
/// Can be safely casted into JITSortDescriptionFunc
void * compiled_sort_description = nullptr;
std::shared_ptr<CompiledSortDescriptionFunctionHolder> compiled_sort_description_holder;
size_t min_count_to_compile_sort_description = 3;
bool compile_sort_description = false;
};

/** Compile sort description for header_types.
* Description is compiled only if compilation attempts to compile identical description is more than min_count_to_compile_sort_description.
*/
void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attemps);

/// Outputs user-readable description into `out`.
void dumpSortDescription(const SortDescription & description, WriteBuffer & out);

@ -47,7 +47,7 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDa
return builder.getInt8Ty();
else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate())
return builder.getInt16Ty();
else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDateTime())
else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDate32() || data_type.isDateTime())
return builder.getInt32Ty();
else if (data_type.isInt64() || data_type.isUInt64())
return builder.getInt64Ty();
@ -111,7 +111,8 @@ static inline bool canBeNativeType(const IDataType & type)
return canBeNativeType(*data_type_nullable.getNestedType());
}

return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate() || data_type.isEnum();
return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate()
|| data_type.isDate32() || data_type.isDateTime() || data_type.isEnum();
}

static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
@ -264,11 +265,11 @@ static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builde
{
return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float64> &>(column).getElement(index));
}
else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime() || column_data_type.isDateTime64())
else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime())
{
return llvm::ConstantInt::get(type, column.getUInt(index));
}
else if (column_data_type.isNativeInt() || column_data_type.isEnum())
else if (column_data_type.isNativeInt() || column_data_type.isEnum() || column_data_type.isDate32())
{
return llvm::ConstantInt::get(type, column.getInt(index));
}

@ -1,10 +1,10 @@
#include "AsynchronousReadIndirectBufferFromRemoteFS.h"

#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <Disks/IO/ThreadPoolRemoteFSReader.h>
#include <Disks/IO/ReadBufferFromRemoteFSGather.h>
#include <IO/ReadSettings.h>
#include <Common/logger_useful.h>


namespace CurrentMetrics
@ -57,7 +57,6 @@ AsynchronousReadIndirectBufferFromRemoteFS::AsynchronousReadIndirectBufferFromRe
ProfileEvents::increment(ProfileEvents::RemoteFSBuffers);
}


String AsynchronousReadIndirectBufferFromRemoteFS::getFileName() const
{
return impl->getFileName();
@ -169,6 +168,9 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl()
if (!hasPendingDataToRead())
return false;

Stopwatch watch;
CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};

size_t size = 0;
if (prefetch_future.valid())
{
@ -176,15 +178,13 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl()

size_t offset = 0;
{
Stopwatch watch;
CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};
auto result = prefetch_future.get();
size = result.size;
offset = result.offset;
LOG_TEST(log, "Current size: {}, offset: {}", size, offset);

/// If prefetch_future is valid, size should always be greater than zero.
assert(offset < size);
assert(offset <= size);
ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds());
}

@ -201,7 +201,7 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl()
auto offset = result.offset;

LOG_TEST(log, "Current size: {}, offset: {}", size, offset);
assert(offset < size);
assert(offset <= size);

if (size)
{
@ -210,6 +210,9 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl()
}
}

watch.stop();
ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds());

file_offset_of_buffer_end = impl->getFileOffsetOfBufferEnd();
assert(file_offset_of_buffer_end == impl->getImplementationBufferOffset());

@ -5,13 +5,23 @@
#include <base/scope_guard.h>
#include <Common/assert_cast.h>
#include <Common/hex.h>
#include <Common/getRandomASCIIString.h>


namespace ProfileEvents
{
extern const Event RemoteFSReadBytes;
extern const Event RemoteFSCacheReadBytes;
extern const Event RemoteFSCacheDownloadBytes;
extern const Event FileSegmentWaitReadBufferMicroseconds;
extern const Event FileSegmentReadMicroseconds;
extern const Event FileSegmentCacheWriteMicroseconds;
extern const Event FileSegmentPredownloadMicroseconds;
extern const Event FileSegmentUsedBytes;

extern const Event CachedReadBufferReadFromSourceMicroseconds;
extern const Event CachedReadBufferReadFromCacheMicroseconds;
extern const Event CachedReadBufferCacheWriteMicroseconds;
extern const Event CachedReadBufferReadFromSourceBytes;
extern const Event CachedReadBufferReadFromCacheBytes;
extern const Event CachedReadBufferCacheWriteBytes;
}

namespace DB
@ -23,18 +33,12 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}

static String getQueryId()
{
if (!CurrentThread::isInitialized() || !CurrentThread::get().getQueryContext() || CurrentThread::getQueryId().size == 0)
return "";
return CurrentThread::getQueryId().toString();
}

CachedReadBufferFromRemoteFS::CachedReadBufferFromRemoteFS(
const String & remote_fs_object_path_,
FileCachePtr cache_,
RemoteFSFileReaderCreator remote_file_reader_creator_,
const ReadSettings & settings_,
const String & query_id_,
size_t read_until_position_)
: SeekableReadBuffer(nullptr, 0)
#ifndef NDEBUG
@ -48,8 +52,9 @@ CachedReadBufferFromRemoteFS::CachedReadBufferFromRemoteFS(
, settings(settings_)
, read_until_position(read_until_position_)
, remote_file_reader_creator(remote_file_reader_creator_)
, query_id(getQueryId())
, enable_logging(!query_id.empty() && CurrentThread::get().getQueryContext()->getSettingsRef().enable_filesystem_cache_log)
, query_id(query_id_)
, enable_logging(!query_id.empty() && settings_.enable_filesystem_cache_log)
, current_buffer_id(getRandomASCIIString(8))
{
}

@ -62,9 +67,15 @@ void CachedReadBufferFromRemoteFS::appendFilesystemCacheLog(
.query_id = query_id,
.source_file_path = remote_fs_object_path,
.file_segment_range = { file_segment_range.left, file_segment_range.right },
.requested_range = { first_offset, read_until_position },
.file_segment_size = file_segment_range.size(),
.cache_attempted = true,
.read_buffer_id = current_buffer_id,
.profile_counters = std::make_shared<ProfileEvents::Counters::Snapshot>(current_file_segment_counters.getPartiallyAtomicSnapshot()),
};

current_file_segment_counters.reset();

switch (type)
{
case CachedReadBufferFromRemoteFS::ReadType::CACHED:
@ -109,9 +120,16 @@ void CachedReadBufferFromRemoteFS::initialize(size_t offset, size_t size)
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getCacheReadBuffer(size_t offset) const
{
auto path = cache->getPathInLocalCache(cache_key, offset);
auto buf = std::make_shared<ReadBufferFromFile>(path, settings.local_fs_buffer_size);
if (buf->size() == 0)

ReadSettings local_read_settings{settings};
/// Do not allow to use asynchronous version of LocalFSReadMethod.
local_read_settings.local_fs_method = LocalFSReadMethod::pread;

auto buf = createReadBufferFromFileBase(path, local_read_settings);
auto from_fd = dynamic_cast<ReadBufferFromFileDescriptor*>(buf.get());
if (from_fd && from_fd->size() == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path);

return buf;
}

@ -340,8 +358,13 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File
auto range = file_segment->range();
bytes_to_predownload = 0;

Stopwatch watch(CLOCK_MONOTONIC);

auto read_buffer_for_file_segment = getReadBufferForFileSegment(file_segment);

watch.stop();
current_file_segment_counters.increment(ProfileEvents::FileSegmentWaitReadBufferMicroseconds, watch.elapsedMicroseconds());

[[maybe_unused]] auto download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE;
assert(download_current_segment == file_segment->isDownloader());

@ -362,7 +385,7 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File
case ReadType::CACHED:
{
#ifndef NDEBUG
auto * file_reader = assert_cast<ReadBufferFromFile *>(read_buffer_for_file_segment.get());
auto * file_reader = dynamic_cast<ReadBufferFromFileDescriptor *>(read_buffer_for_file_segment.get());
size_t file_size = file_reader->size();

if (file_size == 0 || range.left + file_size <= file_offset_of_buffer_end)
@ -436,6 +459,9 @@ bool CachedReadBufferFromRemoteFS::completeFileSegmentAndGetNext()
{
LOG_TEST(log, "Completed segment: {}", (*current_file_segment_it)->range().toString());

if (enable_logging)
appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type);

auto file_segment_it = current_file_segment_it++;
auto & file_segment = *file_segment_it;

@ -460,15 +486,29 @@ bool CachedReadBufferFromRemoteFS::completeFileSegmentAndGetNext()

if (read_type == ReadType::CACHED)
(*current_file_segment_it)->incrementHitsCount();
if (enable_logging)
appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type);

LOG_TEST(log, "New segment: {}", (*current_file_segment_it)->range().toString());
return true;
}

CachedReadBufferFromRemoteFS::~CachedReadBufferFromRemoteFS()
{
if (enable_logging
&& file_segments_holder
&& current_file_segment_it != file_segments_holder->file_segments.end())
{
appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type);
}
}

void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
{
Stopwatch predownload_watch(CLOCK_MONOTONIC);
SCOPE_EXIT({
predownload_watch.stop();
current_file_segment_counters.increment(ProfileEvents::FileSegmentPredownloadMicroseconds, predownload_watch.elapsedMicroseconds());
});

if (bytes_to_predownload)
{
/// Consider this case. Some user needed segment [a, b] and downloaded it partially.
@ -484,7 +524,19 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)

while (true)
{
if (!bytes_to_predownload || implementation_buffer->eof())
bool has_more_data;
{
Stopwatch watch(CLOCK_MONOTONIC);

has_more_data = !implementation_buffer->eof();

watch.stop();
auto elapsed = watch.elapsedMicroseconds();
current_file_segment_counters.increment(ProfileEvents::FileSegmentReadMicroseconds, elapsed);
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceMicroseconds, elapsed);
}

if (!bytes_to_predownload || !has_more_data)
{
if (bytes_to_predownload)
throw Exception(
@ -523,7 +575,7 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
size_t current_impl_buffer_size = implementation_buffer->buffer().size();
size_t current_predownload_size = std::min(current_impl_buffer_size, bytes_to_predownload);

ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, current_impl_buffer_size);
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size);

if (file_segment->reserve(current_predownload_size))
{
@ -531,8 +583,15 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)

assert(file_segment->getDownloadOffset() == static_cast<size_t>(implementation_buffer->getPosition()));

Stopwatch watch(CLOCK_MONOTONIC);

file_segment->write(implementation_buffer->buffer().begin(), current_predownload_size, current_offset);
ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, current_predownload_size);

watch.stop();
auto elapsed = watch.elapsedMicroseconds();
current_file_segment_counters.increment(ProfileEvents::FileSegmentCacheWriteMicroseconds, elapsed);
ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteMicroseconds, elapsed);
ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteBytes, current_predownload_size);

current_offset += current_predownload_size;

@ -668,18 +727,18 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
return false;

SCOPE_EXIT({
/// Save state of current file segment before it is completed.
nextimpl_step_log_info = getInfoForLog();

if (current_file_segment_it == file_segments_holder->file_segments.end())
return;

auto & file_segment = *current_file_segment_it;

bool download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE;
if (download_current_segment)
try
{
try
/// Save state of current file segment before it is completed.
nextimpl_step_log_info = getInfoForLog();

if (current_file_segment_it == file_segments_holder->file_segments.end())
return;

auto & file_segment = *current_file_segment_it;

bool download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE;
if (download_current_segment)
{
bool need_complete_file_segment = file_segment->isDownloader();
if (need_complete_file_segment)
@ -688,13 +747,13 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
file_segment->completeBatchAndResetDownloader();
}
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}

assert(!file_segment->isDownloader());
assert(!file_segment->isDownloader());
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
});

bytes_to_predownload = 0;
@ -711,9 +770,6 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()

if (read_type == ReadType::CACHED)
(*current_file_segment_it)->incrementHitsCount();

if (enable_logging)
appendFilesystemCacheLog((*current_file_segment_it)->range(), read_type);
}

assert(!internal_buffer.empty());
@ -747,18 +803,17 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()

auto download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE;
if (download_current_segment != file_segment->isDownloader())
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Incorrect segment state. Having read type: {}, Caller id: {}, downloader id: {}, file segment state: {}",
toString(read_type),
file_segment->getCallerId(),
file_segment->getDownloader(),
file_segment->state());
"Incorrect segment state. Having read type: {}, file segment info: {}",
toString(read_type), file_segment->getInfoForLog());
}

if (!result)
{
#ifndef NDEBUG
if (auto * cache_file_reader = typeid_cast<ReadBufferFromFile *>(implementation_buffer.get()))
if (auto * cache_file_reader = dynamic_cast<ReadBufferFromFileDescriptor *>(implementation_buffer.get()))
{
auto cache_file_size = cache_file_reader->size();
if (cache_file_size == 0)
@ -767,13 +822,26 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
}
#endif

Stopwatch watch(CLOCK_MONOTONIC);

result = implementation_buffer->next();

watch.stop();
auto elapsed = watch.elapsedMicroseconds();
current_file_segment_counters.increment(ProfileEvents::FileSegmentReadMicroseconds, elapsed);

size = implementation_buffer->buffer().size();

if (read_type == ReadType::CACHED)
ProfileEvents::increment(ProfileEvents::RemoteFSCacheReadBytes, size);
{
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheBytes, size);
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheMicroseconds, elapsed);
}
else
ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, size);
{
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, size);
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceMicroseconds, elapsed);
}
}

if (result)
@ -786,12 +854,18 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
{
assert(file_segment->getDownloadOffset() == static_cast<size_t>(implementation_buffer->getPosition()));

Stopwatch watch(CLOCK_MONOTONIC);

file_segment->write(
needed_to_predownload ? implementation_buffer->position() : implementation_buffer->buffer().begin(),
size,
file_offset_of_buffer_end);

ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, size);
watch.stop();
auto elapsed = watch.elapsedMicroseconds();
current_file_segment_counters.increment(ProfileEvents::FileSegmentCacheWriteMicroseconds, elapsed);
ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteMicroseconds, elapsed);
ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteBytes, size);

assert(file_segment->getDownloadOffset() <= file_segment->range().right + 1);
assert(
@ -819,10 +893,13 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
}

file_offset_of_buffer_end += size;

}

swap(*implementation_buffer);

current_file_segment_counters.increment(ProfileEvents::FileSegmentUsedBytes, available());

if (download_current_segment)
file_segment->completeBatchAndResetDownloader();

@ -851,7 +928,7 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
if (size == 0 && file_offset_of_buffer_end < read_until_position)
{
std::optional<size_t> cache_file_size;
if (auto * cache_file_reader = dynamic_cast<ReadBufferFromFile *>(implementation_buffer.get()))
if (auto * cache_file_reader = dynamic_cast<ReadBufferFromFileDescriptor *>(implementation_buffer.get()))
cache_file_size = cache_file_reader->size();

throw Exception(

@ -26,8 +26,11 @@ public:
FileCachePtr cache_,
RemoteFSFileReaderCreator remote_file_reader_creator_,
const ReadSettings & settings_,
const String & query_id_,
size_t read_until_position_);

~CachedReadBufferFromRemoteFS() override;

bool nextImpl() override;

off_t seek(off_t off, int whence) override;
@ -116,8 +119,10 @@ private:

String query_id;
bool enable_logging = false;
String current_buffer_id;

CurrentMetrics::Increment metric_increment{CurrentMetrics::FilesystemCacheReadBuffers};
ProfileEvents::Counters current_file_segment_counters;
};

}

@ -21,9 +21,11 @@
#include <filesystem>
#include <iostream>
#include <Common/hex.h>
#include <Interpreters/FilesystemCacheLog.h>

namespace fs = std::filesystem;


namespace DB
{

@ -32,17 +34,24 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}

#if USE_AWS_S3
SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const String & path, size_t file_size)
SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(const String & path, size_t file_size)
{
if (!current_file_path.empty() && !with_cache && enable_cache_log)
{
appendFilesystemCacheLog();
}

current_file_path = fs::path(common_path_prefix) / path;
current_file_size = file_size;
total_bytes_read_from_current_file = 0;

return createImplementationBufferImpl(path, file_size);
}

#if USE_AWS_S3
SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBufferImpl(const String & path, size_t file_size)
{
current_path = path;
auto remote_path = fs::path(common_path_prefix) / path;

auto cache = settings.remote_fs_cache;
bool with_cache = cache
&& settings.enable_filesystem_cache
&& (!IFileCache::isReadOnly() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache);

auto remote_file_reader_creator = [=, this]()
{
return std::make_unique<ReadBufferFromS3>(
@ -53,7 +62,7 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S
if (with_cache)
{
return std::make_shared<CachedReadBufferFromRemoteFS>(
remote_path, cache, remote_file_reader_creator, settings, read_until_position ? read_until_position : file_size);
remote_path, settings.remote_fs_cache, remote_file_reader_creator, settings, query_id, read_until_position ? read_until_position : file_size);
}

return remote_file_reader_creator();
@ -62,24 +71,24 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S


#if USE_AZURE_BLOB_STORAGE
SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementationBuffer(const String & path, size_t /* file_size */)
SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementationBufferImpl(const String & path, size_t /* file_size */)
{
current_path = path;
current_file_path = path;
return std::make_unique<ReadBufferFromAzureBlobStorage>(blob_container_client, path, max_single_read_retries,
max_single_download_retries, settings.remote_fs_buffer_size, /* use_external_buffer */true, read_until_position);
}
#endif


SeekableReadBufferPtr ReadBufferFromWebServerGather::createImplementationBuffer(const String & path, size_t /* file_size */)
SeekableReadBufferPtr ReadBufferFromWebServerGather::createImplementationBufferImpl(const String & path, size_t /* file_size */)
{
current_path = path;
current_file_path = path;
return std::make_unique<ReadBufferFromWebServer>(fs::path(uri) / path, context, settings, /* use_external_buffer */true, read_until_position);
}


#if USE_HDFS
SeekableReadBufferPtr ReadBufferFromHDFSGather::createImplementationBuffer(const String & path, size_t /* file_size */)
SeekableReadBufferPtr ReadBufferFromHDFSGather::createImplementationBufferImpl(const String & path, size_t /* file_size */)
{
return std::make_unique<ReadBufferFromHDFS>(hdfs_uri, fs::path(hdfs_directory) / path, config, settings.remote_fs_buffer_size);
}
@ -94,8 +103,31 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(
, common_path_prefix(common_path_prefix_)
, blobs_to_read(blobs_to_read_)
, settings(settings_)
, query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ? CurrentThread::getQueryId() : "")
, log(&Poco::Logger::get("ReadBufferFromRemoteFSGather"))
, enable_cache_log(!query_id.empty() && settings.enable_filesystem_cache_log)
{
with_cache = settings.remote_fs_cache
&& settings.enable_filesystem_cache
&& (!IFileCache::isReadOnly() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache);
}


void ReadBufferFromRemoteFSGather::appendFilesystemCacheLog()
{
FilesystemCacheLogElement elem
{
.event_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()),
.query_id = query_id,
.source_file_path = current_file_path,
.file_segment_range = { 0, current_file_size },
.read_type = FilesystemCacheLogElement::ReadType::READ_FROM_FS_BYPASSING_CACHE,
.file_segment_size = total_bytes_read_from_current_file,
.cache_attempted = false,
};

if (auto cache_log = Context::getGlobalContextInstance()->getFilesystemCacheLog())
cache_log->add(elem);
}


@ -199,6 +231,7 @@ bool ReadBufferFromRemoteFSGather::readImpl()
*/
if (bytes_to_ignore)
{
total_bytes_read_from_current_file += bytes_to_ignore;
current_buf->ignore(bytes_to_ignore);
result = current_buf->hasPendingData();
file_offset_of_buffer_end += bytes_to_ignore;
@ -225,6 +258,7 @@ bool ReadBufferFromRemoteFSGather::readImpl()
{
assert(available());
nextimpl_working_buffer_offset = offset();
total_bytes_read_from_current_file += available();
}

return result;
@ -254,7 +288,7 @@ void ReadBufferFromRemoteFSGather::reset()

String ReadBufferFromRemoteFSGather::getFileName() const
{
return current_path;
return current_file_path;
}


@ -282,5 +316,12 @@ size_t ReadBufferFromRemoteFSGather::getImplementationBufferOffset() const
return current_buf->getFileOffsetOfBufferEnd();
}

ReadBufferFromRemoteFSGather::~ReadBufferFromRemoteFSGather()
{
if (!with_cache && enable_cache_log)
{
appendFilesystemCacheLog();
}
}

}

@ -29,6 +29,9 @@ public:
const std::string & common_path_prefix_,
const BlobsPathToSize & blobs_to_read_,
const ReadSettings & settings_);

~ReadBufferFromRemoteFSGather() override;

String getFileName() const;

void reset();
@ -54,7 +57,7 @@ public:
size_t getImplementationBufferOffset() const;

protected:
virtual SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) = 0;
virtual SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) = 0;

std::string common_path_prefix;

@ -64,9 +67,16 @@ protected:

size_t read_until_position = 0;

String current_path;
String current_file_path;
size_t current_file_size = 0;

bool with_cache;

String query_id;

private:
SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size);

bool nextImpl() override;

void initialize();
@ -75,6 +85,8 @@ private:

bool moveToNextBuffer();

void appendFilesystemCacheLog();

SeekableReadBufferPtr current_buf;

size_t current_buf_idx = 0;
@ -89,6 +101,10 @@ private:
size_t bytes_to_ignore = 0;

Poco::Logger * log;

size_t total_bytes_read_from_current_file = 0;

bool enable_cache_log = false;
};


@ -113,7 +129,7 @@ public:
{
}

SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override;
SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override;

private:
std::shared_ptr<Aws::S3::S3Client> client_ptr;
@ -143,7 +159,7 @@ public:
{
}

SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override;
SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override;

private:
std::shared_ptr<Azure::Storage::Blobs::BlobContainerClient> blob_container_client;
@ -168,7 +184,7 @@ public:
{
}

SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override;
SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override;

private:
String uri;
@ -195,7 +211,7 @@ public:
hdfs_uri = hdfs_uri_.substr(0, begin_of_path);
}

SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) override;
SeekableReadBufferPtr createImplementationBufferImpl(const String & path, size_t file_size) override;

private:
const Poco::Util::AbstractConfiguration & config;

||||
|
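The header hunk above renames the per-backend virtual hook to createImplementationBufferImpl() and keeps a single non-virtual createImplementationBuffer() entry point in the base class. A minimal, hypothetical sketch of that wrapper split (class and member names here are assumptions for illustration, not the actual ClickHouse code):

    #include <memory>
    #include <string>

    // Hypothetical stand-in for SeekableReadBuffer.
    struct SeekableReadBuffer { virtual ~SeekableReadBuffer() = default; };
    using SeekableReadBufferPtr = std::unique_ptr<SeekableReadBuffer>;

    class ReadBufferGatherBase
    {
    public:
        // Public entry point: shared work (e.g. wrapping with a cache layer) lives here.
        SeekableReadBufferPtr createImplementationBuffer(const std::string & path, size_t file_size)
        {
            auto buf = createImplementationBufferImpl(path, file_size);
            // ... optionally wrap `buf` with caching / bookkeeping before returning ...
            return buf;
        }

    protected:
        // Storage-specific part: each backend (S3, Azure, HDFS, web) overrides only this hook.
        virtual SeekableReadBufferPtr createImplementationBufferImpl(const std::string & path, size_t file_size) = 0;
    };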
@ -16,8 +16,8 @@

namespace ProfileEvents
{
    extern const Event RemoteFSReadMicroseconds;
    extern const Event RemoteFSReadBytes;
    extern const Event ThreadpoolReaderTaskMicroseconds;
    extern const Event ThreadpoolReaderReadBytes;
}

namespace CurrentMetrics
@ -83,8 +83,8 @@ std::future<IAsynchronousReader::Result> ThreadPoolRemoteFSReader::submit(Reques

    watch.stop();

    ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds());
    ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, result.offset ? result.size - result.offset : result.size);
    ProfileEvents::increment(ProfileEvents::ThreadpoolReaderTaskMicroseconds, watch.elapsedMicroseconds());
    ProfileEvents::increment(ProfileEvents::ThreadpoolReaderReadBytes, result.offset ? result.size - result.offset : result.size);

    thread_status.detachQuery(/* if_not_detached */true);

@ -63,7 +63,7 @@ Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfig
    {
        auto resolved_endpoint = endpoint;
        resolved_endpoint.setHost(resolved_hosts[i].toString());
        session = makeHTTPSession(endpoint, timeouts, false);
        session = makeHTTPSession(resolved_endpoint, timeouts, false);

        try
        {

@ -477,7 +477,7 @@
            ErrorCodes::ILLEGAL_COLUMN);

        if (!col->getValue<String>().empty())
            re = Regexps::get<false, false>(col->getValue<String>());
            re = Regexps::get<false, false, false>(col->getValue<String>());
    }

@ -560,7 +560,7 @@ public:
            + " of first argument of function " + getName() + ". Must be constant string.",
            ErrorCodes::ILLEGAL_COLUMN);

        re = Regexps::get<false, false>(col->getValue<String>());
        re = Regexps::get<false, false, false>(col->getValue<String>());
        capture = re->getNumberOfSubpatterns() > 0 ? 1 : 0;

        matches.resize(capture + 1);
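These call sites pass a third template argument because Regexps::get() now takes an explicit case_insensitive parameter, and the old createRegexp() specialization pair is collapsed into a single if constexpr branch in the Regexps.h hunk further below. A minimal, self-contained sketch of that compile-time dispatch pattern (the helper here is an illustrative stand-in, not the actual ClickHouse likePatternToRegexp()):

    #include <iostream>
    #include <string>

    // Illustrative stand-in: '%' -> ".*", '_' -> "." (real code also escapes regex metacharacters).
    std::string likePatternToRegexp(const std::string & pattern)
    {
        std::string res;
        for (char c : pattern)
            res += (c == '%') ? std::string(".*") : (c == '_') ? std::string(".") : std::string(1, c);
        return res;
    }

    // One compile-time branch replaces the old primary template plus <true> specialization.
    template <bool like>
    std::string createPattern(const std::string & pattern)
    {
        if constexpr (like)
            return likePatternToRegexp(pattern);
        else
            return pattern;
    }

    int main()
    {
        std::cout << createPattern<true>("%abc_") << '\n';   // ".*abc."
        std::cout << createPattern<false>("%abc_") << '\n';  // "%abc_"
    }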
@ -24,12 +24,11 @@ namespace ErrorCodes
/// Is the [I]LIKE expression reduced to finding a substring in a string?
static inline bool likePatternIsStrstr(const String & pattern, String & res)
{
    res = "";

    if (pattern.size() < 2 || pattern.front() != '%' || pattern.back() != '%')
        return false;

    res.reserve(pattern.size() * 2);
    res = "";
    res.reserve(pattern.size() - 2);

    const char * pos = pattern.data();
    const char * end = pos + pattern.size();
@ -81,7 +80,7 @@ struct MatchImpl
    static void vectorConstant(
        const ColumnString::Chars & data,
        const ColumnString::Offsets & offsets,
        const std::string & pattern,
        const String & pattern,
        const ColumnPtr & start_pos,
        PaddedPODArray<UInt8> & res)
    {
@ -92,14 +91,13 @@ struct MatchImpl
        if (offsets.empty())
            return;

        String strstr_pattern;

        /// A simple case where the [I]LIKE expression reduces to finding a substring in a string
        String strstr_pattern;
        if (like && likePatternIsStrstr(pattern, strstr_pattern))
        {
            const UInt8 * begin = data.data();
            const UInt8 * const begin = data.data();
            const UInt8 * const end = data.data() + data.size();
            const UInt8 * pos = begin;
            const UInt8 * end = pos + data.size();

            /// The current index in the array of strings.
            size_t i = 0;
@ -137,7 +135,7 @@ struct MatchImpl

            auto regexp = Regexps::get<like, true, case_insensitive>(pattern);

            std::string required_substring;
            String required_substring;
            bool is_trivial;
            bool required_substring_is_prefix; /// for `anchored` execution of the regexp.

@ -172,9 +170,9 @@ struct MatchImpl
            {
                /// NOTE This almost matches with the case of LikePatternIsStrstr.

                const UInt8 * begin = data.data();
                const UInt8 * const begin = data.data();
                const UInt8 * const end = data.begin() + data.size();
                const UInt8 * pos = begin;
                const UInt8 * end = pos + data.size();

                /// The current index in the array of strings.
                size_t i = 0;
@ -230,6 +228,7 @@ struct MatchImpl
                    ++i;
                }

                /// Tail, in which there can be no substring.
                if (i < res.size())
                    memset(&res[i], revert, (res.size() - i) * sizeof(res[0]));
            }
@ -238,14 +237,14 @@ struct MatchImpl

    /// Very carefully crafted copy-paste.
    static void vectorFixedConstant(
        const ColumnString::Chars & data, size_t n, const std::string & pattern,
        const ColumnString::Chars & data, size_t n, const String & pattern,
        PaddedPODArray<UInt8> & res)
    {
        if (data.empty())
            return;

        String strstr_pattern;
        /// A simple case where the LIKE expression reduces to finding a substring in a string
        String strstr_pattern;
        if (like && likePatternIsStrstr(pattern, strstr_pattern))
        {
            const UInt8 * begin = data.data();
@ -291,9 +290,9 @@ struct MatchImpl
        {
            size_t size = data.size() / n;

            auto regexp = Regexps::get<like, true>(pattern);
            auto regexp = Regexps::get<like, true, case_insensitive>(pattern);

            std::string required_substring;
            String required_substring;
            bool is_trivial;
            bool required_substring_is_prefix; /// for `anchored` execution of the regexp.

@ -44,23 +44,20 @@ namespace Regexps
    template <bool like>
    inline Regexp createRegexp(const std::string & pattern, int flags)
    {
        return {pattern, flags};
    }

    template <>
    inline Regexp createRegexp<true>(const std::string & pattern, int flags)
    {
        return {likePatternToRegexp(pattern), flags};
        if constexpr (like)
            return {likePatternToRegexp(pattern), flags};
        else
            return {pattern, flags};
    }

    /** Returns holder of an object from Pool.
      * You must hold the ownership while using the object.
      * In destructor, it returns the object back to the Pool for further reuse.
      */
    template <bool like, bool no_capture, bool case_insensitive = false>
    template <bool like, bool no_capture, bool case_insensitive>
    inline Pool::Pointer get(const std::string & pattern)
    {
        /// C++11 has thread-safe function-local static on most modern compilers.
        /// the Singleton is thread-safe in C++11
        static Pool known_regexps; /// Different variables for different pattern parameters.

        return known_regexps.get(pattern, [&pattern]

@ -1,7 +1,7 @@
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
add_headers_and_sources(clickhouse_functions_array .)
add_library(clickhouse_functions_array ${clickhouse_functions_array_sources} ${clickhouse_functions_array_headers})
target_link_libraries(clickhouse_functions_array PRIVATE dbms clickhouse_functions_gatherutils)
target_link_libraries(clickhouse_functions_array PRIVATE dbms clickhouse_functions_gatherutils ch_contrib::eigen)

if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
    target_compile_options(clickhouse_functions_array PRIVATE "-g0")
247 src/Functions/array/arrayDistance.cpp Normal file
@ -0,0 +1,247 @@
#include <Columns/ColumnArray.h>
#include <Columns/IColumn.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/IDataType.h>
#include <DataTypes/getLeastSupertype.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>

#include <Eigen/Core>

namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int LOGICAL_ERROR;
    extern const int SIZES_OF_ARRAYS_DOESNT_MATCH;
}

template <const int N>
struct LpDistance
{
    static inline String name = "L" + std::to_string(N);
    template <typename T>
    static void compute(const Eigen::MatrixX<T> & left, const Eigen::MatrixX<T> & right, PaddedPODArray<T> & array)
    {
        auto norms = (left - right).colwise().template lpNorm<N>();
        array.reserve(norms.size());
        // array.insert() failed to work with Eigen iterators
        for (auto n : norms)
            array.push_back(n);
    }
};

struct LinfDistance : LpDistance<Eigen::Infinity>
{
    static inline String name = "Linf";
};

struct CosineDistance
{
    static inline String name = "Cosine";
    template <typename T>
    static void compute(const Eigen::MatrixX<T> & left, const Eigen::MatrixX<T> & right, PaddedPODArray<T> & array)
    {
        auto prod = left.cwiseProduct(right).colwise().sum();
        auto nx = left.colwise().norm();
        auto ny = right.colwise().norm();
        auto nm = nx.cwiseProduct(ny).cwiseInverse();
        auto dist = 1.0 - prod.cwiseProduct(nm).array();
        array.reserve(dist.size());
        for (auto d : dist)
            array.push_back(d);
    }
};

template <class Kernel>
class FunctionArrayDistance : public IFunction
{
public:
    static inline auto name = "array" + Kernel::name + "Distance";
    String getName() const override { return name; }
    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayDistance<Kernel>>(); }
    size_t getNumberOfArguments() const override { return 2; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
    bool useDefaultImplementationForConstants() const override { return true; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        DataTypes types;
        for (const auto & argument : arguments)
        {
            const auto * array_type = checkAndGetDataType<DataTypeArray>(argument.type.get());
            if (!array_type)
                throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument of function {} must be array.", getName());

            types.push_back(array_type->getNestedType());
        }
        const auto & common_type = getLeastSupertype(types);
        switch (common_type->getTypeId())
        {
            case TypeIndex::UInt8:
            case TypeIndex::UInt16:
            case TypeIndex::UInt32:
            case TypeIndex::Int8:
            case TypeIndex::Int16:
            case TypeIndex::Int32:
            case TypeIndex::Float32:
                return std::make_shared<DataTypeFloat32>();
            case TypeIndex::UInt64:
            case TypeIndex::Int64:
            case TypeIndex::Float64:
                return std::make_shared<DataTypeFloat64>();
            default:
                throw Exception(
                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                    "Arguments of function {} has nested type {}. "
                    "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.",
                    getName(), common_type->getName());
        }
    }

    ColumnPtr
    executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override
    {
        DataTypePtr type_x = typeid_cast<const DataTypeArray *>(arguments[0].type.get())->getNestedType();
        DataTypePtr type_y = typeid_cast<const DataTypeArray *>(arguments[1].type.get())->getNestedType();

        ColumnPtr col_x = arguments[0].column->convertToFullColumnIfConst();
        ColumnPtr col_y = arguments[1].column->convertToFullColumnIfConst();

        const auto * arr_x = assert_cast<const ColumnArray *>(col_x.get());
        const auto * arr_y = assert_cast<const ColumnArray *>(col_y.get());

        auto result = result_type->createColumn();
        switch (result_type->getTypeId())
        {
            case TypeIndex::Float32:
                executeWithType<Float32>(*arr_x, *arr_y, type_x, type_y, result);
                break;
            case TypeIndex::Float64:
                executeWithType<Float64>(*arr_x, *arr_y, type_x, type_y, result);
                break;
            default:
                throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected result type.");
        }
        return result;
    }

private:
    template <typename MatrixType>
    void executeWithType(
        const ColumnArray & array_x,
        const ColumnArray & array_y,
        const DataTypePtr & type_x,
        const DataTypePtr & type_y,
        MutableColumnPtr & column) const
    {
        Eigen::MatrixX<MatrixType> mx, my;
        columnToMatrix(array_x, type_x, mx);
        columnToMatrix(array_y, type_y, my);

        if (mx.rows() && my.rows() && mx.rows() != my.rows())
        {
            throw Exception(
                ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH,
                "Arguments of function {} have different array sizes: {} and {}",
                getName(), mx.rows(), my.rows());
        }
        auto & data = assert_cast<ColumnVector<MatrixType> &>(*column).getData();
        Kernel::compute(mx, my, data);
    }

    template <typename MatrixType>
    void columnToMatrix(const ColumnArray & array, const DataTypePtr & nested_type, Eigen::MatrixX<MatrixType> & mat) const
    {
        const auto & offsets = array.getOffsets();
        size_t cols = offsets.size();
        size_t rows = cols > 0 ? offsets.front() : 0;

        ColumnArray::Offset prev = 0;
        for (ColumnArray::Offset off : offsets)
        {
            if (off - prev != rows)
                throw Exception(
                    ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH,
                    "Arrays in a column passed to function {} have different sizes: {} and {}",
                    getName(), rows, off - prev);
            prev = off;
        }

        switch (nested_type->getTypeId())
        {
            case TypeIndex::UInt8:
                fillMatrix<MatrixType, UInt8>(mat, array, rows, cols);
                break;
            case TypeIndex::UInt16:
                fillMatrix<MatrixType, UInt16>(mat, array, rows, cols);
                break;
            case TypeIndex::UInt32:
                fillMatrix<MatrixType, UInt32>(mat, array, rows, cols);
                break;
            case TypeIndex::UInt64:
                fillMatrix<MatrixType, UInt64>(mat, array, rows, cols);
                break;
            case TypeIndex::Int8:
                fillMatrix<MatrixType, Int8>(mat, array, rows, cols);
                break;
            case TypeIndex::Int16:
                fillMatrix<MatrixType, Int16>(mat, array, rows, cols);
                break;
            case TypeIndex::Int32:
                fillMatrix<MatrixType, Int32>(mat, array, rows, cols);
                break;
            case TypeIndex::Int64:
                fillMatrix<MatrixType, Int64>(mat, array, rows, cols);
                break;
            case TypeIndex::Float32:
                fillMatrix<MatrixType, Float32>(mat, array, rows, cols);
                break;
            case TypeIndex::Float64:
                fillMatrix<MatrixType, Float64>(mat, array, rows, cols);
                break;
            default:
                throw Exception(
                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                    "Arguments of function {} has nested type {}. "
                    "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.",
                    getName(), nested_type->getName());
        }
    }

    // optimize for float/ double
    template <typename MatrixType, typename DataType>
    requires std::is_same_v<MatrixType, DataType>
    void fillMatrix(Eigen::MatrixX<MatrixType> & mat, const ColumnArray & array, size_t rows, size_t cols) const
    {
        const auto & data = typeid_cast<const ColumnVector<DataType> &>(array.getData()).getData();
        mat = Eigen::Map<const Eigen::MatrixX<MatrixType>>(data.data(), rows, cols);
    }

    template <typename MatrixType, typename DataType>
    void fillMatrix(Eigen::MatrixX<MatrixType> & mat, const ColumnArray & array, size_t rows, size_t cols) const
    {
        const auto & data = typeid_cast<const ColumnVector<DataType> &>(array.getData()).getData();
        mat.resize(rows, cols);
        for (size_t col = 0; col < cols; ++col)
        {
            for (size_t row = 0; row < rows; ++row)
            {
                size_t off = col * rows;
                mat(row, col) = static_cast<MatrixType>(data[off + row]);
            }
        }
    }
};

void registerFunctionArrayDistance(FunctionFactory & factory)
{
    factory.registerFunction<FunctionArrayDistance<LpDistance<1>>>();
    factory.registerFunction<FunctionArrayDistance<LpDistance<2>>>();
    factory.registerFunction<FunctionArrayDistance<LinfDistance>>();
    factory.registerFunction<FunctionArrayDistance<CosineDistance>>();
}

}

205 src/Functions/array/arrayNorm.cpp Normal file
@ -0,0 +1,205 @@
#include <Columns/ColumnArray.h>
#include <Columns/IColumn.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/IDataType.h>
#include <DataTypes/getLeastSupertype.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>

#include <Eigen/Core>

namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int LOGICAL_ERROR;
}

template <const int N>
struct LpNorm
{
    static inline String name = "L" + std::to_string(N);
    template <typename T>
    static void compute(const std::vector<Eigen::VectorX<T>> & vec, PaddedPODArray<T> & array)
    {
        array.reserve(vec.size());
        for (const auto & v : vec)
        {
            array.push_back(v.template lpNorm<N>());
        }
    }
};

struct LinfNorm : LpNorm<Eigen::Infinity>
{
    static inline String name = "Linf";
};

template <class Kernel>
class FunctionArrayNorm : public IFunction
{
public:
    static inline auto name = "array" + Kernel::name + "Norm";
    String getName() const override { return name; }
    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayNorm<Kernel>>(); }
    size_t getNumberOfArguments() const override { return 1; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
    bool useDefaultImplementationForConstants() const override { return true; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        DataTypes types;
        for (const auto & argument : arguments)
        {
            const auto * array_type = checkAndGetDataType<DataTypeArray>(argument.type.get());
            if (!array_type)
                throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Argument of function {} must be array.", getName());

            types.push_back(array_type->getNestedType());
        }
        const auto & common_type = getLeastSupertype(types);
        switch (common_type->getTypeId())
        {
            case TypeIndex::UInt8:
            case TypeIndex::UInt16:
            case TypeIndex::UInt32:
            case TypeIndex::Int8:
            case TypeIndex::Int16:
            case TypeIndex::Int32:
            case TypeIndex::Float32:
                return std::make_shared<DataTypeFloat32>();
            case TypeIndex::UInt64:
            case TypeIndex::Int64:
            case TypeIndex::Float64:
                return std::make_shared<DataTypeFloat64>();
            default:
                throw Exception(
                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                    "Arguments of function {} has nested type {}. "
                    "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.",
                    getName(), common_type->getName());
        }
    }

    ColumnPtr
    executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override
    {
        DataTypePtr type = typeid_cast<const DataTypeArray *>(arguments[0].type.get())->getNestedType();
        ColumnPtr column = arguments[0].column->convertToFullColumnIfConst();
        const auto * arr = assert_cast<const ColumnArray *>(column.get());

        auto result = result_type->createColumn();
        switch (result_type->getTypeId())
        {
            case TypeIndex::Float32:
                executeWithType<Float32>(*arr, type, result);
                break;
            case TypeIndex::Float64:
                executeWithType<Float64>(*arr, type, result);
                break;
            default:
                throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected result type.");
        }
        return result;
    }

private:
    template <typename MatrixType>
    void executeWithType(const ColumnArray & array, const DataTypePtr & type, MutableColumnPtr & column) const
    {
        std::vector<Eigen::VectorX<MatrixType>> vec;
        columnToVectors(array, type, vec);
        auto & data = assert_cast<ColumnVector<MatrixType> &>(*column).getData();
        Kernel::compute(vec, data);
    }

    template <typename MatrixType>
    void columnToVectors(const ColumnArray & array, const DataTypePtr & nested_type, std::vector<Eigen::VectorX<MatrixType>> & vec) const
    {
        switch (nested_type->getTypeId())
        {
            case TypeIndex::UInt8:
                fillVectors<MatrixType, UInt8>(vec, array);
                break;
            case TypeIndex::UInt16:
                fillVectors<MatrixType, UInt16>(vec, array);
                break;
            case TypeIndex::UInt32:
                fillVectors<MatrixType, UInt32>(vec, array);
                break;
            case TypeIndex::UInt64:
                fillVectors<MatrixType, UInt64>(vec, array);
                break;
            case TypeIndex::Int8:
                fillVectors<MatrixType, Int8>(vec, array);
                break;
            case TypeIndex::Int16:
                fillVectors<MatrixType, Int16>(vec, array);
                break;
            case TypeIndex::Int32:
                fillVectors<MatrixType, Int32>(vec, array);
                break;
            case TypeIndex::Int64:
                fillVectors<MatrixType, Int64>(vec, array);
                break;
            case TypeIndex::Float32:
                fillVectors<MatrixType, Float32>(vec, array);
                break;
            case TypeIndex::Float64:
                fillVectors<MatrixType, Float64>(vec, array);
                break;
            default:
                throw Exception(
                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                    "Arguments of function {} has nested type {}. "
                    "Support: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64.",
                    getName(), nested_type->getName());
        }
    }

    template <typename MatrixType, typename DataType>
    requires std::is_same_v<MatrixType, DataType>
    void fillVectors(std::vector<Eigen::VectorX<MatrixType>> & vec, const ColumnArray & array) const
    {
        const auto & data = typeid_cast<const ColumnVector<DataType> &>(array.getData()).getData();
        const auto & offsets = array.getOffsets();
        vec.reserve(offsets.size());
        ColumnArray::Offset prev = 0;
        for (auto off : offsets)
        {
            vec.emplace_back(Eigen::Map<const Eigen::VectorX<MatrixType>>(data.data() + prev, off - prev));
            prev = off;
        }
    }

    template <typename MatrixType, typename DataType>
    void fillVectors(std::vector<Eigen::VectorX<MatrixType>> & vec, const ColumnArray & array) const
    {
        const auto & data = typeid_cast<const ColumnVector<DataType> &>(array.getData()).getData();
        const auto & offsets = array.getOffsets();
        vec.reserve(offsets.size());

        ColumnArray::Offset prev = 0;
        for (auto off : offsets)
        {
            Eigen::VectorX<MatrixType> mat(off - prev);
            for (ColumnArray::Offset row = 0; row + prev < off; ++row)
            {
                mat[row] = static_cast<MatrixType>(data[prev + row]);
            }
            prev = off;
            vec.emplace_back(mat);
        }
    }
};

void registerFunctionArrayNorm(FunctionFactory & factory)
{
    factory.registerFunction<FunctionArrayNorm<LpNorm<1>>>();
    factory.registerFunction<FunctionArrayNorm<LpNorm<2>>>();
    factory.registerFunction<FunctionArrayNorm<LinfNorm>>();
}

}
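Both new files drive their kernels through Eigen column-wise reductions: each array becomes a column, and colwise().lpNorm<N>() yields one value per column. A small standalone sketch of the same Eigen calls (illustrative only, not part of the commit):

    // Compile with an Eigen include path, e.g.: g++ -I/path/to/eigen example.cpp
    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        // Two columns = two arrays of size 3.
        Eigen::MatrixXf left(3, 2), right(3, 2);
        left  << 1, 4,
                 2, 5,
                 3, 6;
        right << 1, 0,
                 0, 0,
                 0, 0;

        // One L2 distance per column, as in LpDistance<2>::compute().
        auto distances = (left - right).colwise().lpNorm<2>();
        // One L2 norm per column of `left`, as in LpNorm<2>::compute().
        auto norms = left.colwise().lpNorm<2>();

        std::cout << "distances: " << distances << "\n"; // ~[3.61, 8.77]
        std::cout << "norms: " << norms << "\n";          // ~[3.74, 8.77]
    }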
@ -37,6 +37,8 @@ void registerFunctionArrayAUC(FunctionFactory &);
void registerFunctionArrayReduceInRanges(FunctionFactory &);
void registerFunctionMapOp(FunctionFactory &);
void registerFunctionMapPopulateSeries(FunctionFactory &);
void registerFunctionArrayDistance(FunctionFactory &);
void registerFunctionArrayNorm(FunctionFactory &);

void registerFunctionsArray(FunctionFactory & factory)
{
@ -75,6 +77,8 @@ void registerFunctionsArray(FunctionFactory & factory)
    registerFunctionArrayAUC(factory);
    registerFunctionMapOp(factory);
    registerFunctionMapPopulateSeries(factory);
    registerFunctionArrayDistance(factory);
    registerFunctionArrayNorm(factory);
}

}
@ -21,7 +21,7 @@ struct ExtractImpl
        res_data.reserve(data.size() / 5);
        res_offsets.resize(offsets.size());

        const auto & regexp = Regexps::get<false, false>(pattern);
        const auto & regexp = Regexps::get<false, false, false>(pattern);

        unsigned capture = regexp->getNumberOfSubpatterns() > 0 ? 1 : 0;
        OptimizedRegularExpression::MatchVec matches;

@ -95,7 +95,7 @@ public:
            throw Exception("Length of 'needle' argument must be greater than 0.", ErrorCodes::BAD_ARGUMENTS);

        using StringPiece = typename Regexps::Regexp::StringPieceType;
        auto holder = Regexps::get<false, false>(needle);
        auto holder = Regexps::get<false, false, false>(needle);
        const auto & regexp = holder->getRE2();

        if (!regexp)

@ -63,7 +63,7 @@ public:
        if (needle.empty())
            throw Exception(getName() + " length of 'needle' argument must be greater than 0.", ErrorCodes::BAD_ARGUMENTS);

        auto regexp = Regexps::get<false, false>(needle);
        auto regexp = Regexps::get<false, false, false>(needle);
        const auto & re2 = regexp->getRE2();

        if (!re2)

@ -11,6 +11,7 @@
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>
#include <Interpreters/castColumn.h>

#include <h3api.h>

@ -51,10 +52,10 @@ public:
                arg->getName(), 1, getName());

        arg = arguments[1].get();
        if (!WhichDataType(arg).isUInt16())
        if (!WhichDataType(arg).isNativeUInt())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be UInt16",
                "Illegal type {} of argument {} of function {}. Must be unsigned native integer.",
                arg->getName(),
                2,
                getName());
@ -80,7 +81,8 @@ public:
        const auto & data_hindex = col_hindex->getData();

        /// ColumnUInt16 is sufficient as the max value of 2nd arg is checked (arg > 0 < 10000) in implementation below
        const auto * col_k = checkAndGetColumn<ColumnUInt16>(non_const_arguments[1].column.get());
        auto cast_result = castColumnAccurate(non_const_arguments[1], std::make_shared<DataTypeUInt16>());
        const auto * col_k = checkAndGetColumn<ColumnUInt16>(cast_result.get());
        if (!col_k)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,

@ -74,8 +74,12 @@ namespace
        if (https)
        {
#if USE_SSL
            /// Cannot resolve host in advance, otherwise SNI won't work in Poco.
            session = std::make_shared<Poco::Net::HTTPSClientSession>(host, port);
            String resolved_host = resolve_host ? DNSResolver::instance().resolveHost(host).toString() : host;
            auto https_session = std::make_shared<Poco::Net::HTTPSClientSession>(host, port);
            if (resolve_host)
                https_session->setResolvedHost(DNSResolver::instance().resolveHost(host).toString());

            session = std::move(https_session);
#else
            throw Exception("ClickHouse was built without HTTPS support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
#endif
@ -68,10 +68,9 @@ bool Progress::incrementPiecewiseAtomically(const Progress & rhs)
{
    read_rows += rhs.read_rows;
    read_bytes += rhs.read_bytes;
    read_raw_bytes += rhs.read_raw_bytes;

    total_rows_to_read += rhs.total_rows_to_read;
    total_raw_bytes_to_read += rhs.total_raw_bytes_to_read;
    total_bytes_to_read += rhs.total_bytes_to_read;

    written_rows += rhs.written_rows;
    written_bytes += rhs.written_bytes;
@ -83,10 +82,9 @@ void Progress::reset()
{
    read_rows = 0;
    read_bytes = 0;
    read_raw_bytes = 0;

    total_rows_to_read = 0;
    total_raw_bytes_to_read = 0;
    total_bytes_to_read = 0;

    written_rows = 0;
    written_bytes = 0;
@ -98,10 +96,9 @@ ProgressValues Progress::getValues() const

    res.read_rows = read_rows.load(std::memory_order_relaxed);
    res.read_bytes = read_bytes.load(std::memory_order_relaxed);
    res.read_raw_bytes = read_raw_bytes.load(std::memory_order_relaxed);

    res.total_rows_to_read = total_rows_to_read.load(std::memory_order_relaxed);
    res.total_raw_bytes_to_read = total_raw_bytes_to_read.load(std::memory_order_relaxed);
    res.total_bytes_to_read = total_bytes_to_read.load(std::memory_order_relaxed);

    res.written_rows = written_rows.load(std::memory_order_relaxed);
    res.written_bytes = written_bytes.load(std::memory_order_relaxed);
@ -109,16 +106,31 @@ ProgressValues Progress::getValues() const
    return res;
}

ProgressValues Progress::fetchAndResetPiecewiseAtomically()
ProgressValues Progress::fetchValuesAndResetPiecewiseAtomically()
{
    ProgressValues res;

    res.read_rows = read_rows.fetch_and(0);
    res.read_bytes = read_bytes.fetch_and(0);
    res.read_raw_bytes = read_raw_bytes.fetch_and(0);

    res.total_rows_to_read = total_rows_to_read.fetch_and(0);
    res.total_raw_bytes_to_read = total_raw_bytes_to_read.fetch_and(0);
    res.total_bytes_to_read = total_bytes_to_read.fetch_and(0);

    res.written_rows = written_rows.fetch_and(0);
    res.written_bytes = written_bytes.fetch_and(0);

    return res;
}

Progress Progress::fetchAndResetPiecewiseAtomically()
{
    Progress res;

    res.read_rows = read_rows.fetch_and(0);
    res.read_bytes = read_bytes.fetch_and(0);

    res.total_rows_to_read = total_rows_to_read.fetch_and(0);
    res.total_bytes_to_read = total_bytes_to_read.fetch_and(0);

    res.written_rows = written_rows.fetch_and(0);
    res.written_bytes = written_bytes.fetch_and(0);
@ -130,10 +142,9 @@ Progress & Progress::operator=(Progress && other) noexcept
{
    read_rows = other.read_rows.load(std::memory_order_relaxed);
    read_bytes = other.read_bytes.load(std::memory_order_relaxed);
    read_raw_bytes = other.read_raw_bytes.load(std::memory_order_relaxed);

    total_rows_to_read = other.total_rows_to_read.load(std::memory_order_relaxed);
    total_raw_bytes_to_read = other.total_raw_bytes_to_read.load(std::memory_order_relaxed);
    total_bytes_to_read = other.total_bytes_to_read.load(std::memory_order_relaxed);

    written_rows = other.written_rows.load(std::memory_order_relaxed);
    written_bytes = other.written_bytes.load(std::memory_order_relaxed);
@ -149,6 +160,7 @@ void Progress::read(ReadBuffer & in, UInt64 server_revision)
    read_rows.store(values.read_rows, std::memory_order_relaxed);
    read_bytes.store(values.read_bytes, std::memory_order_relaxed);
    total_rows_to_read.store(values.total_rows_to_read, std::memory_order_relaxed);

    written_rows.store(values.written_rows, std::memory_order_relaxed);
    written_bytes.store(values.written_bytes, std::memory_order_relaxed);
}
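Both reset variants above rely on std::atomic's fetch_and(0), which returns the previous value and zeroes the counter in a single atomic step, so no concurrent increment can slip in between reading and resetting. A tiny standalone sketch of the idiom (not ClickHouse code):

    #include <atomic>
    #include <cassert>

    int main()
    {
        std::atomic<size_t> read_rows{42};

        // fetch_and(0) returns the old value and leaves 0 behind, atomically.
        size_t snapshot = read_rows.fetch_and(0);

        assert(snapshot == 42);
        assert(read_rows.load() == 0);
    }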
@ -18,10 +18,9 @@ struct ProgressValues
{
    size_t read_rows;
    size_t read_bytes;
    size_t read_raw_bytes;

    size_t total_rows_to_read;
    size_t total_raw_bytes_to_read;
    size_t total_bytes_to_read;

    size_t written_rows;
    size_t written_bytes;
@ -68,15 +67,12 @@ struct Progress
{
    std::atomic<size_t> read_rows {0}; /// Rows (source) processed.
    std::atomic<size_t> read_bytes {0}; /// Bytes (uncompressed, source) processed.
    std::atomic<size_t> read_raw_bytes {0}; /// Raw bytes processed.

    /** How much rows/bytes must be processed, in total, approximately. Non-zero value is sent when there is information about
      * some new part of job. Received values must be summed to get estimate of total rows to process.
      * `total_raw_bytes_to_process` is used for file table engine or when reading from file descriptor.
      * Used for rendering progress bar on client.
      */
    std::atomic<size_t> total_rows_to_read {0};
    std::atomic<size_t> total_raw_bytes_to_read {0};
    std::atomic<size_t> total_bytes_to_read {0};

    std::atomic<size_t> written_rows {0};
    std::atomic<size_t> written_bytes {0};
@ -93,7 +89,7 @@ struct Progress
        : written_rows(write_progress.written_rows), written_bytes(write_progress.written_bytes) {}

    explicit Progress(FileProgress file_progress)
        : read_raw_bytes(file_progress.read_bytes), total_raw_bytes_to_read(file_progress.total_bytes_to_read) {}
        : read_bytes(file_progress.read_bytes), total_bytes_to_read(file_progress.total_bytes_to_read) {}

    void read(ReadBuffer & in, UInt64 server_revision);

@ -109,7 +105,9 @@ struct Progress

    ProgressValues getValues() const;

    ProgressValues fetchAndResetPiecewiseAtomically();
    ProgressValues fetchValuesAndResetPiecewiseAtomically();

    Progress fetchAndResetPiecewiseAtomically();

    Progress & operator=(Progress && other) noexcept;

@ -50,8 +50,6 @@ public:
        return file_name;
    }

    Range getRemainingReadRange() const override { return Range{ .left = file_offset_of_buffer_end, .right = std::nullopt }; }

    size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; }
};

@ -49,6 +49,8 @@ public:
        return file_offset_of_buffer_end - (working_buffer.end() - pos);
    }

    Range getRemainingReadRange() const override { return Range{ .left = file_offset_of_buffer_end, .right = std::nullopt }; }

    /// If 'offset' is small enough to stay in buffer after seek, then true seek in file does not happen.
    off_t seek(off_t off, int whence) override;

@ -57,7 +57,7 @@ struct ReadSettings
    /// Method to use reading from local filesystem.
    LocalFSReadMethod local_fs_method = LocalFSReadMethod::pread;
    /// Method to use reading from remote filesystem.
    RemoteFSReadMethod remote_fs_method = RemoteFSReadMethod::read;
    RemoteFSReadMethod remote_fs_method = RemoteFSReadMethod::threadpool;

    size_t local_fs_buffer_size = DBMS_DEFAULT_BUFFER_SIZE;
    size_t remote_fs_buffer_size = DBMS_DEFAULT_BUFFER_SIZE;
@ -80,6 +80,7 @@ struct ReadSettings
    bool enable_filesystem_cache = true;
    size_t filesystem_cache_max_wait_sec = 1;
    bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false;
    bool enable_filesystem_cache_log = false;

    size_t remote_read_min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE;

@ -21,7 +21,7 @@
namespace ProfileEvents
{
    extern const Event WriteBufferFromS3Bytes;
    extern const Event RemoteFSCacheDownloadBytes;
    extern const Event CachedReadBufferCacheWriteBytes;
}

namespace DB
@ -490,7 +490,7 @@ void WriteBufferFromS3::finalizeCacheIfNeeded(std::optional<FileSegmentsHolder>
            size_t size = (*file_segment_it)->finalizeWrite();
            file_segment_it = file_segments.erase(file_segment_it);

            ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, size);
            ProfileEvents::increment(ProfileEvents::CachedReadBufferCacheWriteBytes, size);
        }
        catch (...)
        {
@ -214,7 +214,7 @@ public:
    ActionsDAGPtr clone() const;

    /// Execute actions for header. Input block must have empty columns.
    /// Result should be equal to the execution of ExpressionActions build form this DAG.
    /// Result should be equal to the execution of ExpressionActions built from this DAG.
    /// Actions are not changed, no expressions are compiled.
    ///
    /// In addition, check that result constants are constants according to DAG.
@ -18,6 +18,7 @@
#include <Common/ColumnsHashing.h>
#include <Common/assert_cast.h>
#include <Common/filesystemHelpers.h>
#include <Core/ColumnNumbers.h>

#include <QueryPipeline/SizeLimits.h>

@ -991,9 +992,13 @@ public:
    }

    /// Only parameters that matter during merge.
    Params(const Block & intermediate_header_,
        const ColumnNumbers & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_)
        : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0)
    Params(
        const Block & intermediate_header_,
        const ColumnNumbers & keys_,
        const AggregateDescriptions & aggregates_,
        bool overflow_row_,
        size_t max_threads_)
        : Params(Block(), keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, {}, {})
    {
        intermediate_header = intermediate_header_;
    }
@ -16,25 +16,27 @@
#include <Parsers/IAST_fwd.h>
#include <Parsers/parseQuery.h>
#include <Common/Exception.h>
#include <Common/WeakHash.h>
#include <Common/typeid_cast.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int SET_SIZE_LIMIT_EXCEEDED;
    extern const int BAD_ARGUMENTS;
}
namespace JoinStuff
{

ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr<TableJoin> table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_)
    : context(context_)
    , table_join(table_join_)
    , slots(slots_)
{
    if (!slots_ || slots_ >= 256)
    if (slots < 1 || 255 < slots)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid argument slot : {}", slots_);
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of slots should be [1, 255], got {}", slots);
    }

    for (size_t i = 0; i < slots; ++i)
@ -43,36 +45,44 @@ ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr<Tabl
        inner_hash_join->data = std::make_unique<HashJoin>(table_join_, right_sample_block, any_take_last_row_);
        hash_joins.emplace_back(std::move(inner_hash_join));
    }

}

bool ConcurrentHashJoin::addJoinedBlock(const Block & block, bool check_limits)
bool ConcurrentHashJoin::addJoinedBlock(const Block & right_block, bool check_limits)
{
    Blocks dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, block);
    Blocks dispatched_blocks = dispatchBlock(table_join->getOnlyClause().key_names_right, right_block);

    std::list<size_t> pending_blocks;
    for (size_t i = 0; i < dispatched_blocks.size(); ++i)
        pending_blocks.emplace_back(i);
    while (!pending_blocks.empty())
    size_t blocks_left = 0;
    for (const auto & block : dispatched_blocks)
    {
        for (auto iter = pending_blocks.begin(); iter != pending_blocks.end();)
        if (block)
        {
            ++blocks_left;
        }
    }

    while (blocks_left > 0)
    {
        /// insert blocks into corresponding HashJoin instances
        for (size_t i = 0; i < dispatched_blocks.size(); ++i)
        {
            auto & i = *iter;
            auto & hash_join = hash_joins[i];
            auto & dispatched_block = dispatched_blocks[i];
            if (hash_join->mutex.try_lock())
            {
                if (!hash_join->data->addJoinedBlock(dispatched_block, check_limits))
                {
                    hash_join->mutex.unlock();
                    return false;
                }

                hash_join->mutex.unlock();
                iter = pending_blocks.erase(iter);
            if (dispatched_block)
            {
                /// if current hash_join is already processed by another thread, skip it and try later
                std::unique_lock<std::mutex> lock(hash_join->mutex, std::try_to_lock);
                if (!lock.owns_lock())
                    continue;

                bool limit_exceeded = !hash_join->data->addJoinedBlock(dispatched_block, check_limits);

                dispatched_block = {};
                blocks_left--;

                if (limit_exceeded)
                    return false;
            }
            else
                iter++;
        }
    }

@ -161,30 +171,32 @@ std::shared_ptr<NotJoinedBlocks> ConcurrentHashJoin::getNonJoinedBlocks(
    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid join type. join kind: {}, strictness: {}", table_join->kind(), table_join->strictness());
}

static IColumn::Selector hashToSelector(const WeakHash32 & hash, size_t num_shards)
{
    const auto & data = hash.getData();
    size_t num_rows = data.size();

    IColumn::Selector selector(num_rows);
    for (size_t i = 0; i < num_rows; ++i)
        selector[i] = data[i] % num_shards;
    return selector;
}

Blocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, const Block & from_block)
{
    Blocks result;

    size_t num_shards = hash_joins.size();
    size_t num_rows = from_block.rows();
    size_t num_cols = from_block.columns();

    ColumnRawPtrs key_cols;
    WeakHash32 hash(num_rows);
    for (const auto & key_name : key_columns_names)
    {
        key_cols.push_back(from_block.getByName(key_name).column.get());
    }
    IColumn::Selector selector(num_rows);
    for (size_t i = 0; i < num_rows; ++i)
    {
        SipHash hash;
        for (const auto & key_col : key_cols)
        {
            key_col->updateHashWithValue(i, hash);
        }
        selector[i] = hash.get64() % num_shards;
        const auto & key_col = from_block.getByName(key_name).column;
        key_col->updateWeakHash32(hash);
    }
    auto selector = hashToSelector(hash, num_shards);

    Blocks result;
    for (size_t i = 0; i < num_shards; ++i)
    {
        result.emplace_back(from_block.cloneEmpty());
@ -203,4 +215,3 @@ Blocks ConcurrentHashJoin::dispatchBlock(const Strings & key_columns_names, cons
}

}
}
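The reworked addJoinedBlock() above replaces manual try_lock()/unlock() with std::unique_lock plus std::try_to_lock, so a shard that is busy in another thread is simply skipped and retried on the next sweep instead of blocking or leaking a lock on early return. A minimal standalone sketch of that pattern (illustrative only, single-threaded driver):

    #include <mutex>
    #include <vector>
    #include <cstdio>

    struct Shard
    {
        std::mutex mutex;
        int pending = 1; // pretend each shard has one block left to insert
    };

    int main()
    {
        std::vector<Shard> shards(4);
        int blocks_left = 4;

        // Keep sweeping over the shards; skip any shard someone else holds.
        while (blocks_left > 0)
        {
            for (auto & shard : shards)
            {
                if (!shard.pending)
                    continue;

                std::unique_lock<std::mutex> lock(shard.mutex, std::try_to_lock);
                if (!lock.owns_lock())
                    continue; // busy: try again on the next sweep

                shard.pending = 0; // "insert the block" while holding the lock
                --blocks_left;
            }                      // the lock is released automatically here
        }
        std::printf("all shards processed\n");
    }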
@ -15,8 +15,7 @@

namespace DB
{
namespace JoinStuff
{

/**
 * Can run addJoinedBlock() parallelly to speedup the join process. On test, it almose linear speedup by
 * the degree of parallelism.
@ -33,6 +32,7 @@ namespace JoinStuff
 */
class ConcurrentHashJoin : public IJoin
{

public:
    explicit ConcurrentHashJoin(ContextPtr context_, std::shared_ptr<TableJoin> table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_ = false);
    ~ConcurrentHashJoin() override = default;
@ -49,6 +49,7 @@ public:
    bool supportParallelJoin() const override { return true; }
    std::shared_ptr<NotJoinedBlocks>
    getNonJoinedBlocks(const Block & left_sample_block, const Block & result_sample_block, UInt64 max_block_size) const override;

private:
    struct InternalHashJoin
    {
@ -71,5 +72,5 @@ private:
    Blocks dispatchBlock(const Strings & key_columns_names, const Block & from_block);

};
}

}
@ -535,6 +535,7 @@ ContextMutablePtr Context::createCopy(const ContextMutablePtr & other)
Context::~Context() = default;

InterserverIOHandler & Context::getInterserverIOHandler() { return shared->interserver_io_handler; }
const InterserverIOHandler & Context::getInterserverIOHandler() const { return shared->interserver_io_handler; }

std::unique_lock<std::recursive_mutex> Context::getLock() const
{
@ -2226,7 +2227,7 @@ bool Context::hasAuxiliaryZooKeeper(const String & name) const
    return getConfigRef().has("auxiliary_zookeepers." + name);
}

InterserverCredentialsPtr Context::getInterserverCredentials()
InterserverCredentialsPtr Context::getInterserverCredentials() const
{
    return shared->interserver_io_credentials.get();
}
@ -3417,6 +3418,7 @@ ReadSettings Context::getReadSettings() const
    res.enable_filesystem_cache = settings.enable_filesystem_cache;
    res.filesystem_cache_max_wait_sec = settings.filesystem_cache_max_wait_sec;
    res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache;
    res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log;

    res.remote_read_min_bytes_for_seek = settings.remote_read_min_bytes_for_seek;

@ -612,6 +612,7 @@ public:
    OutputFormatPtr getOutputFormatParallelIfPossible(const String & name, WriteBuffer & buf, const Block & sample) const;

    InterserverIOHandler & getInterserverIOHandler();
    const InterserverIOHandler & getInterserverIOHandler() const;

    /// How other servers can access this for downloading replicated data.
    void setInterserverIOAddress(const String & host, UInt16 port);
@ -619,7 +620,7 @@ public:

    /// Credentials which server will use to communicate with others
    void updateInterserverCredentials(const Poco::Util::AbstractConfiguration & config);
    InterserverCredentialsPtr getInterserverCredentials();
    InterserverCredentialsPtr getInterserverCredentials() const;

    /// Interserver requests scheme (http or https)
    void setInterserverScheme(const String & scheme);
@ -43,7 +43,9 @@
|
||||
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <Core/NamesAndTypes.h>
|
||||
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeFactory.h>
|
||||
|
||||
#include <Interpreters/ActionsVisitor.h>
|
||||
@ -325,6 +327,12 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions)
|
||||
{
|
||||
NameSet unique_keys;
|
||||
ASTs & group_asts = group_by_ast->children;
|
||||
|
||||
/// For GROUPING SETS with multiple groups we always add virtual __grouping_set column
|
||||
/// With set number, which is used as an additional key at the stage of merging aggregating data.
|
||||
if (select_query->group_by_with_grouping_sets && group_asts.size() > 1)
|
||||
aggregated_columns.emplace_back("__grouping_set", std::make_shared<DataTypeUInt64>());
|
||||
|
||||
for (ssize_t i = 0; i < static_cast<ssize_t>(group_asts.size()); ++i)
|
||||
{
|
||||
ssize_t size = group_asts.size();
|
||||
@ -332,46 +340,105 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions)
|
||||
if (getContext()->getSettingsRef().enable_positional_arguments)
|
||||
replaceForPositionalArguments(group_asts[i], select_query, ASTSelectQuery::Expression::GROUP_BY);
|
||||
|
||||
getRootActionsNoMakeSet(group_asts[i], temp_actions, false);
|
||||
|
||||
const auto & column_name = group_asts[i]->getColumnName();
|
||||
|
||||
const auto * node = temp_actions->tryFindInIndex(column_name);
|
||||
if (!node)
|
||||
throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER);
|
||||
|
||||
/// Only removes constant keys if it's an initiator or distributed_group_by_no_merge is enabled.
|
||||
if (getContext()->getClientInfo().distributed_depth == 0 || settings.distributed_group_by_no_merge > 0)
|
||||
if (select_query->group_by_with_grouping_sets)
|
||||
{
|
||||
/// Constant expressions have non-null column pointer at this stage.
|
||||
if (node->column && isColumnConst(*node->column))
|
||||
ASTs group_elements_ast;
|
||||
const ASTExpressionList * group_ast_element = group_asts[i]->as<const ASTExpressionList>();
|
||||
group_elements_ast = group_ast_element->children;
|
||||
|
||||
NamesAndTypesList grouping_set_list;
|
||||
|
||||
for (ssize_t j = 0; j < ssize_t(group_elements_ast.size()); ++j)
|
||||
{
|
||||
select_query->group_by_with_constant_keys = true;
|
||||
getRootActionsNoMakeSet(group_elements_ast[j], temp_actions, false);
|
||||
|
||||
/// But don't remove last key column if no aggregate functions, otherwise aggregation will not work.
|
||||
if (!aggregate_descriptions.empty() || size > 1)
|
||||
ssize_t group_size = group_elements_ast.size();
|
||||
const auto & column_name = group_elements_ast[j]->getColumnName();
|
||||
const auto * node = temp_actions->tryFindInIndex(column_name);
|
||||
if (!node)
|
||||
throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER);
|
||||
|
||||
/// Only removes constant keys if it's an initiator or distributed_group_by_no_merge is enabled.
|
||||
if (getContext()->getClientInfo().distributed_depth == 0 || settings.distributed_group_by_no_merge > 0)
|
||||
{
|
||||
if (i + 1 < static_cast<ssize_t>(size))
|
||||
group_asts[i] = std::move(group_asts.back());
|
||||
/// Constant expressions have non-null column pointer at this stage.
|
||||
if (node->column && isColumnConst(*node->column))
|
||||
{
|
||||
select_query->group_by_with_constant_keys = true;
|
||||
|
||||
group_asts.pop_back();
|
||||
/// But don't remove last key column if no aggregate functions, otherwise aggregation will not work.
|
||||
if (!aggregate_descriptions.empty() || group_size > 1)
|
||||
{
|
||||
if (j + 1 < static_cast<ssize_t>(group_size))
|
||||
group_elements_ast[j] = std::move(group_elements_ast.back());
|
||||
|
||||
--i;
|
||||
continue;
|
||||
group_elements_ast.pop_back();
|
||||
|
||||
--j;
continue;
}
}
}

NameAndTypePair key{column_name, node->result_type};

grouping_set_list.push_back(key);

/// Aggregation keys are unique.
if (!unique_keys.contains(key.name))
{
unique_keys.insert(key.name);
aggregation_keys.push_back(key);

/// Key is no longer needed, therefore we can save a little by moving it.
aggregated_columns.push_back(std::move(key));
}
}

aggregation_keys_list.push_back(std::move(grouping_set_list));
}

NameAndTypePair key{column_name, node->result_type};

/// Aggregation keys are uniqued.
if (!unique_keys.contains(key.name))
else
{
unique_keys.insert(key.name);
aggregation_keys.push_back(key);
getRootActionsNoMakeSet(group_asts[i], temp_actions, false);

/// Key is no longer needed, therefore we can save a little by moving it.
aggregated_columns.push_back(std::move(key));
const auto & column_name = group_asts[i]->getColumnName();
const auto * node = temp_actions->tryFindInIndex(column_name);
if (!node)
throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER);

/// Only removes constant keys if it's an initiator or distributed_group_by_no_merge is enabled.
if (getContext()->getClientInfo().distributed_depth == 0 || settings.distributed_group_by_no_merge > 0)
{
/// Constant expressions have non-null column pointer at this stage.
if (node->column && isColumnConst(*node->column))
{
select_query->group_by_with_constant_keys = true;

/// But don't remove last key column if no aggregate functions, otherwise aggregation will not work.
if (!aggregate_descriptions.empty() || size > 1)
{
if (i + 1 < static_cast<ssize_t>(size))
group_asts[i] = std::move(group_asts.back());

group_asts.pop_back();

--i;
continue;
}
}
}

NameAndTypePair key{column_name, node->result_type};

/// Aggregation keys are uniqued.
if (!unique_keys.contains(key.name))
{
unique_keys.insert(key.name);
aggregation_keys.push_back(key);

/// Key is no longer needed, therefore we can save a little by moving it.
aggregated_columns.push_back(std::move(key));
}
}
}

@ -726,6 +793,8 @@ void makeWindowDescriptionFromAST(const Context & context,

void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions)
{
auto current_context = getContext();

// Window definitions from the WINDOW clause
const auto * select_query = query->as<ASTSelectQuery>();
if (select_query && select_query->window())

@ -735,7 +804,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions)
const auto & elem = ptr->as<const ASTWindowListElement &>();
WindowDescription desc;
desc.window_name = elem.name;
makeWindowDescriptionFromAST(*getContext(), window_descriptions,
makeWindowDescriptionFromAST(*current_context, window_descriptions,
desc, elem.definition.get());

auto [it, inserted] = window_descriptions.insert(

@ -820,7 +889,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions)
const ASTWindowDefinition &>();
WindowDescription desc;
desc.window_name = definition.getDefaultWindowName();
makeWindowDescriptionFromAST(*getContext(), window_descriptions,
makeWindowDescriptionFromAST(*current_context, window_descriptions,
desc, &definition);

auto [it, inserted] = window_descriptions.insert(

@ -835,6 +904,18 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions)
it->second.window_functions.push_back(window_function);
}
}

bool compile_sort_description = current_context->getSettingsRef().compile_sort_description;
size_t min_count_to_compile_sort_description = current_context->getSettingsRef().min_count_to_compile_sort_description;

for (auto & [_, window_description] : window_descriptions)
{
window_description.full_sort_description.compile_sort_description = compile_sort_description;
window_description.full_sort_description.min_count_to_compile_sort_description = min_count_to_compile_sort_description;

window_description.partition_by.compile_sort_description = compile_sort_description;
window_description.partition_by.min_count_to_compile_sort_description = min_count_to_compile_sort_description;
}
}

@ -939,7 +1020,7 @@ static std::shared_ptr<IJoin> chooseJoinAlgorithm(std::shared_ptr<TableJoin> ana
{
if (analyzed_join->allowParallelHashJoin())
{
return std::make_shared<JoinStuff::ConcurrentHashJoin>(context, analyzed_join, context->getSettings().max_threads, sample_block);
return std::make_shared<ConcurrentHashJoin>(context, analyzed_join, context->getSettings().max_threads, sample_block);
}
return std::make_shared<HashJoin>(analyzed_join, sample_block);
}

@ -1169,10 +1250,24 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain
ExpressionActionsChain::Step & step = chain.lastStep(columns_after_join);

ASTs asts = select_query->groupBy()->children;
for (const auto & ast : asts)
if (select_query->group_by_with_grouping_sets)
{
step.addRequiredOutput(ast->getColumnName());
getRootActions(ast, only_types, step.actions());
for (const auto & ast : asts)
{
for (const auto & ast_element : ast->children)
{
step.addRequiredOutput(ast_element->getColumnName());
getRootActions(ast_element, only_types, step.actions());
}
}
}
else
{
for (const auto & ast : asts)
{
step.addRequiredOutput(ast->getColumnName());
getRootActions(ast, only_types, step.actions());
}
}

if (optimize_aggregation_in_order)

@ -1584,6 +1679,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
, second_stage(second_stage_)
, need_aggregate(query_analyzer.hasAggregation())
, has_window(query_analyzer.hasWindow())
, use_grouping_set_key(query_analyzer.useGroupingSetKey())
{
/// first_stage: Do I need to perform the first part of the pipeline - running on remote servers during distributed processing.
/// second_stage: Do I need to execute the second part of the pipeline - running on the initiating server during distributed processing.
@ -64,6 +64,7 @@ struct ExpressionAnalyzerData

bool has_aggregation = false;
NamesAndTypesList aggregation_keys;
NamesAndTypesLists aggregation_keys_list;
bool has_const_aggregation_keys = false;
AggregateDescriptions aggregate_descriptions;

@ -221,6 +222,8 @@ struct ExpressionAnalysisResult
bool optimize_aggregation_in_order = false;
bool join_has_delayed_stream = false;

bool use_grouping_set_key = false;

ActionsDAGPtr before_array_join;
ArrayJoinActionPtr array_join;
ActionsDAGPtr before_join;

@ -321,8 +324,11 @@ public:
bool hasGlobalSubqueries() { return has_global_subqueries; }
bool hasTableJoin() const { return syntax->ast_join; }

bool useGroupingSetKey() const { return aggregation_keys_list.size() > 1; }

const NamesAndTypesList & aggregationKeys() const { return aggregation_keys; }
bool hasConstAggregationKeys() const { return has_const_aggregation_keys; }
const NamesAndTypesLists & aggregationKeysList() const { return aggregation_keys_list; }
const AggregateDescriptions & aggregates() const { return aggregate_descriptions; }

std::unique_ptr<QueryPlan> getJoinedPlan();
@ -2,6 +2,8 @@
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeMap.h>
#include <Interpreters/ProfileEventsExt.h>
#include <Interpreters/FilesystemCacheLog.h>

@ -34,9 +36,13 @@ NamesAndTypesList FilesystemCacheLogElement::getNamesAndTypes()
{"event_time", std::make_shared<DataTypeDateTime>()},
{"query_id", std::make_shared<DataTypeString>()},
{"source_file_path", std::make_shared<DataTypeString>()},
{"file_segment_range", std::make_shared<DataTypeTuple>(std::move(types))},
{"file_segment_range", std::make_shared<DataTypeTuple>(types)},
{"total_requested_range", std::make_shared<DataTypeTuple>(types)},
{"size", std::make_shared<DataTypeUInt64>()},
{"read_type", std::make_shared<DataTypeString>()},
{"cache_attempted", std::make_shared<DataTypeUInt8>()},
{"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
{"read_buffer_id", std::make_shared<DataTypeString>()},
};
}

@ -51,8 +57,22 @@ void FilesystemCacheLogElement::appendToBlock(MutableColumns & columns) const

columns[i++]->insert(source_file_path);
columns[i++]->insert(Tuple{file_segment_range.first, file_segment_range.second});
columns[i++]->insert(Tuple{requested_range.first, requested_range.second});
columns[i++]->insert(file_segment_size);
columns[i++]->insert(typeToString(read_type));
columns[i++]->insert(cache_attempted);

if (profile_counters)
{
auto * column = columns[i++].get();
ProfileEvents::dumpToMapColumn(*profile_counters, column, true);
}
else
{
columns[i++]->insertDefault();
}

columns[i++]->insert(read_buffer_id);
}

};
@ -37,8 +37,12 @@ struct FilesystemCacheLogElement
String source_file_path;

std::pair<size_t, size_t> file_segment_range{};
std::pair<size_t, size_t> requested_range{};
ReadType read_type{};
size_t file_segment_size;
bool cache_attempted;
String read_buffer_id;
std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters;

static std::string name() { return "FilesystemCacheLog"; }
@ -81,6 +81,8 @@
#include <Common/FieldVisitorToString.h>
#include <Common/typeid_cast.h>
#include <Common/checkStackSize.h>
#include <Core/ColumnNumbers.h>
#include <Interpreters/Aggregator.h>
#include <base/map.h>
#include <Common/scope_guard_safe.h>
#include <memory>

@ -736,6 +738,9 @@ Block InterpreterSelectQuery::getSampleBlockImpl()

Block res;

if (analysis_result.use_grouping_set_key)
res.insert({ nullptr, std::make_shared<DataTypeUInt64>(), "__grouping_set" });

for (const auto & key : query_analyzer->aggregationKeys())
res.insert({nullptr, header.getByName(key.name).type, key.name});

@ -858,6 +863,9 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP
order_descr.emplace_back(name, order_by_elem.direction, order_by_elem.nulls_direction, collator);
}

order_descr.compile_sort_description = context->getSettingsRef().compile_sort_description;
order_descr.min_count_to_compile_sort_description = context->getSettingsRef().min_count_to_compile_sort_description;

return order_descr;
}

@ -1082,6 +1090,11 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
options.to_stage > QueryProcessingStage::WithMergeableState &&
!query.group_by_with_totals && !query.group_by_with_rollup && !query.group_by_with_cube;

bool use_grouping_set_key = expressions.use_grouping_set_key;

if (query.group_by_with_grouping_sets && query.group_by_with_totals)
throw Exception("WITH TOTALS and GROUPING SETS are not supported together", ErrorCodes::NOT_IMPLEMENTED);

if (query_info.projection && query_info.projection->desc->type == ProjectionDescription::Type::Aggregate)
{
query_info.projection->aggregate_overflow_row = aggregate_overflow_row;

@ -1196,7 +1209,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P

preliminary_sort();
if (expressions.need_aggregate)
executeMergeAggregated(query_plan, aggregate_overflow_row, aggregate_final);
executeMergeAggregated(query_plan, aggregate_overflow_row, aggregate_final, use_grouping_set_key);
}
if (from_aggregation_stage)
{

@ -1301,6 +1314,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
{
executeAggregation(
query_plan, expressions.before_aggregation, aggregate_overflow_row, aggregate_final, query_info.input_order_info);

/// We need to reset input order info, so that executeOrder can't use it
query_info.input_order_info.reset();
if (query_info.projection)

@ -1356,7 +1370,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
{
/// If you need to combine aggregated results from multiple servers
if (!expressions.first_stage)
executeMergeAggregated(query_plan, aggregate_overflow_row, aggregate_final);
executeMergeAggregated(query_plan, aggregate_overflow_row, aggregate_final, use_grouping_set_key);

if (!aggregate_final)
{

@ -1372,20 +1386,18 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
else if (query.group_by_with_cube)
executeRollupOrCube(query_plan, Modificator::CUBE);

if ((query.group_by_with_rollup || query.group_by_with_cube) && expressions.hasHaving())
if ((query.group_by_with_rollup || query.group_by_with_cube || query.group_by_with_grouping_sets) && expressions.hasHaving())
{
if (query.group_by_with_totals)
throw Exception(
"WITH TOTALS and WITH ROLLUP or CUBE are not supported together in presence of HAVING",
ErrorCodes::NOT_IMPLEMENTED);
throw Exception("WITH TOTALS and WITH ROLLUP or CUBE or GROUPING SETS are not supported together in presence of HAVING", ErrorCodes::NOT_IMPLEMENTED);
executeHaving(query_plan, expressions.before_having, expressions.remove_having_filter);
}
}
else if (expressions.hasHaving())
executeHaving(query_plan, expressions.before_having, expressions.remove_having_filter);
}
else if (query.group_by_with_totals || query.group_by_with_rollup || query.group_by_with_cube)
throw Exception("WITH TOTALS, ROLLUP or CUBE are not supported without aggregation", ErrorCodes::NOT_IMPLEMENTED);
else if (query.group_by_with_totals || query.group_by_with_rollup || query.group_by_with_cube || query.group_by_with_grouping_sets)
throw Exception("WITH TOTALS, ROLLUP, CUBE or GROUPING SETS are not supported without aggregation", ErrorCodes::NOT_IMPLEMENTED);

// Now we must execute:
// 1) expressions before window functions,

@ -1565,6 +1577,7 @@ static void executeMergeAggregatedImpl(
bool overflow_row,
bool final,
bool is_remote_storage,
bool has_grouping_sets,
const Settings & settings,
const NamesAndTypesList & aggregation_keys,
const AggregateDescriptions & aggregates)

@ -1572,6 +1585,8 @@ static void executeMergeAggregatedImpl(
const auto & header_before_merge = query_plan.getCurrentDataStream().header;

ColumnNumbers keys;
if (has_grouping_sets)
keys.push_back(header_before_merge.getPositionByName("__grouping_set"));
for (const auto & key : aggregation_keys)
keys.push_back(header_before_merge.getPositionByName(key.name));

@ -1674,6 +1689,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(
query_info.projection->aggregate_overflow_row,
query_info.projection->aggregate_final,
false,
false,
context_->getSettingsRef(),
query_info.projection->aggregation_keys,
query_info.projection->aggregate_descriptions);

@ -2165,7 +2181,6 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
}
}

void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter)
{
auto where_step = std::make_unique<FilterStep>(

@ -2175,6 +2190,80 @@ void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsD
query_plan.addStep(std::move(where_step));
}
static Aggregator::Params getAggregatorParams(
const ASTPtr & query_ptr,
const SelectQueryExpressionAnalyzer & query_analyzer,
const Context & context,
const Block & current_data_stream_header,
const ColumnNumbers & keys,
const AggregateDescriptions & aggregates,
bool overflow_row, const Settings & settings,
size_t group_by_two_level_threshold, size_t group_by_two_level_threshold_bytes)
{
const auto stats_collecting_params = Aggregator::Params::StatsCollectingParams(
query_ptr,
settings.collect_hash_table_stats_during_aggregation,
settings.max_entries_for_hash_table_stats,
settings.max_size_to_preallocate_for_aggregation);

return Aggregator::Params{
current_data_stream_header,
keys,
aggregates,
overflow_row,
settings.max_rows_to_group_by,
settings.group_by_overflow_mode,
group_by_two_level_threshold,
group_by_two_level_threshold_bytes,
settings.max_bytes_before_external_group_by,
settings.empty_result_for_aggregation_by_empty_set
|| (settings.empty_result_for_aggregation_by_constant_keys_on_empty_set && keys.empty()
&& query_analyzer.hasConstAggregationKeys()),
context.getTemporaryVolume(),
settings.max_threads,
settings.min_free_disk_space_for_temporary_data,
settings.compile_aggregate_expressions,
settings.min_count_to_compile_aggregate_expression,
Block{},
stats_collecting_params
};
}

static GroupingSetsParamsList getAggregatorGroupingSetsParams(
const SelectQueryExpressionAnalyzer & query_analyzer,
const Block & header_before_aggregation,
const ColumnNumbers & all_keys
)
{
GroupingSetsParamsList result;
if (query_analyzer.useGroupingSetKey())
{
auto const & aggregation_keys_list = query_analyzer.aggregationKeysList();

ColumnNumbersList grouping_sets_with_keys;
ColumnNumbersList missing_columns_per_set;

for (const auto & aggregation_keys : aggregation_keys_list)
{
ColumnNumbers keys;
std::unordered_set<size_t> keys_set;
for (const auto & key : aggregation_keys)
{
keys.push_back(header_before_aggregation.getPositionByName(key.name));
keys_set.insert(keys.back());
}

ColumnNumbers missing_indexes;
for (size_t i = 0; i < all_keys.size(); ++i)
{
if (!keys_set.contains(all_keys[i]))
missing_indexes.push_back(i);
}
result.emplace_back(std::move(keys), std::move(missing_indexes));
}
}
return result;
}

void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool overflow_row, bool final, InputOrderInfoPtr group_by_info)
{

@ -2186,9 +2275,6 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
return;

const auto & header_before_aggregation = query_plan.getCurrentDataStream().header;
ColumnNumbers keys;
for (const auto & key : query_analyzer->aggregationKeys())
keys.push_back(header_before_aggregation.getPositionByName(key.name));

AggregateDescriptions aggregates = query_analyzer->aggregates();
for (auto & descr : aggregates)

@ -2198,32 +2284,14 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac

const Settings & settings = context->getSettingsRef();

const auto stats_collecting_params = Aggregator::Params::StatsCollectingParams(
query_ptr,
settings.collect_hash_table_stats_during_aggregation,
settings.max_entries_for_hash_table_stats,
settings.max_size_to_preallocate_for_aggregation);
ColumnNumbers keys;
for (const auto & key : query_analyzer->aggregationKeys())
keys.push_back(header_before_aggregation.getPositionByName(key.name));

Aggregator::Params params(
header_before_aggregation,
keys,
aggregates,
overflow_row,
settings.max_rows_to_group_by,
settings.group_by_overflow_mode,
settings.group_by_two_level_threshold,
settings.group_by_two_level_threshold_bytes,
settings.max_bytes_before_external_group_by,
settings.empty_result_for_aggregation_by_empty_set
|| (settings.empty_result_for_aggregation_by_constant_keys_on_empty_set && keys.empty()
&& query_analyzer->hasConstAggregationKeys()),
context->getTemporaryVolume(),
settings.max_threads,
settings.min_free_disk_space_for_temporary_data,
settings.compile_aggregate_expressions,
settings.min_count_to_compile_aggregate_expression,
Block{},
stats_collecting_params);
auto aggregator_params = getAggregatorParams(query_ptr, *query_analyzer, *context, header_before_aggregation, keys, aggregates, overflow_row, settings,
settings.group_by_two_level_threshold, settings.group_by_two_level_threshold_bytes);

auto grouping_sets_params = getAggregatorGroupingSetsParams(*query_analyzer, header_before_aggregation, keys);

SortDescription group_by_sort_description;

@ -2241,7 +2309,8 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac

auto aggregating_step = std::make_unique<AggregatingStep>(
query_plan.getCurrentDataStream(),
params,
std::move(aggregator_params),
std::move(grouping_sets_params),
final,
settings.max_block_size,
settings.aggregation_in_order_max_block_bytes,

@ -2250,11 +2319,10 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
storage_has_evenly_distributed_read,
std::move(group_by_info),
std::move(group_by_sort_description));

query_plan.addStep(std::move(aggregating_step));
}
void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final)
void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final, bool has_grouping_sets)
{
/// If aggregate projection was chosen for table, avoid adding MergeAggregated.
/// It is already added by storage (because of performance issues).

@ -2268,6 +2336,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool
overflow_row,
final,
storage && storage->isRemote(),
has_grouping_sets,
context->getSettingsRef(),
query_analyzer->aggregationKeys(),
query_analyzer->aggregates());

@ -2302,47 +2371,28 @@ void InterpreterSelectQuery::executeTotalsAndHaving(
query_plan.addStep(std::move(totals_having_step));
}

void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modificator modificator)
{
const auto & header_before_transform = query_plan.getCurrentDataStream().header;

ColumnNumbers keys;
const Settings & settings = context->getSettingsRef();

ColumnNumbers keys;
for (const auto & key : query_analyzer->aggregationKeys())
keys.push_back(header_before_transform.getPositionByName(key.name));

const Settings & settings = context->getSettingsRef();

Aggregator::Params params(
header_before_transform,
keys,
query_analyzer->aggregates(),
false,
settings.max_rows_to_group_by,
settings.group_by_overflow_mode,
0,
0,
settings.max_bytes_before_external_group_by,
settings.empty_result_for_aggregation_by_empty_set,
context->getTemporaryVolume(),
settings.max_threads,
settings.min_free_disk_space_for_temporary_data,
settings.compile_aggregate_expressions,
settings.min_count_to_compile_aggregate_expression);

auto transform_params = std::make_shared<AggregatingTransformParams>(params, true);
auto params = getAggregatorParams(query_ptr, *query_analyzer, *context, header_before_transform, keys, query_analyzer->aggregates(), false, settings, 0, 0);
auto transform_params = std::make_shared<AggregatingTransformParams>(std::move(params), true);

QueryPlanStepPtr step;
if (modificator == Modificator::ROLLUP)
step = std::make_unique<RollupStep>(query_plan.getCurrentDataStream(), std::move(transform_params));
else
else if (modificator == Modificator::CUBE)
step = std::make_unique<CubeStep>(query_plan.getCurrentDataStream(), std::move(transform_params));

query_plan.addStep(std::move(step));
}

void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const ActionsDAGPtr & expression, const std::string & description)
{
if (!expression)

@ -2405,26 +2455,25 @@ void InterpreterSelectQuery::executeWindow(QueryPlan & query_plan)
// Try to sort windows in such an order that the window with the longest
// sort description goes first, and all window that use its prefixes follow.
std::vector<const WindowDescription *> windows_sorted;
for (const auto & [_, w] : query_analyzer->windowDescriptions())
windows_sorted.push_back(&w);
for (const auto & [_, window] : query_analyzer->windowDescriptions())
windows_sorted.push_back(&window);

::sort(windows_sorted.begin(), windows_sorted.end(), windowDescriptionComparator);

const Settings & settings = context->getSettingsRef();
for (size_t i = 0; i < windows_sorted.size(); ++i)
{
const auto & w = *windows_sorted[i];
const auto & window = *windows_sorted[i];

// We don't need to sort again if the input from previous window already
// has suitable sorting. Also don't create sort steps when there are no
// columns to sort by, because the sort nodes are confused by this. It
// happens in case of `over ()`.
if (!w.full_sort_description.empty() && (i == 0 || !sortIsPrefix(w, *windows_sorted[i - 1])))
if (!window.full_sort_description.empty() && (i == 0 || !sortIsPrefix(window, *windows_sorted[i - 1])))
{

auto sorting_step = std::make_unique<SortingStep>(
query_plan.getCurrentDataStream(),
w.full_sort_description,
window.full_sort_description,
settings.max_block_size,
0 /* LIMIT */,
SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode),

@ -2433,12 +2482,12 @@ void InterpreterSelectQuery::executeWindow(QueryPlan & query_plan)
settings.max_bytes_before_external_sort,
context->getTemporaryVolume(),
settings.min_free_disk_space_for_temporary_data);
sorting_step->setStepDescription("Sorting for window '" + w.window_name + "'");
sorting_step->setStepDescription("Sorting for window '" + window.window_name + "'");
query_plan.addStep(std::move(sorting_step));
}

auto window_step = std::make_unique<WindowStep>(query_plan.getCurrentDataStream(), w, w.window_functions);
window_step->setStepDescription("Window step for window '" + w.window_name + "'");
auto window_step = std::make_unique<WindowStep>(query_plan.getCurrentDataStream(), window, window.window_functions);
window_step->setStepDescription("Window step for window '" + window.window_name + "'");

query_plan.addStep(std::move(window_step));
}

@ -2510,8 +2559,7 @@ void InterpreterSelectQuery::executeMergeSorted(QueryPlan & query_plan, const So
{
const Settings & settings = context->getSettingsRef();

auto merging_sorted
= std::make_unique<SortingStep>(query_plan.getCurrentDataStream(), sort_description, settings.max_block_size, limit);
auto merging_sorted = std::make_unique<SortingStep>(query_plan.getCurrentDataStream(), sort_description, settings.max_block_size, limit);

merging_sorted->setStepDescription("Merge sorted streams " + description);
query_plan.addStep(std::move(merging_sorted));
@ -27,6 +27,9 @@ class InterpreterSelectWithUnionQuery;
class Context;
class QueryPlan;

struct GroupingSetsParams;
using GroupingSetsParamsList = std::vector<GroupingSetsParams>;

struct TreeRewriterResult;
using TreeRewriterResultPtr = std::shared_ptr<const TreeRewriterResult>;

@ -140,12 +143,11 @@ private:
void executeImpl(QueryPlan & query_plan, std::optional<Pipe> prepared_pipe);

/// Different stages of query execution.

void executeFetchColumns(QueryProcessingStage::Enum processing_stage, QueryPlan & query_plan);
void executeWhere(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter);
void executeAggregation(
QueryPlan & query_plan, const ActionsDAGPtr & expression, bool overflow_row, bool final, InputOrderInfoPtr group_by_info);
void executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final);
void executeMergeAggregated(QueryPlan & query_plan, bool overflow_row, bool final, bool has_grouping_sets);
void executeTotalsAndHaving(QueryPlan & query_plan, bool has_having, const ActionsDAGPtr & expression, bool remove_filter, bool overflow_row, bool final);
void executeHaving(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter);
static void executeExpression(QueryPlan & query_plan, const ActionsDAGPtr & expression, const std::string & description);

@ -171,7 +173,7 @@ private:
enum class Modificator
{
ROLLUP = 0,
CUBE = 1
CUBE = 1,
};

void executeRollupOrCube(QueryPlan & query_plan, Modificator modificator);
@ -312,12 +312,12 @@ BlockIO InterpreterSystemQuery::execute()
{
auto caches = FileCacheFactory::instance().getAll();
for (const auto & [_, cache_data] : caches)
cache_data.cache->remove(query.force_removal);
cache_data.cache->remove();
}
else
{
auto cache = FileCacheFactory::instance().get(query.filesystem_cache_path);
cache->remove(query.force_removal);
cache->remove();
}
break;
}
@ -69,7 +69,7 @@ public:
return endpoint_map.erase(name);
}

InterserverIOEndpointPtr getEndpoint(const String & name)
InterserverIOEndpointPtr getEndpoint(const String & name) const
try
{
std::lock_guard lock(mutex);

@ -84,7 +84,7 @@ private:
using EndpointMap = std::map<String, InterserverIOEndpointPtr>;

EndpointMap endpoint_map;
std::mutex mutex;
mutable std::mutex mutex;
};

}
@ -775,6 +775,133 @@ CompiledAggregateFunctions compileAggregateFunctions(CHJIT & jit, const std::vec
return compiled_aggregate_functions;
}

CompiledSortDescriptionFunction compileSortDescription(
CHJIT & jit,
SortDescription & description,
const DataTypes & sort_description_types,
const std::string & sort_description_dump)
{
Stopwatch watch;

auto compiled_module = jit.compileModule([&](llvm::Module & module)
{
auto & context = module.getContext();
llvm::IRBuilder<> b(context);

auto * size_type = b.getIntNTy(sizeof(size_t) * 8);

auto * column_data_type = llvm::StructType::get(b.getInt8PtrTy(), b.getInt8PtrTy());

std::vector<llvm::Type *> types = { size_type, size_type, column_data_type->getPointerTo(), column_data_type->getPointerTo() };
auto * comparator_func_declaration = llvm::FunctionType::get(b.getInt8Ty(), types, false);
auto * comparator_func = llvm::Function::Create(comparator_func_declaration, llvm::Function::ExternalLinkage, sort_description_dump, module);

auto * arguments = comparator_func->args().begin();
llvm::Value * lhs_index_arg = &*arguments++;
llvm::Value * rhs_index_arg = &*arguments++;
llvm::Value * columns_lhs_arg = &*arguments++;
llvm::Value * columns_rhs_arg = &*arguments++;

size_t columns_size = description.size();

std::vector<std::pair<llvm::BasicBlock *, llvm::Value *>> comparator_steps_and_results;
for (size_t i = 0; i < columns_size; ++i)
{
auto * step = llvm::BasicBlock::Create(b.getContext(), "step_" + std::to_string(i), comparator_func);
llvm::Value * result_value = nullptr;
comparator_steps_and_results.emplace_back(step, result_value);
}

auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0);

auto * comparator_join = llvm::BasicBlock::Create(b.getContext(), "comparator_join", comparator_func);

for (size_t i = 0; i < columns_size; ++i)
{
b.SetInsertPoint(comparator_steps_and_results[i].first);

const auto & sort_description = description[i];
const auto & column_type = sort_description_types[i];

auto dummy_column = column_type->createColumn();

auto * column_native_type_nullable = toNativeType(b, column_type);
auto * column_native_type = toNativeType(b, removeNullable(column_type));
if (!column_native_type)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No native type for column type {}", column_type->getName());

auto * column_native_type_pointer = column_native_type->getPointerTo();
bool column_type_is_nullable = column_type->isNullable();

auto * nullable_unitilized = llvm::Constant::getNullValue(column_native_type_nullable);

auto * lhs_column = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_64(column_data_type, columns_lhs_arg, i));
auto * lhs_column_data = b.CreatePointerCast(b.CreateExtractValue(lhs_column, {0}), column_native_type_pointer);
auto * lhs_column_null_data = column_type_is_nullable ? b.CreateExtractValue(lhs_column, {1}) : nullptr;

llvm::Value * lhs_value = b.CreateLoad(b.CreateInBoundsGEP(nullptr, lhs_column_data, lhs_index_arg));

if (lhs_column_null_data)
{
auto * is_null_value_pointer = b.CreateInBoundsGEP(nullptr, lhs_column_null_data, lhs_index_arg);
auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), is_null_value_pointer), b.getInt8(0));
auto * lhs_nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, lhs_value, {0}), is_null, {1});
lhs_value = lhs_nullable_value;
}

auto * rhs_column = b.CreateLoad(column_data_type, b.CreateConstInBoundsGEP1_64(column_data_type, columns_rhs_arg, i));
auto * rhs_column_data = b.CreatePointerCast(b.CreateExtractValue(rhs_column, {0}), column_native_type_pointer);
auto * rhs_column_null_data = column_type_is_nullable ? b.CreateExtractValue(rhs_column, {1}) : nullptr;

llvm::Value * rhs_value = b.CreateLoad(b.CreateInBoundsGEP(nullptr, rhs_column_data, rhs_index_arg));
if (rhs_column_null_data)
{
auto * is_null_value_pointer = b.CreateInBoundsGEP(nullptr, rhs_column_null_data, rhs_index_arg);
auto * is_null = b.CreateICmpNE(b.CreateLoad(b.getInt8Ty(), is_null_value_pointer), b.getInt8(0));
auto * rhs_nullable_value = b.CreateInsertValue(b.CreateInsertValue(nullable_unitilized, rhs_value, {0}), is_null, {1});
rhs_value = rhs_nullable_value;
}

llvm::Value * direction = llvm::ConstantInt::getSigned(b.getInt8Ty(), sort_description.direction);
llvm::Value * nan_direction_hint = llvm::ConstantInt::getSigned(b.getInt8Ty(), sort_description.nulls_direction);
llvm::Value * compare_result = dummy_column->compileComparator(b, lhs_value, rhs_value, nan_direction_hint);
llvm::Value * result = b.CreateMul(direction, compare_result);

comparator_steps_and_results[i].first = b.GetInsertBlock();
comparator_steps_and_results[i].second = result;

if (i == columns_size - 1)
b.CreateBr(comparator_join);
else
b.CreateCondBr(b.CreateICmpEQ(result, lhs_equals_rhs_result), comparator_steps_and_results[i + 1].first, comparator_join);
}

b.SetInsertPoint(comparator_join);
auto * phi = b.CreatePHI(b.getInt8Ty(), comparator_steps_and_results.size());

for (const auto & [block, result_value] : comparator_steps_and_results)
phi->addIncoming(result_value, block);

b.CreateRet(phi);
});

ProfileEvents::increment(ProfileEvents::CompileExpressionsMicroseconds, watch.elapsedMicroseconds());
ProfileEvents::increment(ProfileEvents::CompileExpressionsBytes, compiled_module.size);
ProfileEvents::increment(ProfileEvents::CompileFunction);

auto comparator_function = reinterpret_cast<JITSortDescriptionFunc>(compiled_module.function_name_to_symbol[sort_description_dump]);
assert(comparator_function);

CompiledSortDescriptionFunction compiled_sort_descriptor_function
{
.comparator_function = comparator_function,

.compiled_module = std::move(compiled_module)
};

return compiled_sort_descriptor_function;
}

}

#endif
@ -4,10 +4,12 @@

#if USE_EMBEDDED_COMPILER

#include <Core/SortDescription.h>
#include <Functions/IFunction.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Interpreters/JIT/CHJIT.h>

namespace DB
{

@ -83,6 +85,21 @@ struct CompiledAggregateFunctions
*/
CompiledAggregateFunctions compileAggregateFunctions(CHJIT & jit, const std::vector<AggregateFunctionWithOffset> & functions, std::string functions_dump_name);

using JITSortDescriptionFunc = int8_t (*)(size_t, size_t, ColumnData *, ColumnData *);

struct CompiledSortDescriptionFunction
{
JITSortDescriptionFunc comparator_function;
CHJIT::CompiledModule compiled_module;
};

CompiledSortDescriptionFunction compileSortDescription(
CHJIT & jit,
SortDescription & description,
const DataTypes & sort_description_types,
const std::string & sort_description_dump);

}

#endif
@ -590,7 +590,7 @@ void MergeJoin::mergeInMemoryRightBlocks()

/// TODO: there should be no split keys by blocks for RIGHT|FULL JOIN
builder.addTransform(std::make_shared<MergeSortingTransform>(
builder.getHeader(), right_sort_description, max_rows_in_right_block, 0, 0, 0, 0, nullptr, 0));
builder.getHeader(), right_sort_description, max_rows_in_right_block, 0, false, 0, 0, 0, nullptr, 0));

auto pipeline = QueryPipelineBuilder::getPipeline(std::move(builder));
PullingPipelineExecutor executor(pipeline);
@ -39,10 +39,8 @@ bool shardContains(
const std::string & sharding_column_name,
const OptimizeShardingKeyRewriteInMatcher::Data & data)
{
UInt64 field_value;
/// Convert value to numeric (if required).
if (!sharding_column_value.tryGet<UInt64>(field_value))
sharding_column_value = convertFieldToType(sharding_column_value, *data.sharding_key_type);
/// Implicit conversion.
sharding_column_value = convertFieldToType(sharding_column_value, *data.sharding_key_type);

/// NULL is not allowed in sharding key,
/// so it should be safe to assume that shard cannot contain it.
@ -212,7 +212,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as

/// Set query-level memory trackers
thread_group->memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage);
thread_group->memory_tracker.setSoftLimit(settings.max_guaranteed_memory_usage);
thread_group->memory_tracker.setSoftLimit(settings.memory_overcommit_ratio_denominator);

if (query_context->hasTraceCollector())
{

@ -242,7 +242,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as

/// Track memory usage for all simultaneously running queries from single user.
user_process_list.user_memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage_for_user);
user_process_list.user_memory_tracker.setSoftLimit(settings.max_guaranteed_memory_usage_for_user);
user_process_list.user_memory_tracker.setSoftLimit(settings.memory_overcommit_ratio_denominator_for_user);
user_process_list.user_memory_tracker.setDescription("(for user)");
user_process_list.user_overcommit_tracker.setMaxWaitTime(settings.memory_usage_overcommit_max_wait_microseconds);
@ -210,10 +210,22 @@ GroupByKeysInfo getGroupByKeysInfo(const ASTs & group_by_keys)
/// filling set with short names of keys
for (const auto & group_key : group_by_keys)
{
if (group_key->as<ASTFunction>())
data.has_function = true;
/// for grouping sets case
if (group_key->as<ASTExpressionList>())
{
const auto express_list_ast = group_key->as<const ASTExpressionList &>();
for (const auto & group_elem : express_list_ast.children)
{
data.key_names.insert(group_elem->getColumnName());
}
}
else
{
if (group_key->as<ASTFunction>())
data.has_function = true;

data.key_names.insert(group_key->getColumnName());
data.key_names.insert(group_key->getColumnName());
}
}

return data;

@ -646,12 +658,6 @@ void optimizeSumIfFunctions(ASTPtr & query)
RewriteSumIfFunctionVisitor(data).visit(query);
}

void optimizeCountConstantAndSumOne(ASTPtr & query)
{
RewriteCountVariantsVisitor::visit(query);
}

void optimizeInjectiveFunctionsInsideUniq(ASTPtr & query, ContextPtr context)
{
RemoveInjectiveFunctionsVisitor::Data data(context);

@ -740,6 +746,11 @@ void TreeOptimizer::optimizeIf(ASTPtr & query, Aliases & aliases, bool if_chain_
OptimizeIfChainsVisitor().visit(query);
}

void TreeOptimizer::optimizeCountConstantAndSumOne(ASTPtr & query)
{
RewriteCountVariantsVisitor::visit(query);
}

void TreeOptimizer::apply(ASTPtr & query, TreeRewriterResult & result,
const std::vector<TableWithColumnNamesAndTypes> & tables_with_columns, ContextPtr context)
{
@ -24,6 +24,7 @@ public:
ContextPtr context);

static void optimizeIf(ASTPtr & query, Aliases & aliases, bool if_chain_to_multiif);
static void optimizeCountConstantAndSumOne(ASTPtr & query);
};

}
@ -1151,6 +1151,10 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
if (remove_duplicates)
renameDuplicatedColumns(select_query);

/// Perform it before analyzing JOINs, because it may change number of columns with names unique and break some login inside JOINs
if (settings.optimize_normalize_count_variants)
TreeOptimizer::optimizeCountConstantAndSumOne(query);

if (tables_with_columns.size() > 1)
{
const auto & right_table = tables_with_columns[1];
@ -597,41 +597,29 @@ NotJoinedBlocks::NotJoinedBlocks(std::unique_ptr<RightColumnsFiller> filler_,
column_indices_left.emplace_back(left_pos);
}

/// `sample_block_names` may contain non unique column names
/// `saved_block_sample` may contains non unique column names, get any of them
/// (e.g. in case of `... JOIN (SELECT a, a, b FROM table) as t2`)
/// proper fix is to get rid of it
std::unordered_set<String> sample_block_names;
for (size_t right_pos = 0; right_pos < saved_block_sample.columns(); ++right_pos)
for (const auto & [name, right_pos] : saved_block_sample.getNamesToIndexesMap())
{
const String & name = saved_block_sample.getByPosition(right_pos).name;

auto [_, inserted] = sample_block_names.insert(name);
/// skip columns with same names
if (!inserted)
continue;

if (!result_sample_block.has(name))
continue;

size_t result_position = result_sample_block.getPositionByName(name);

/// Don't remap left keys twice. We need only qualified right keys here
if (result_position < left_columns_count)
continue;

setRightIndex(right_pos, result_position);
/// Start from left_columns_count to don't remap left keys twice. We need only qualified right keys here
/// `result_sample_block` may contains non unique column names, need to set index for all of them
for (size_t result_pos = left_columns_count; result_pos < result_sample_block.columns(); ++result_pos)
{
const auto & result_name = result_sample_block.getByPosition(result_pos).name;
if (result_name == name)
setRightIndex(right_pos, result_pos);
}
}

/// `result_sample_block` also may contains non unique column names
const auto & result_names = result_sample_block.getNames();
size_t unique_names_count = std::unordered_set<String>(result_names.begin(), result_names.end()).size();
if (column_indices_left.size() + column_indices_right.size() + same_result_keys.size() != unique_names_count)
if (column_indices_left.size() + column_indices_right.size() + same_result_keys.size() != result_sample_block.columns())
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Error in columns mapping in JOIN. (assertion failed {} + {} + {} != {}) "
"Error in columns mapping in JOIN: assertion failed {} + {} + {} != {}; "
"Result block [{}], Saved block [{}]",
column_indices_left.size(), column_indices_right.size(), same_result_keys.size(), unique_names_count,
column_indices_left.size(), column_indices_right.size(), same_result_keys.size(), result_sample_block.columns(),
result_sample_block.dumpNames(), saved_block_sample.dumpNames());
}
}

void NotJoinedBlocks::setRightIndex(size_t right_pos, size_t result_position)
Some files were not shown because too many files have changed in this diff.