diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index acd7511d520..2ce1124404f 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -93,21 +93,21 @@ jobs: with: stage: Builds_2 data: ${{ needs.RunConfig.outputs.data }} - Tests_2: + Tests_2_ww: needs: [RunConfig, Builds_2] + if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }} + uses: ./.github/workflows/reusable_test_stage.yml + with: + stage: Tests_2_ww + data: ${{ needs.RunConfig.outputs.data }} + Tests_2: + # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there. + needs: [RunConfig, Builds_1] if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }} uses: ./.github/workflows/reusable_test_stage.yml with: stage: Tests_2 data: ${{ needs.RunConfig.outputs.data }} - Tests_3: - # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there. - needs: [RunConfig, Builds_1] - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }} - uses: ./.github/workflows/reusable_test_stage.yml - with: - stage: Tests_3 - data: ${{ needs.RunConfig.outputs.data }} ################################# Reports ################################# # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3 @@ -123,7 +123,7 @@ jobs: FinishCheck: if: ${{ !cancelled() }} - needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] + needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code @@ -133,6 +133,7 @@ jobs: cd "$GITHUB_WORKSPACE/tests/ci" python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat > "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 34bf51871d2..854dff530e7 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -123,20 +123,20 @@ jobs: stage: Builds_2 data: ${{ needs.RunConfig.outputs.data }} # stage for running non-required checks without being blocked by required checks (Test_1) if corresponding settings is selected - Tests_2: + Tests_2_ww: needs: [RunConfig, Builds_1] + if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }} + uses: ./.github/workflows/reusable_test_stage.yml + with: + stage: Tests_2_ww + data: ${{ needs.RunConfig.outputs.data }} + Tests_2: + needs: [RunConfig, Builds_1, Tests_1] if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }} uses: ./.github/workflows/reusable_test_stage.yml with: stage: Tests_2 data: ${{ needs.RunConfig.outputs.data }} - Tests_3: - needs: [RunConfig, Builds_1, Tests_1] - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }} - uses: ./.github/workflows/reusable_test_stage.yml - with: - stage: Tests_3 - data: ${{ needs.RunConfig.outputs.data }} 
################################# Reports ################################# # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3) @@ -154,7 +154,7 @@ jobs: if: ${{ !cancelled() }} # Test_2 or Test_3 do not have the jobs required for Mergeable check, # however, set them as "needs" to get all checks results before the automatic merge occurs. - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code @@ -178,7 +178,7 @@ jobs: # FinishCheck: if: ${{ !failure() && !cancelled() }} - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code diff --git a/CHANGELOG.md b/CHANGELOG.md index 07b37835dda..620b7c99bac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,7 +45,6 @@ * Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). #### Performance Improvement -* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)). * Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)). * Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)). * Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)). 
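For the `read_in_order_use_buffering` entry above, here is a minimal sketch of how the setting can be toggled per query. It is not part of the changelog; the table `hits` and its columns are hypothetical and only stand in for a MergeTree table whose primary key starts with `EventDate`.

```sql
-- Hypothetical example: assumes `hits` is a MergeTree table with primary key (EventDate, UserID);
-- neither the table nor the columns come from this changelog entry.
-- The optimization applies to ORDER BY over a primary-key prefix combined with a selective WHERE;
-- the buffering it uses can be disabled per query if its extra memory usage is a concern.
SELECT EventDate, UserID
FROM hits
WHERE UserID = 12345
ORDER BY EventDate
LIMIT 10
SETTINGS read_in_order_use_buffering = 0;
```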
diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index c967fa5b11b..8948e25cb8e 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -18,6 +18,16 @@ if (GLIBC_COMPATIBILITY) message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.") endif () + if (SANITIZE STREQUAL thread) + # Disable TSAN instrumentation that conflicts with re-exec due to high ASLR entropy using getauxval + # See longer comment in __auxv_init_procfs + # In the case of tsan we need to make sure getauxval is not instrumented as that would introduce tsan + # internal calls to functions that depend on a state that isn't initialized yet + set_source_files_properties( + musl/getauxval.c + PROPERTIES COMPILE_FLAGS "-mllvm -tsan-instrument-func-entry-exit=false") + endif() + # Need to omit frame pointers to match the performance of glibc set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer") diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c index ea5cff9fc11..ec2cce1e4aa 100644 --- a/base/glibc-compatibility/musl/getauxval.c +++ b/base/glibc-compatibility/musl/getauxval.c @@ -75,6 +75,44 @@ unsigned long NO_SANITIZE_THREAD __getauxval_procfs(unsigned long type) } static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type) { +#if defined(__x86_64__) && defined(__has_feature) +# if __has_feature(memory_sanitizer) || __has_feature(thread_sanitizer) + /// Sanitizers are not compatible with high ASLR entropy, which is the default on modern Linux distributions, and + /// to work around this limitation, TSAN and MSAN (couldn't see other sanitizers doing the same), re-exec the binary + /// without ASLR (see https://github.com/llvm/llvm-project/commit/0784b1eefa36d4acbb0dacd2d18796e26313b6c5) + + /// The problem we face is that, in order to re-exec, the sanitizer wants to use the original pathname in the call + /// and to get its value it uses getauxval (https://github.com/llvm/llvm-project/blob/20eff684203287828d6722fc860b9d3621429542/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp#L985-L988). + /// Since we provide getauxval ourselves (to minimize the version dependency on runtime glibc), we are the ones + /// being called and we fail horribly: + /// + /// ==301455==ERROR: MemorySanitizer: SEGV on unknown address 0x2ffc6d721550 (pc 0x5622c1cc0073 bp 0x000000000003 sp 0x7ffc6d721530 T301455) + /// ==301455==The signal is caused by a WRITE memory access.
+ /// #0 0x5622c1cc0073 in __auxv_init_procfs ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:129:5 + /// #1 0x5622c1cbffe9 in getauxval ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:240:12 + /// #2 0x5622c0d7bfb4 in __sanitizer::ReExec() crtstuff.c + /// #3 0x5622c0df7bfc in __msan::InitShadowWithReExec(bool) crtstuff.c + /// #4 0x5622c0d95356 in __msan_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x256356) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) + /// #5 0x5622c0dfe878 in msan.module_ctor main.cc + /// #6 0x5622c1cc156c in __libc_csu_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x118256c) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) + /// #7 0x73dc05dd7ea3 in __libc_start_main /usr/src/debug/glibc/glibc/csu/../csu/libc-start.c:343:6 + /// #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) + + /// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as + /// most global variables aren't initialized or available yet, so we can't initialize the auxiliary vector. + /// Normal glibc / musl getauxval doesn't have this problem since they initialize their auxval vector at the very + /// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such an option (otherwise + /// this complexity of reading "/proc/self/auxv" or using __environ would not be necessary). + + /// To avoid these crashes on the re-exec call (see above how it would fail when creating `aux`, and if we used + /// __auxv_init_environ then it would SIGSEGV on READing `__environ`) we capture this call for `AT_EXECFN` and + /// unconditionally return "/proc/self/exe" without any preparation. Theoretically this should be fine in + /// our case, as we don't load any libraries. That's the theory at least.
+ if (type == AT_EXECFN) + return (unsigned long)"/proc/self/exe"; +# endif +#endif + // For debugging: // - od -t dL /proc/self/auxv // - LD_SHOW_AUX= ls @@ -199,7 +237,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_environ(unsigned long type) // - __auxv_init_procfs -> __auxv_init_environ -> __getauxval_environ static void * volatile getauxval_func = (void *)__auxv_init_procfs; -unsigned long getauxval(unsigned long type) +unsigned long NO_SANITIZE_THREAD getauxval(unsigned long type) { return ((unsigned long (*)(unsigned long))getauxval_func)(type); } diff --git a/contrib/icu b/contrib/icu index a56dde820dc..7750081bda4 160000 --- a/contrib/icu +++ b/contrib/icu @@ -1 +1 @@ -Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668 +Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625 diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index 0a650f2bcc0..f9d05f7fe97 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -4,7 +4,9 @@ else () option(ENABLE_ICU "Enable ICU" 0) endif () -if (NOT ENABLE_ICU) +# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they generated +# the blob on s390x: https://github.com/ClickHouse/icudata/pull/2#issuecomment-2226957255 +if (NOT ENABLE_ICU OR ARCH_S390X) message(STATUS "Not using ICU") return() endif() @@ -12,8 +14,6 @@ endif() set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source") set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/") -set (CMAKE_CXX_STANDARD 17) - # These lists of sources were generated from build log of the original ICU build system (configure + make). set(ICUUC_SOURCES @@ -462,9 +462,9 @@ file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) if (ARCH_S390X) - set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70b_dat.S" ) + set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75b_dat.S" ) else() - set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S" ) + set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" ) endif() set(ICUDATA_SOURCES diff --git a/contrib/icudata b/contrib/icudata index c8e717892a5..d345d6ac22f 160000 --- a/contrib/icudata +++ b/contrib/icudata @@ -1 +1 @@ -Subproject commit c8e717892a557b4d2852317c7d628aacc0a0e5ab +Subproject commit d345d6ac22f381c882420de9053d30ae1ff38d75 diff --git a/docs/en/engines/table-engines/special/keepermap.md b/docs/en/engines/table-engines/special/keepermap.md index 5559cc2c648..04a9a4b0d4e 100644 --- a/docs/en/engines/table-engines/special/keepermap.md +++ b/docs/en/engines/table-engines/special/keepermap.md @@ -54,7 +54,7 @@ CREATE TABLE keeper_map_table `v2` String, `v3` Float32 ) -ENGINE = KeeperMap(/keeper_map_table, 4) +ENGINE = KeeperMap('/keeper_map_table', 4) PRIMARY KEY key ``` diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 9c4c082bc3a..8892c6d8d3f 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -32,6 +32,7 @@ The supported formats are: | [Vertical](#vertical) | ✗ | ✔ | | [JSON](#json) | ✔ | ✔ | | [JSONAsString](#jsonasstring) | ✔ | ✗ | +| [JSONAsObject](#jsonasobject) | ✔ | ✗ | | [JSONStrings](#jsonstrings) | ✔ | ✔ | | [JSONColumns](#jsoncolumns) | ✔ | ✔ | | [JSONColumnsWithMetadata](#jsoncolumnsmonoblock) | ✔ | ✔ | @@ -822,6 +823,67 @@ Result: └────────────────────────────┘ ``` +## JSONAsObject {#jsonasobject} + +In this format, a single JSON object is interpreted 
as a single [Object('json')](/docs/en/sql-reference/data-types/json.md) value. If the input has several JSON objects (comma separated), they are interpreted as separate rows. If the input data is enclosed in square brackets, it is interpreted as an array of JSONs. + +This format can only be parsed for a table with a single field of type [Object('json')](/docs/en/sql-reference/data-types/json.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized). + +**Examples** + +Query: + +``` sql +SET allow_experimental_object_type = 1; +CREATE TABLE json_as_object (json Object('json')) ENGINE = Memory; +INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json structure":1} +SELECT * FROM json_as_object FORMAT JSONEachRow; +``` + +Result: + +``` response +{"json":{"any json structure":0,"foo":{"bar":{"x":"y"},"baz":1}}} +{"json":{"any json structure":0,"foo":{"bar":{"x":""},"baz":0}}} +{"json":{"any json structure":1,"foo":{"bar":{"x":""},"baz":0}}} +``` + +**An array of JSON objects** + +Query: + +``` sql +SET allow_experimental_object_type = 1; +CREATE TABLE json_square_brackets (field Object('json')) ENGINE = Memory; +INSERT INTO json_square_brackets FORMAT JSONAsObject [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; + +SELECT * FROM json_square_brackets FORMAT JSONEachRow; +``` + +Result: + +```response +{"field":{"id":1,"name":"name1"}} +{"field":{"id":2,"name":"name2"}} +``` + +**Columns with default values** + +```sql +SET allow_experimental_object_type = 1; +CREATE TABLE json_as_object (json Object('json'), time DateTime MATERIALIZED now()) ENGINE = Memory; +INSERT INTO json_as_object (json) FORMAT JSONAsObject {"foo":{"bar":{"x":"y"},"baz":1}}; +INSERT INTO json_as_object (json) FORMAT JSONAsObject {}; +INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json structure":1} +SELECT * FROM json_as_object FORMAT JSONEachRow +``` + +```response +{"json":{"any json structure":0,"foo":{"bar":{"x":"y"},"baz":1}},"time":"2024-07-25 17:02:45"} +{"json":{"any json structure":0,"foo":{"bar":{"x":""},"baz":0}},"time":"2024-07-25 17:02:47"} +{"json":{"any json structure":1,"foo":{"bar":{"x":""},"baz":0}},"time":"2024-07-25 17:02:50"} +``` + ## JSONCompact {#jsoncompact} Differs from JSON only in that data rows are output in arrays, not in objects. diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 61e84ca72d1..66126e649c6 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -49,149 +49,3001 @@ SETTINGS cast_keep_nullable = 1 └──────────────────┴─────────────────────┴──────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256) +## toInt8 -Converts an input value to a value the [Int](../data-types/int-uint.md) data type. This function family includes: +Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error. -- `toInt8(expr)` — Converts to a value of data type `Int8`. -- `toInt16(expr)` — Converts to a value of data type `Int16`. -- `toInt32(expr)` — Converts to a value of data type `Int32`. -- `toInt64(expr)` — Converts to a value of data type `Int64`. -- `toInt128(expr)` — Converts to a value of data type `Int128`.
-- `toInt256(expr)` — Converts to a value of data type `Int256`. +**Syntax** + +```sql +toInt8(expr) +``` **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +For example: `SELECT toInt8(128) == -128;`. +::: **Returned value** -Integer value in the `Int8`, `Int16`, `Int32`, `Int64`, `Int128` or `Int256` data type. +- 8-bit integer value. [Int8](../data-types/int-uint.md). -Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers. - -The behavior of functions for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#common-issues-with-data-conversion), when using the functions. +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: **Example** Query: -``` sql -SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8); +```sql +SELECT + toInt8(-8), + toInt8(-8.8), + toInt8('-8') +FORMAT vertical; ``` Result: ```response -┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐ -│ -9223372036854775808 │ 32 │ 16 │ 8 │ -└──────────────────────┴─────────────┴───────────────┴─────────────┘ +Row 1: +────── +toInt8(-8): -8 +toInt8(-8.8): -8 +toInt8('-8'): -8 ``` -## toInt(8\|16\|32\|64\|128\|256)OrZero +**See also** -Takes an argument of type [String](../data-types/string.md) and tries to parse it into an Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If unsuccessful, returns `0`. +- [`toInt8OrZero`](#toint8orzero). +- [`toInt8OrNull`](#toint8ornull). +- [`toInt8OrDefault`](#toint8ordefault). -**Example** +## toInt8OrZero -Query: +Like [`toInt8`](#toint8), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. -``` sql -SELECT toInt64OrZero('123123'), toInt8OrZero('123qwe123'); +**Syntax** + +```sql +toInt8OrZero(x) ``` -Result: - -```response -┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐ -│ 123123 │ 0 │ -└─────────────────────────┴───────────────────────────┘ -``` - -## toInt(8\|16\|32\|64\|128\|256)OrNull - -It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If unsuccessful, returns `NULL`. 
- -**Example** - -Query: - -``` sql -SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123'); -``` - -Result: - -```response -┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐ -│ 123123 │ ᴺᵁᴸᴸ │ -└─────────────────────────┴───────────────────────────┘ -``` - -## toInt(8\|16\|32\|64\|128\|256)OrDefault - -It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If unsuccessful, returns the default type value. - -**Example** - -Query: - -``` sql -SELECT toInt64OrDefault('123123', cast('-1' as Int64)), toInt8OrDefault('123qwe123', cast('-1' as Int8)); -``` - -Result: - -```response -┌─toInt64OrDefault('123123', CAST('-1', 'Int64'))─┬─toInt8OrDefault('123qwe123', CAST('-1', 'Int8'))─┐ -│ 123123 │ -1 │ -└─────────────────────────────────────────────────┴──────────────────────────────────────────────────┘ -``` - - -## toUInt(8\|16\|32\|64\|256) - -Converts an input value to the [UInt](../data-types/int-uint.md) data type. This function family includes: - -- `toUInt8(expr)` — Converts to a value of data type `UInt8`. -- `toUInt16(expr)` — Converts to a value of data type `UInt16`. -- `toUInt32(expr)` — Converts to a value of data type `UInt32`. -- `toUInt64(expr)` — Converts to a value of data type `UInt64`. -- `toUInt256(expr)` — Converts to a value of data type `UInt256`. - **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of ordinary Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: **Returned value** -- Integer value in the `UInt8`, `UInt16`, `UInt32`, `UInt64` or `UInt256` data type. +- 8-bit integer value if successful, otherwise `0`. [Int8](../data-types/int-uint.md). -Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers. - -The behavior of functions for negative arguments and for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#common-issues-with-data-conversion), when using the functions. +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. 
+::: **Example** Query: ``` sql -SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8); +SELECT + toInt8OrZero('-8'), + toInt8OrZero('abc') +FORMAT vertical; ``` Result: ```response -┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐ -│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │ -└─────────────────────┴───────────────┴────────────────┴──────────────┘ +Row 1: +────── +toInt8OrZero('-8'): -8 +toInt8OrZero('abc'): 0 ``` -## toUInt(8\|16\|32\|64\|256)OrZero +**See also** -## toUInt(8\|16\|32\|64\|256)OrNull +- [`toInt8`](#toint8). +- [`toInt8OrNull`](#toint8ornull). +- [`toInt8OrDefault`](#toint8ordefault). -## toUInt(8\|16\|32\|64\|256)OrDefault +## toInt8OrNull + +Like [`toInt8`](#toint8), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toInt8OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 8-bit integer value if successful, otherwise `NULL`. [Int8](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt8OrNull('-8'), + toInt8OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt8OrNull('-8'): -8 +toInt8OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toInt8`](#toint8). +- [`toInt8OrZero`](#toint8orzero). +- [`toInt8OrDefault`](#toint8ordefault). + +## toInt8OrDefault + +Like [`toInt8`](#toint8), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toInt8OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Int8` is unsuccessful. [Int8](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrDefault('0xc0fe', CAST('-1', 'Int8'));`. + +:::note +If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 8-bit integer value if successful, otherwise returns the default value if passed or `0` if not. [Int8](../data-types/int-uint.md). 
+ +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt8OrDefault('-8', CAST('-1', 'Int8')), + toInt8OrDefault('abc', CAST('-1', 'Int8')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt8OrDefault('-8', CAST('-1', 'Int8')): -8 +toInt8OrDefault('abc', CAST('-1', 'Int8')): -1 +``` + +**See also** + +- [`toInt8`](#toint8). +- [`toInt8OrZero`](#toint8orzero). +- [`toInt8OrNull`](#toint8orNull). + +## toInt16 + +Converts an input value to a value of type [`Int16`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toInt16(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +For example: `SELECT toInt16(32768) == -32768;`. +::: + +**Returned value** + +- 16-bit integer value. [Int16](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toInt16(-16), + toInt16(-16.16), + toInt16('-16') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt16(-16): -16 +toInt16(-16.16): -16 +toInt16('-16'): -16 +``` + +**See also** + +- [`toInt16OrZero`](#toint16orzero). +- [`toInt16OrNull`](#toint16ornull). +- [`toInt16OrDefault`](#toint16ordefault). + +## toInt16OrZero + +Like [`toInt16`](#toint16), this function converts an input value to a value of type [Int16](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toInt16OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered as an error. +::: + +**Returned value** + +- 16-bit integer value if successful, otherwise `0`. [Int16](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt16OrZero('-16'), + toInt16OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt16OrZero('-16'): -16 +toInt16OrZero('abc'): 0 +``` + +**See also** + +- [`toInt16`](#toint16). 
+- [`toInt16OrNull`](#toint16ornull). +- [`toInt16OrDefault`](#toint16ordefault). + +## toInt16OrNull + +Like [`toInt16`](#toint16), this function converts an input value to a value of type [Int16](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toInt16OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 16-bit integer value if successful, otherwise `NULL`. [Int16](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt16OrNull('-16'), + toInt16OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt16OrNull('-16'): -16 +toInt16OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toInt16`](#toint16). +- [`toInt16OrZero`](#toint16orzero). +- [`toInt16OrDefault`](#toint16ordefault). + +## toInt16OrDefault + +Like [`toInt16`](#toint16), this function converts an input value to a value of type [Int16](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toInt16OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Int16` is unsuccessful. [Int16](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16OrDefault('0xc0fe', CAST('-1', 'Int16'));`. + +:::note +If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 16-bit integer value if successful, otherwise returns the default value if passed or `0` if not. [Int16](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. 
+::: + +**Example** + +Query: + +``` sql +SELECT + toInt16OrDefault('-16', CAST('-1', 'Int16')), + toInt16OrDefault('abc', CAST('-1', 'Int16')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt16OrDefault('-16', CAST('-1', 'Int16')): -16 +toInt16OrDefault('abc', CAST('-1', 'Int16')): -1 +``` + +**See also** + +- [`toInt16`](#toint16). +- [`toInt16OrZero`](#toint16orzero). +- [`toInt16OrNull`](#toint16ornull). + +## toInt32 + +Converts an input value to a value of type [`Int32`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toInt32(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md), the result over or under flows. +This is not considered an error. +For example: `SELECT toInt32(2147483648) == -2147483648;` +::: + +**Returned value** + +- 32-bit integer value. [Int32](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toInt32(-32), + toInt32(-32.32), + toInt32('-32') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt32(-32): -32 +toInt32(-32.32): -32 +toInt32('-32'): -32 +``` + +**See also** + +- [`toInt32OrZero`](#toint32orzero). +- [`toInt32OrNull`](#toint32ornull). +- [`toInt32OrDefault`](#toint32ordefault). + +## toInt32OrZero + +Like [`toInt32`](#toint32), this function converts an input value to a value of type [Int32](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toInt32OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 32-bit integer value if successful, otherwise `0`. [Int32](../data-types/int-uint.md) + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncate fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt32OrZero('-32'), + toInt32OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt32OrZero('-32'): -32 +toInt32OrZero('abc'): 0 +``` +**See also** + +- [`toInt32`](#toint32). +- [`toInt32OrNull`](#toint32ornull). +- [`toInt32OrDefault`](#toint32ordefault). 
+ +## toInt32OrNull + +Like [`toInt32`](#toint32), this function converts an input value to a value of type [Int32](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toInt32OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 32-bit integer value if successful, otherwise `NULL`. [Int32](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt32OrNull('-32'), + toInt32OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt32OrNull('-32'): -32 +toInt32OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toInt32`](#toint32). +- [`toInt32OrZero`](#toint32orzero). +- [`toInt32OrDefault`](#toint32ordefault). + +## toInt32OrDefault + +Like [`toInt32`](#toint32), this function converts an input value to a value of type [Int32](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toInt32OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Int32` is unsuccessful. [Int32](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32OrDefault('0xc0fe', CAST('-1', 'Int32'));`. + +:::note +If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 32-bit integer value if successful, otherwise returns the default value if passed or `0` if not. [Int32](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt32OrDefault('-32', CAST('-1', 'Int32')), + toInt32OrDefault('abc', CAST('-1', 'Int32')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt32OrDefault('-32', CAST('-1', 'Int32')): -32 +toInt32OrDefault('abc', CAST('-1', 'Int32')): -1 +``` + +**See also** + +- [`toInt32`](#toint32). +- [`toInt32OrZero`](#toint32orzero). 
+- [`toInt32OrNull`](#toint32ornull). + +## toInt64 + +Converts an input value to a value of type [`Int64`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toInt64(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported types: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md), the result over or under flows. +This is not considered an error. +For example: `SELECT toInt64(9223372036854775808) == -9223372036854775808;` +::: + +**Returned value** + +- 64-bit integer value. [Int64](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toInt64(-64), + toInt64(-64.64), + toInt64('-64') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt64(-64): -64 +toInt64(-64.64): -64 +toInt64('-64'): -64 +``` + +**See also** + +- [`toInt64OrZero`](#toint64orzero). +- [`toInt64OrNull`](#toint64ornull). +- [`toInt64OrDefault`](#toint64ordefault). + +## toInt64OrZero + +Like [`toInt64`](#toint64), this function converts an input value to a value of type [Int64](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toInt64OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 64-bit integer value if successful, otherwise `0`. [Int64](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt64OrZero('-64'), + toInt64OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt64OrZero('-64'): -64 +toInt64OrZero('abc'): 0 +``` + +**See also** + +- [`toInt64`](#toint64). +- [`toInt64OrNull`](#toint64ornull). +- [`toInt64OrDefault`](#toint64ordefault). + +## toInt64OrNull + +Like [`toInt64`](#toint64), this function converts an input value to a value of type [Int64](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toInt64OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. 
+ +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 64-bit integer value if successful, otherwise `NULL`. [Int64](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt64OrNull('-64'), + toInt64OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt64OrNull('-64'): -64 +toInt64OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toInt64`](#toint64). +- [`toInt64OrZero`](#toint64orzero). +- [`toInt64OrDefault`](#toint64ordefault). + +## toInt64OrDefault + +Like [`toInt64`](#toint64), this function converts an input value to a value of type [Int64](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toInt64OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Int64` is unsuccessful. [Int64](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64OrDefault('0xc0fe', CAST('-1', 'Int64'));`. + +:::note +If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 64-bit integer value if successful, otherwise returns the default value if passed or `0` if not. [Int64](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt64OrDefault('-64', CAST('-1', 'Int64')), + toInt64OrDefault('abc', CAST('-1', 'Int64')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt64OrDefault('-64', CAST('-1', 'Int64')): -64 +toInt64OrDefault('abc', CAST('-1', 'Int64')): -1 +``` + +**See also** + +- [`toInt64`](#toint64). +- [`toInt64OrZero`](#toint64orzero). +- [`toInt64OrNull`](#toint64ornull). + +## toInt128 + +Converts an input value to a value of type [`Int128`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toInt128(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). 
+ +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md), the result over or under flows. +This is not considered an error. +::: + +**Returned value** + +- 128-bit integer value. [Int128](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toInt128(-128), + toInt128(-128.8), + toInt128('-128') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt128(-128): -128 +toInt128(-128.8): -128 +toInt128('-128'): -128 +``` + +**See also** + +- [`toInt128OrZero`](#toint128orzero). +- [`toInt128OrNull`](#toint128ornull). +- [`toInt128OrDefault`](#toint128ordefault). + +## toInt128OrZero + +Like [`toInt128`](#toint128), this function converts an input value to a value of type [Int128](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toInt128OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 128-bit integer value if successful, otherwise `0`. [Int128](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt128OrZero('-128'), + toInt128OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt128OrZero('-128'): -128 +toInt128OrZero('abc'): 0 +``` + +**See also** + +- [`toInt128`](#toint128). +- [`toInt128OrNull`](#toint128ornull). +- [`toInt128OrDefault`](#toint128ordefault). + +## toInt128OrNull + +Like [`toInt128`](#toint128), this function converts an input value to a value of type [Int128](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toInt128OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md), overflow or underflow of the result occurs. 
+This is not considered an error. +::: + +**Returned value** + +- 128-bit integer value if successful, otherwise `NULL`. [Int128](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt128OrNull('-128'), + toInt128OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt128OrNull('-128'): -128 +toInt128OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toInt128`](#toint128). +- [`toInt128OrZero`](#toint128orzero). +- [`toInt128OrDefault`](#toint128ordefault). + +## toInt128OrDefault + +Like [`toInt128`](#toint128), this function converts an input value to a value of type [Int128](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toInt128OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Int128` is unsuccessful. [Int128](../data-types/int-uint.md). + +Supported arguments: +- (U)Int8/16/32/64/128/256. +- Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrDefault('0xc0fe', CAST('-1', 'Int128'));`. + +:::note +If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 128-bit integer value if successful, otherwise returns the default value if passed or `0` if not. [Int128](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt128OrDefault('-128', CAST('-1', 'Int128')), + toInt128OrDefault('abc', CAST('-1', 'Int128')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt128OrDefault('-128', CAST('-1', 'Int128')): -128 +toInt128OrDefault('abc', CAST('-1', 'Int128')): -1 +``` + +**See also** + +- [`toInt128`](#toint128). +- [`toInt128OrZero`](#toint128orzero). +- [`toInt128OrNull`](#toint128ornull). + +## toInt256 + +Converts an input value to a value of type [`Int256`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toInt256(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256('0xc0fe');`. 
+ +:::note +If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md), the result over or under flows. +This is not considered an error. +::: + +**Returned value** + +- 256-bit integer value. [Int256](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toInt256(-256), + toInt256(-256.256), + toInt256('-256') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt256(-256): -256 +toInt256(-256.256): -256 +toInt256('-256'): -256 +``` + +**See also** + +- [`toInt256OrZero`](#toint256orzero). +- [`toInt256OrNull`](#toint256ornull). +- [`toInt256OrDefault`](#toint256ordefault). + +## toInt256OrZero + +Like [`toInt256`](#toint256), this function converts an input value to a value of type [Int256](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toInt256OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 256-bit integer value if successful, otherwise `0`. [Int256](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt256OrZero('-256'), + toInt256OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toInt256OrZero('-256'): -256 +toInt256OrZero('abc'): 0 +``` + +**See also** + +- [`toInt256`](#toint256). +- [`toInt256OrNull`](#toint256ornull). +- [`toInt256OrDefault`](#toint256ordefault). + +## toInt256OrNull + +Like [`toInt256`](#toint256), this function converts an input value to a value of type [Int256](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toInt256OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 256-bit integer value if successful, otherwise `NULL`. [Int256](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. 
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toInt256OrNull('-256'),
+    toInt256OrNull('abc')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toInt256OrNull('-256'): -256
+toInt256OrNull('abc'): ᴺᵁᴸᴸ
+```
+
+**See also**
+
+- [`toInt256`](#toint256).
+- [`toInt256OrZero`](#toint256orzero).
+- [`toInt256OrDefault`](#toint256ordefault).
+
+## toInt256OrDefault
+
+Like [`toInt256`](#toint256), this function converts an input value to a value of type [Int256](../data-types/int-uint.md) but returns the default value in case of an error.
+If no `default` value is passed then `0` is returned in case of an error.
+
+**Syntax**
+
+```sql
+toInt256OrDefault(expr[, default])
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md).
+- `default` (optional) — The default value to return if parsing to type `Int256` is unsuccessful. [Int256](../data-types/int-uint.md).
+
+Supported arguments:
+- Values of type (U)Int8/16/32/64/128/256.
+- Values of type Float32/64.
+- String representations of (U)Int8/16/32/128/256.
+
+Arguments for which the default value is returned:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toInt256OrDefault('0xc0fe', CAST('-1', 'Int256'));`.
+
+:::note
+If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 256-bit integer value if successful, otherwise returns the default value if passed or `0` if not. [Int256](../data-types/int-uint.md).
+
+:::note
+- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+- The default value type should be the same as the cast type.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toInt256OrDefault('-256', CAST('-1', 'Int256')),
+    toInt256OrDefault('abc', CAST('-1', 'Int256'))
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toInt256OrDefault('-256', CAST('-1', 'Int256')): -256
+toInt256OrDefault('abc', CAST('-1', 'Int256')): -1
+```
+
+**See also**
+
+- [`toInt256`](#toint256).
+- [`toInt256OrZero`](#toint256orzero).
+- [`toInt256OrNull`](#toint256ornull).
+
+## toUInt8
+
+Converts an input value to a value of type [`UInt8`](../data-types/int-uint.md). Throws an exception in case of an error.
+
+**Syntax**
+
+```sql
+toUInt8(expr)
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions).
+
+Supported arguments:
+- Values of type (U)Int8/16/32/64/128/256.
+- Values of type Float32/64.
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt8('0xc0fe');`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt8](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+For example: `SELECT toUInt8(256) == 0;`.
+:::
+
+**Returned value**
+
+- 8-bit unsigned integer value. [UInt8](../data-types/int-uint.md).
+ +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toUInt8(8), + toUInt8(8.8), + toUInt8('8') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt8(8): 8 +toUInt8(8.8): 8 +toUInt8('8'): 8 +``` + +**See also** + +- [`toUInt8OrZero`](#touint8orzero). +- [`toUInt8OrNull`](#touint8ornull). +- [`toUInt8OrDefault`](#touint8ordefault). + +## toUInt8OrZero + +Like [`toUInt8`](#touint8), this function converts an input value to a value of type [UInt8](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toUInt8OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of ordinary Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt8OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 8-bit unsigned integer value if successful, otherwise `0`. [UInt8](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt8OrZero('-8'), + toUInt8OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt8OrZero('-8'): 0 +toUInt8OrZero('abc'): 0 +``` + +**See also** + +- [`toUInt8`](#touint8). +- [`toUInt8OrNull`](#touint8ornull). +- [`toUInt8OrDefault`](#touint8ordefault). + +## toUInt8OrNull + +Like [`toUInt8`](#touint8), this function converts an input value to a value of type [UInt8](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toUInt8OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt8OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 8-bit unsigned integer value if successful, otherwise `NULL`. [UInt8](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt8OrNull('8'), + toUInt8OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt8OrNull('8'): 8 +toUInt8OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toUInt8`](#touint8). +- [`toUInt8OrZero`](#touint8orzero). +- [`toUInt8OrDefault`](#touint8ordefault). 
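+
+Since the only difference between the `OrZero` and `OrNull` variants is the value they produce on failure, a small hedged sketch that puts both documented behaviours side by side can be helpful; the column aliases are illustrative only:
+
+```sql
+SELECT
+    toUInt8OrZero('abc') AS on_failure_zero, -- unparsable input yields 0
+    toUInt8OrNull('abc') AS on_failure_null  -- unparsable input yields NULL
+FORMAT vertical;
+```
+
+The `OrNull` form keeps failed conversions distinguishable from genuine zeros, at the cost of a [Nullable](../data-types/nullable.md) result column.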
+ +## toUInt8OrDefault + +Like [`toUInt8`](#touint8), this function converts an input value to a value of type [UInt8](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toUInt8OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `UInt8` is unsuccessful. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt8OrDefault('0xc0fe', CAST('0', 'UInt8'));`. + +:::note +If the input value cannot be represented within the bounds of [UInt8](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 8-bit unsigned integer value if successful, otherwise returns the default value if passed or `0` if not. [UInt8](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt8OrDefault('8', CAST('0', 'UInt8')), + toUInt8OrDefault('abc', CAST('0', 'UInt8')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt8OrDefault('8', CAST('0', 'UInt8')): 8 +toUInt8OrDefault('abc', CAST('0', 'UInt8')): 0 +``` + +**See also** + +- [`toUInt8`](#touint8). +- [`toUInt8OrZero`](#touint8orzero). +- [`toUInt8OrNull`](#touint8orNull). + +## toUInt16 + +Converts an input value to a value of type [`UInt16`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toUInt16(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt16('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +For example: `SELECT toUInt16(65536) == 0;`. +::: + +**Returned value** + +- 16-bit unsigned integer value. [UInt16](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toUInt16(16), + toUInt16(16.16), + toUInt16('16') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt16(16): 16 +toUInt16(16.16): 16 +toUInt16('16'): 16 +``` + +**See also** + +- [`toUInt16OrZero`](#touint16orzero). 
+- [`toUInt16OrNull`](#touint16ornull). +- [`toUInt16OrDefault`](#touint16ordefault). + +## toUInt16OrZero + +Like [`toUInt16`](#touint16), this function converts an input value to a value of type [UInt16](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toUInt16OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt16OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered as an error. +::: + +**Returned value** + +- 16-bit unsigned integer value if successful, otherwise `0`. [UInt16](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt16OrZero('16'), + toUInt16OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt16OrZero('16'): 16 +toUInt16OrZero('abc'): 0 +``` + +**See also** + +- [`toUInt16`](#touint16). +- [`toUInt16OrNull`](#touint16ornull). +- [`toUInt16OrDefault`](#touint16ordefault). + +## toUInt16OrNull + +Like [`toUInt16`](#touint16), this function converts an input value to a value of type [UInt16](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toUInt16OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt16OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 16-bit unsigned integer value if successful, otherwise `NULL`. [UInt16](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt16OrNull('16'), + toUInt16OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt16OrNull('16'): 16 +toUInt16OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toUInt16`](#touint16). +- [`toUInt16OrZero`](#touint16orzero). +- [`toUInt16OrDefault`](#touint16ordefault). + +## toUInt16OrDefault + +Like [`toUInt16`](#touint16), this function converts an input value to a value of type [UInt16](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toUInt16OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. 
[Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `UInt16` is unsuccessful. [UInt16](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt16OrDefault('0xc0fe', CAST('0', 'UInt16'));`. + +:::note +If the input value cannot be represented within the bounds of [UInt16](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 16-bit unsigned integer value if successful, otherwise returns the default value if passed or `0` if not. [UInt16](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt16OrDefault('16', CAST('0', 'UInt16')), + toUInt16OrDefault('abc', CAST('0', 'UInt16')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt16OrDefault('16', CAST('0', 'UInt16')): 16 +toUInt16OrDefault('abc', CAST('0', 'UInt16')): 0 +``` + +**See also** + +- [`toUInt16`](#touint16). +- [`toUInt16OrZero`](#touint16orzero). +- [`toUInt16OrNull`](#touint16ornull). + +## toUInt32 + +Converts an input value to a value of type [`UInt32`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toUInt32(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt32('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt32](../data-types/int-uint.md), the result over or under flows. +This is not considered an error. +For example: `SELECT toUInt32(4294967296) == 0;` +::: + +**Returned value** + +- 32-bit unsigned integer value. [UInt32](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toUInt32(32), + toUInt32(32.32), + toUInt32('32') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt32(32): 32 +toUInt32(32.32): 32 +toUInt32('32'): 32 +``` + +**See also** + +- [`toUInt32OrZero`](#touint32orzero). +- [`toUInt32OrNull`](#touint32ornull). +- [`toUInt32OrDefault`](#touint32ordefault). + +## toUInt32OrZero + +Like [`toUInt32`](#touint32), this function converts an input value to a value of type [UInt32](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toUInt32OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). 
+ +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt32OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt32](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 32-bit unsigned integer value if successful, otherwise `0`. [UInt32](../data-types/int-uint.md) + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero) +, meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt32OrZero('32'), + toUInt32OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt32OrZero('32'): 32 +toUInt32OrZero('abc'): 0 +``` +**See also** + +- [`toUInt32`](#touint32). +- [`toUInt32OrNull`](#touint32ornull). +- [`toUInt32OrDefault`](#touint32ordefault). + +## toUInt32OrNull + +Like [`toUInt32`](#touint32), this function converts an input value to a value of type [UInt32](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toUInt32OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt32OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt32](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 32-bit unsigned integer value if successful, otherwise `NULL`. [UInt32](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero) +, meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt32OrNull('32'), + toUInt32OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt32OrNull('32'): 32 +toUInt32OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toUInt32`](#touint32). +- [`toUInt32OrZero`](#touint32orzero). +- [`toUInt32OrDefault`](#touint32ordefault). + +## toUInt32OrDefault + +Like [`toUInt32`](#touint32), this function converts an input value to a value of type [UInt32](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toUInt32OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `UInt32` is unsuccessful. [UInt32](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. 
+
+Arguments for which the default value is returned:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt32OrDefault('0xc0fe', CAST('0', 'UInt32'));`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt32](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 32-bit unsigned integer value if successful, otherwise returns the default value if passed or `0` if not. [UInt32](../data-types/int-uint.md).
+
+:::note
+- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+- The default value type should be the same as the cast type.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toUInt32OrDefault('32', CAST('0', 'UInt32')),
+    toUInt32OrDefault('abc', CAST('0', 'UInt32'))
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt32OrDefault('32', CAST('0', 'UInt32')): 32
+toUInt32OrDefault('abc', CAST('0', 'UInt32')): 0
+```
+
+**See also**
+
+- [`toUInt32`](#touint32).
+- [`toUInt32OrZero`](#touint32orzero).
+- [`toUInt32OrNull`](#touint32ornull).
+
+## toUInt64
+
+Converts an input value to a value of type [`UInt64`](../data-types/int-uint.md). Throws an exception in case of an error.
+
+**Syntax**
+
+```sql
+toUInt64(expr)
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions).
+
+Supported arguments:
+- Values of type (U)Int8/16/32/64/128/256.
+- Values of type Float32/64.
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt64('0xc0fe');`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt64](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+For example: `SELECT toUInt64(18446744073709551616) == 0;`.
+:::
+
+**Returned value**
+
+- 64-bit unsigned integer value. [UInt64](../data-types/int-uint.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+:::
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+    toUInt64(64),
+    toUInt64(64.64),
+    toUInt64('64')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt64(64): 64
+toUInt64(64.64): 64
+toUInt64('64'): 64
+```
+
+**See also**
+
+- [`toUInt64OrZero`](#touint64orzero).
+- [`toUInt64OrNull`](#touint64ornull).
+- [`toUInt64OrDefault`](#touint64ordefault).
+
+## toUInt64OrZero
+
+Like [`toUInt64`](#touint64), this function converts an input value to a value of type [UInt64](../data-types/int-uint.md) but returns `0` in case of an error.
+
+**Syntax**
+
+```sql
+toUInt64OrZero(x)
+```
+
+**Arguments**
+
+- `x` — A String representation of a number. [String](../data-types/string.md).
+
+Supported arguments:
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments (return `0`):
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt64OrZero('0xc0fe');`.
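+
+For illustration, a sketch such as the following (using inputs from the list above) is expected to return `0` for both calls, because float and hexadecimal string representations are not parsed by this function:
+
+```sql
+SELECT
+    toUInt64OrZero('64.64'),  -- string representation of a float: returns 0
+    toUInt64OrZero('0xc0fe')  -- hexadecimal string: returns 0
+FORMAT vertical;
+```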
+
+:::note
+If the input value cannot be represented within the bounds of [UInt64](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 64-bit unsigned integer value if successful, otherwise `0`. [UInt64](../data-types/int-uint.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toUInt64OrZero('64'),
+    toUInt64OrZero('abc')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt64OrZero('64'): 64
+toUInt64OrZero('abc'): 0
+```
+
+**See also**
+
+- [`toUInt64`](#touint64).
+- [`toUInt64OrNull`](#touint64ornull).
+- [`toUInt64OrDefault`](#touint64ordefault).
+
+## toUInt64OrNull
+
+Like [`toUInt64`](#touint64), this function converts an input value to a value of type [UInt64](../data-types/int-uint.md) but returns `NULL` in case of an error.
+
+**Syntax**
+
+```sql
+toUInt64OrNull(x)
+```
+
+**Arguments**
+
+- `x` — A String representation of a number. [String](../data-types/string.md).
+
+Supported arguments:
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments (return `\N`)
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt64OrNull('0xc0fe');`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt64](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 64-bit unsigned integer value if successful, otherwise `NULL`. [UInt64](../data-types/int-uint.md) / [NULL](../data-types/nullable.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toUInt64OrNull('64'),
+    toUInt64OrNull('abc')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt64OrNull('64'): 64
+toUInt64OrNull('abc'): ᴺᵁᴸᴸ
+```
+
+**See also**
+
+- [`toUInt64`](#touint64).
+- [`toUInt64OrZero`](#touint64orzero).
+- [`toUInt64OrDefault`](#touint64ordefault).
+
+## toUInt64OrDefault
+
+Like [`toUInt64`](#touint64), this function converts an input value to a value of type [UInt64](../data-types/int-uint.md) but returns the default value in case of an error.
+If no `default` value is passed then `0` is returned in case of an error.
+
+**Syntax**
+
+```sql
+toUInt64OrDefault(expr[, default])
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md).
+- `default` (optional) — The default value to return if parsing to type `UInt64` is unsuccessful. [UInt64](../data-types/int-uint.md).
+
+Supported arguments:
+- Values of type (U)Int8/16/32/64/128/256.
+- Values of type Float32/64.
+- String representations of (U)Int8/16/32/128/256.
+
+Arguments for which the default value is returned:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt64OrDefault('0xc0fe', CAST('0', 'UInt64'));`.
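+
+To make the fallback visible, the following sketch uses an arbitrary illustrative default of `42` (any `UInt64` value works); per the list above, the hexadecimal string cannot be parsed, so the default is returned:
+
+```sql
+SELECT toUInt64OrDefault('0xc0fe', CAST('42', 'UInt64'));
+```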
+ +:::note +If the input value cannot be represented within the bounds of [UInt64](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 64-bit unsigned integer value if successful, otherwise returns the default value if passed or `0` if not. [UInt64](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt64OrDefault('64', CAST('0', 'UInt64')), + toUInt64OrDefault('abc', CAST('0', 'UInt64')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt64OrDefault('64', CAST('0', 'UInt64')): 64 +toUInt64OrDefault('abc', CAST('0', 'UInt64')): 0 +``` + +**See also** + +- [`toUInt64`](#touint64). +- [`toUInt64OrZero`](#touint64orzero). +- [`toUInt64OrNull`](#touint64ornull). + +## toUInt128 + +Converts an input value to a value of type [`UInt128`](../data-types/int-uint.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toUInt128(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments: +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt128('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt128](../data-types/int-uint.md), the result over or under flows. +This is not considered an error. +::: + +**Returned value** + +- 128-bit unsigned integer value. [UInt128](../data-types/int-uint.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +```sql +SELECT + toUInt128(128), + toUInt128(128.8), + toUInt128('128') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt128(128): 128 +toUInt128(128.8): 128 +toUInt128('128'): 128 +``` + +**See also** + +- [`toUInt128OrZero`](#touint128orzero). +- [`toUInt128OrNull`](#touint128ornull). +- [`toUInt128OrDefault`](#touint128ordefault). + +## toUInt128OrZero + +Like [`toUInt128`](#touint128), this function converts an input value to a value of type [UInt128](../data-types/int-uint.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toUInt128OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `0`): +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt128OrZero('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt128](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. 
+:::
+
+**Returned value**
+
+- 128-bit unsigned integer value if successful, otherwise `0`. [UInt128](../data-types/int-uint.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toUInt128OrZero('128'),
+    toUInt128OrZero('abc')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt128OrZero('128'): 128
+toUInt128OrZero('abc'): 0
+```
+
+**See also**
+
+- [`toUInt128`](#touint128).
+- [`toUInt128OrNull`](#touint128ornull).
+- [`toUInt128OrDefault`](#touint128ordefault).
+
+## toUInt128OrNull
+
+Like [`toUInt128`](#touint128), this function converts an input value to a value of type [UInt128](../data-types/int-uint.md) but returns `NULL` in case of an error.
+
+**Syntax**
+
+```sql
+toUInt128OrNull(x)
+```
+
+**Arguments**
+
+- `x` — A String representation of a number. [String](../data-types/string.md).
+
+Supported arguments:
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments (return `\N`)
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt128OrNull('0xc0fe');`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt128](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 128-bit unsigned integer value if successful, otherwise `NULL`. [UInt128](../data-types/int-uint.md) / [NULL](../data-types/nullable.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toUInt128OrNull('128'),
+    toUInt128OrNull('abc')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt128OrNull('128'): 128
+toUInt128OrNull('abc'): ᴺᵁᴸᴸ
+```
+
+**See also**
+
+- [`toUInt128`](#touint128).
+- [`toUInt128OrZero`](#touint128orzero).
+- [`toUInt128OrDefault`](#touint128ordefault).
+
+## toUInt128OrDefault
+
+Like [`toUInt128`](#touint128), this function converts an input value to a value of type [UInt128](../data-types/int-uint.md) but returns the default value in case of an error.
+If no `default` value is passed then `0` is returned in case of an error.
+
+**Syntax**
+
+```sql
+toUInt128OrDefault(expr[, default])
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md).
+- `default` (optional) — The default value to return if parsing to type `UInt128` is unsuccessful. [UInt128](../data-types/int-uint.md).
+
+Supported arguments:
+- (U)Int8/16/32/64/128/256.
+- Float32/64.
+- String representations of (U)Int8/16/32/128/256.
+
+Arguments for which the default value is returned:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt128OrDefault('0xc0fe', CAST('0', 'UInt128'));`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt128](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 128-bit unsigned integer value if successful, otherwise returns the default value if passed or `0` if not. [UInt128](../data-types/int-uint.md).
+
+:::note
+- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+- The default value type should be the same as the cast type.
+:::
+
+**Example**
+
+Query:
+
+``` sql
+SELECT
+    toUInt128OrDefault('128', CAST('0', 'UInt128')),
+    toUInt128OrDefault('abc', CAST('0', 'UInt128'))
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt128OrDefault('128', CAST('0', 'UInt128')): 128
+toUInt128OrDefault('abc', CAST('0', 'UInt128')): 0
+```
+
+**See also**
+
+- [`toUInt128`](#touint128).
+- [`toUInt128OrZero`](#touint128orzero).
+- [`toUInt128OrNull`](#touint128ornull).
+
+## toUInt256
+
+Converts an input value to a value of type [`UInt256`](../data-types/int-uint.md). Throws an exception in case of an error.
+
+**Syntax**
+
+```sql
+toUInt256(expr)
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions).
+
+Supported arguments:
+- Values of type (U)Int8/16/32/64/128/256.
+- Values of type Float32/64.
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments:
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt256('0xc0fe');`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt256](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 256-bit unsigned integer value. [UInt256](../data-types/int-uint.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+:::
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+    toUInt256(256),
+    toUInt256(256.256),
+    toUInt256('256')
+FORMAT vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+toUInt256(256): 256
+toUInt256(256.256): 256
+toUInt256('256'): 256
+```
+
+**See also**
+
+- [`toUInt256OrZero`](#touint256orzero).
+- [`toUInt256OrNull`](#touint256ornull).
+- [`toUInt256OrDefault`](#touint256ordefault).
+
+## toUInt256OrZero
+
+Like [`toUInt256`](#touint256), this function converts an input value to a value of type [UInt256](../data-types/int-uint.md) but returns `0` in case of an error.
+
+**Syntax**
+
+```sql
+toUInt256OrZero(x)
+```
+
+**Arguments**
+
+- `x` — A String representation of a number. [String](../data-types/string.md).
+
+Supported arguments:
+- String representations of (U)Int8/16/32/128/256.
+
+Unsupported arguments (return `0`):
+- String representations of Float32/64 values, including `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toUInt256OrZero('0xc0fe');`.
+
+:::note
+If the input value cannot be represented within the bounds of [UInt256](../data-types/int-uint.md), overflow or underflow of the result occurs.
+This is not considered an error.
+:::
+
+**Returned value**
+
+- 256-bit unsigned integer value if successful, otherwise `0`. [UInt256](../data-types/int-uint.md).
+
+:::note
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers.
+::: + +**Example** + +Query: + +``` sql +SELECT + toUInt256OrZero('256'), + toUInt256OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt256OrZero('256'): 256 +toUInt256OrZero('abc'): 0 +``` + +**See also** + +- [`toUInt256`](#touint256). +- [`toUInt256OrNull`](#touint256ornull). +- [`toUInt256OrDefault`](#touint256ordefault). + +## toUInt256OrNull + +Like [`toUInt256`](#touint256), this function converts an input value to a value of type [UInt256](../data-types/int-uint.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toUInt256OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256. + +Unsupported arguments (return `\N`) +- String representations of Float32/64 values, including `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt256OrNull('0xc0fe');`. + +:::note +If the input value cannot be represented within the bounds of [UInt256](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 256-bit unsigned integer value if successful, otherwise `NULL`. [UInt256](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt256OrNull('256'), + toUInt256OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt256OrNull('256'): 256 +toUInt256OrNull('abc'): ᴺᵁᴸᴸ +``` + +**See also** + +- [`toUInt256`](#touint256). +- [`toUInt256OrZero`](#touint256orzero). +- [`toUInt256OrDefault`](#touint256ordefault). + +## toUInt256OrDefault + +Like [`toUInt256`](#touint256), this function converts an input value to a value of type [UInt256](../data-types/int-uint.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toUInt256OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `UInt256` is unsuccessful. [UInt256](../data-types/int-uint.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- Values of type Float32/64. +- String representations of (U)Int8/16/32/128/256. + +Arguments for which the default value is returned: +- String representations of Float32/64 values, including `NaN` and `Inf` +- String representations of binary and hexadecimal values, e.g. `SELECT toUInt256OrDefault('0xc0fe', CAST('0', 'UInt256'));` + +:::note +If the input value cannot be represented within the bounds of [UInt256](../data-types/int-uint.md), overflow or underflow of the result occurs. +This is not considered an error. +::: + +**Returned value** + +- 256-bit unsigned integer value if successful, otherwise returns the default value if passed or `0` if not. [UInt256](../data-types/int-uint.md). + +:::note +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. 
+- The default value type should be the same as the cast type. +::: + +**Example** + +Query: + +``` sql +SELECT + toUInt256OrDefault('-256', CAST('0', 'UInt256')), + toUInt256OrDefault('abc', CAST('0', 'UInt256')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toUInt256OrDefault('-256', CAST('0', 'UInt256')): 0 +toUInt256OrDefault('abc', CAST('0', 'UInt256')): 0 +``` + +**See also** + +- [`toUInt256`](#touint256). +- [`toUInt256OrZero`](#touint256orzero). +- [`toUInt256OrNull`](#touint256ornull). ## toFloat(32\|64) @@ -2289,7 +5141,7 @@ Result: └─────────────────────┴─────────────────┴─────────────────────────────────────┘ ``` -**See Also** +**See also** - [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123) - [toDate](#todate) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index aa6f132e08e..2e9b0cf3080 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -36,7 +36,7 @@ These actions are described in detail below. ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST] ``` -Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#codecs) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)). +Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#column_compression_codec) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)). If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions. @@ -155,7 +155,7 @@ This query changes the `name` column properties: - Column-level Settings -For examples of columns compression CODECS modifying, see [Column Compression Codecs](../create/table.md/#codecs). +For examples of columns compression CODECS modifying, see [Column Compression Codecs](../create/table.md/#column_compression_codec). For examples of columns TTL modifying, see [Column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 3e613532f3a..1d99d223ee9 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1135,8 +1135,6 @@ void Client::processOptions(const OptionsDescription & options_description, if ((query_fuzzer_runs = options["query-fuzzer-runs"].as())) { - // Fuzzer implies multiquery. - config().setBool("multiquery", true); // Ignore errors in parsing queries. config().setBool("ignore-error", true); ignore_error = true; @@ -1144,8 +1142,6 @@ void Client::processOptions(const OptionsDescription & options_description, if ((create_query_fuzzer_runs = options["create-query-fuzzer-runs"].as())) { - // Fuzzer implies multiquery. - config().setBool("multiquery", true); // Ignore errors in parsing queries. 
config().setBool("ignore-error", true); @@ -1201,9 +1197,6 @@ void Client::processConfig() } print_stack_trace = config().getBool("stacktrace", false); - if (config().has("multiquery")) - is_multiquery = true; - pager = config().getString("pager", ""); setDefaultFormatsAndCompressionFromConfiguration(); @@ -1359,13 +1352,6 @@ void Client::readArguments( allow_repeated_settings = true; else if (arg == "--allow_merge_tree_settings") allow_merge_tree_settings = true; - else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-')) - { - /// Transform the abbreviated syntax '--multiquery ' into the full syntax '--multiquery -q ' - ++arg_num; - arg = argv[arg_num]; - addMultiquery(arg, common_arguments); - } else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-'))) { common_arguments.emplace_back(arg); diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index ce0e179939d..88d5a0253d1 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -569,9 +569,6 @@ void LocalServer::processConfig() if (!queries.empty() && getClientConfiguration().has("queries-file")) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Options '--query' and '--queries-file' cannot be specified at the same time"); - if (getClientConfiguration().has("multiquery")) - is_multiquery = true; - pager = getClientConfiguration().getString("pager", ""); delayed_interactive = getClientConfiguration().has("interactive") && (!queries.empty() || getClientConfiguration().has("queries-file")); @@ -936,13 +933,6 @@ void LocalServer::readArguments(int argc, char ** argv, Arguments & common_argum query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1)); } } - else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-')) - { - /// Transform the abbreviated syntax '--multiquery ' into the full syntax '--multiquery -q ' - ++arg_num; - arg = argv[arg_num]; - addMultiquery(arg, common_arguments); - } else { common_arguments.emplace_back(arg); diff --git a/programs/server/config.d/backups.xml b/programs/server/config.d/backups.xml new file mode 100644 index 00000000000..382794e1123 --- /dev/null +++ b/programs/server/config.d/backups.xml @@ -0,0 +1,13 @@ + + + + + local + /tmp/backups/ + + + + + backups + + diff --git a/programs/server/config.d/enable_keeper_map.xml b/programs/server/config.d/enable_keeper_map.xml new file mode 120000 index 00000000000..47c36159882 --- /dev/null +++ b/programs/server/config.d/enable_keeper_map.xml @@ -0,0 +1 @@ +../../../tests/config/config.d/enable_keeper_map.xml \ No newline at end of file diff --git a/programs/server/config.d/session_log.xml b/programs/server/config.d/session_log.xml new file mode 120000 index 00000000000..7678cc33129 --- /dev/null +++ b/programs/server/config.d/session_log.xml @@ -0,0 +1 @@ +../../../tests/config/config.d/session_log.xml \ No newline at end of file diff --git a/src/Access/Common/AccessRightsElement.cpp b/src/Access/Common/AccessRightsElement.cpp index 24ff4e7631b..63bda09a51b 100644 --- a/src/Access/Common/AccessRightsElement.cpp +++ b/src/Access/Common/AccessRightsElement.cpp @@ -224,7 +224,11 @@ void AccessRightsElement::replaceEmptyDatabase(const String & current_database) String AccessRightsElement::toString() const { return toStringImpl(*this, true); } String AccessRightsElement::toStringWithoutOptions() const { return 
toStringImpl(*this, false); } - +String AccessRightsElement::toStringForAccessTypeSource() const +{ + String result{access_flags.toKeywords().front()}; + return result + " ON *.*"; +} bool AccessRightsElements::empty() const { return std::all_of(begin(), end(), [](const AccessRightsElement & e) { return e.empty(); }); } diff --git a/src/Access/Common/AccessRightsElement.h b/src/Access/Common/AccessRightsElement.h index ba625fc43df..78e94e6f2e4 100644 --- a/src/Access/Common/AccessRightsElement.h +++ b/src/Access/Common/AccessRightsElement.h @@ -89,6 +89,7 @@ struct AccessRightsElement /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table". String toString() const; String toStringWithoutOptions() const; + String toStringForAccessTypeSource() const; }; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index a2807ecc5ea..e50521a0730 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -38,6 +38,24 @@ namespace ErrorCodes namespace { + const std::vector> source_and_table_engines = { + {AccessType::FILE, "File"}, + {AccessType::URL, "URL"}, + {AccessType::REMOTE, "Distributed"}, + {AccessType::MONGO, "MongoDB"}, + {AccessType::REDIS, "Redis"}, + {AccessType::MYSQL, "MySQL"}, + {AccessType::POSTGRES, "PostgreSQL"}, + {AccessType::SQLITE, "SQLite"}, + {AccessType::ODBC, "ODBC"}, + {AccessType::JDBC, "JDBC"}, + {AccessType::HDFS, "HDFS"}, + {AccessType::S3, "S3"}, + {AccessType::HIVE, "Hive"}, + {AccessType::AZURE, "AzureBlobStorage"} + }; + + AccessRights mixAccessRightsFromUserAndRoles(const User & user, const EnabledRolesInfo & roles_info) { AccessRights res = user.access; @@ -206,22 +224,6 @@ namespace } /// There is overlap between AccessType sources and table engines, so the following code avoids user granting twice. - static const std::vector> source_and_table_engines = { - {AccessType::FILE, "File"}, - {AccessType::URL, "URL"}, - {AccessType::REMOTE, "Distributed"}, - {AccessType::MONGO, "MongoDB"}, - {AccessType::REDIS, "Redis"}, - {AccessType::MYSQL, "MySQL"}, - {AccessType::POSTGRES, "PostgreSQL"}, - {AccessType::SQLITE, "SQLite"}, - {AccessType::ODBC, "ODBC"}, - {AccessType::JDBC, "JDBC"}, - {AccessType::HDFS, "HDFS"}, - {AccessType::S3, "S3"}, - {AccessType::HIVE, "Hive"}, - {AccessType::AZURE, "AzureBlobStorage"} - }; /// Sync SOURCE and TABLE_ENGINE, so only need to check TABLE_ENGINE later. if (access_control.doesTableEnginesRequireGrant()) @@ -267,6 +269,11 @@ namespace template std::string_view getDatabase(std::string_view arg1, const OtherArgs &...) { return arg1; } + + std::string_view getTableEngine() { return {}; } + + template + std::string_view getTableEngine(std::string_view arg1, const OtherArgs &...) { return arg1; } } @@ -620,18 +627,58 @@ bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, AccessFlag if (!granted) { - if (grant_option && acs->isGranted(flags, args...)) + auto access_denied_no_grant = [&](AccessFlags access_flags, FmtArgs && ...fmt_args) { + if (grant_option && acs->isGranted(access_flags, fmt_args...)) + { + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. " + "The required privileges have been granted, but without grant option. " + "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", + AccessRightsElement{access_flags, fmt_args...}.toStringWithoutOptions()); + } + return access_denied(ErrorCodes::ACCESS_DENIED, - "{}: Not enough privileges. 
" - "The required privileges have been granted, but without grant option. " - "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", - AccessRightsElement{flags, args...}.toStringWithoutOptions()); + "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", + AccessRightsElement{access_flags, fmt_args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : "")); + }; + + /// As we check the SOURCES from the Table Engine logic, direct prompt about Table Engine would be misleading + /// since SOURCES is not granted actually. In order to solve this, turn the prompt logic back to Sources. + if (flags & AccessType::TABLE_ENGINE && !access_control->doesTableEnginesRequireGrant()) + { + AccessFlags new_flags; + + String table_engine_name{getTableEngine(args...)}; + for (const auto & source_and_table_engine : source_and_table_engines) + { + const auto & table_engine = std::get<1>(source_and_table_engine); + if (table_engine != table_engine_name) continue; + const auto & source = std::get<0>(source_and_table_engine); + /// Set the flags from Table Engine to SOURCES so that prompts can be meaningful. + new_flags = source; + break; + } + + /// Might happen in the case of grant Table Engine on A (but not source), then revoke A. + if (new_flags.isEmpty()) + return access_denied_no_grant(flags, args...); + + if (grant_option && acs->isGranted(flags, args...)) + { + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. " + "The required privileges have been granted, but without grant option. " + "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", + AccessRightsElement{new_flags}.toStringForAccessTypeSource()); + } + + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", + AccessRightsElement{new_flags}.toStringForAccessTypeSource() + (grant_option ? " WITH GRANT OPTION" : "")); } - return access_denied(ErrorCodes::ACCESS_DENIED, - "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", - AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? 
" WITH GRANT OPTION" : "")); + return access_denied_no_grant(flags, args...); } struct PrecalculatedFlags diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index a62b6e56ac5..ed1227b0f00 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -268,6 +268,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q } } + const auto enable_order_by_all = updated_context->getSettingsRef().enable_order_by_all; + auto current_query_tree = std::make_shared(std::move(updated_context), std::move(settings_changes)); current_query_tree->setIsSubquery(is_subquery); @@ -281,7 +283,10 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup); current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets); current_query_tree->setIsGroupByAll(select_query_typed.group_by_all); - current_query_tree->setIsOrderByAll(select_query_typed.order_by_all); + /// order_by_all flag in AST is set w/o consideration of `enable_order_by_all` setting + /// since SETTINGS section has not been parsed yet, - so, check the setting here + if (enable_order_by_all) + current_query_tree->setIsOrderByAll(select_query_typed.order_by_all); current_query_tree->setOriginalAST(select_query); auto current_context = current_query_tree->getContext(); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 78d8e7e3d6a..a88eed25db1 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -70,7 +70,6 @@ #include #include -#include #include #include #include @@ -80,6 +79,8 @@ #include #include "config.h" +#include +#include #if USE_GWP_ASAN # include @@ -745,14 +746,6 @@ void ClientBase::adjustSettings() /// NOTE: Do not forget to set changed=false to avoid sending it to the server (to avoid breakage read only profiles) - /// In case of multi-query we allow data after semicolon since it will be - /// parsed by the client and interpreted as new query - if (is_multiquery && !global_context->getSettingsRef().input_format_values_allow_data_after_semicolon.changed) - { - settings.input_format_values_allow_data_after_semicolon = true; - settings.input_format_values_allow_data_after_semicolon.changed = false; - } - /// Do not limit pretty format output in case of --pager specified or in case of stdout is not a tty. if (!pager.empty() || !stdout_is_a_tty) { @@ -1525,13 +1518,6 @@ void ClientBase::setInsertionTable(const ASTInsertQuery & insert_query) } -void ClientBase::addMultiquery(std::string_view query, Arguments & common_arguments) const -{ - common_arguments.emplace_back("--multiquery"); - common_arguments.emplace_back("-q"); - common_arguments.emplace_back(query); -} - namespace { bool isStdinNotEmptyAndValid(ReadBufferFromFileDescriptor & std_in) @@ -2186,23 +2172,48 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( return MultiQueryProcessingStage::PARSING_FAILED; } - // INSERT queries may have the inserted data in the query text - // that follow the query itself, e.g. "insert into t format CSV 1;2". - // They need special handling. First of all, here we find where the - // inserted data ends. In multi-query mode, it is delimited by a - // newline. - // The VALUES format needs even more handling - we also allow the - // data to be delimited by semicolon. This case is handled later by - // the format parser itself. 
- // We can't do multiline INSERTs with inline data, because most - // row input formats (e.g. TSV) can't tell when the input stops, - // unlike VALUES. + // INSERT queries may have the inserted data in the query text that follow the query itself, e.g. "insert into t format CSV 1,2". They + // need special handling. + // - If the INSERT statement FORMAT is VALUES, we use the VALUES format parser to skip the inserted data until we reach the trailing single semicolon. + // - Other formats (e.g. FORMAT CSV) are arbitrarily more complex and tricky to parse. For example, we may be unable to distinguish if the semicolon + // is part of the data or ends the statement. In this case, we simply assume that the end of the INSERT statement is determined by \n\n (two newlines). auto * insert_ast = parsed_query->as(); const char * query_to_execute_end = this_query_end; - if (insert_ast && insert_ast->data) { - this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end); + if (insert_ast->format == "Values") + { + // Invoke the VALUES format parser to skip the inserted data + ReadBufferFromMemory data_in(insert_ast->data, all_queries_end - insert_ast->data); + skipBOMIfExists(data_in); + do + { + skipWhitespaceIfAny(data_in); + if (data_in.eof() || *data_in.position() == ';') + break; + } + while (ValuesBlockInputFormat::skipToNextRow(&data_in, 1, 0)); + // Handle the case of a comment followed by a semicolon + // Example: INSERT INTO tab VALUES xx; -- {serverError xx} + // If we use this error hint, the next query should not be placed on the same line + this_query_end = insert_ast->data + data_in.count(); + const auto * pos_newline = find_first_symbols<'\n'>(this_query_end, all_queries_end); + if (pos_newline != this_query_end) + { + TestHint hint(String(this_query_end, pos_newline - this_query_end)); + if (hint.hasClientErrors() || hint.hasServerErrors()) + this_query_end = pos_newline; + } + } + else + { + // Handling of generic formats + auto pos_newline = String(insert_ast->data, all_queries_end).find("\n\n"); + if (pos_newline != std::string::npos) + this_query_end = insert_ast->data + pos_newline; + else + this_query_end = all_queries_end; + } insert_ast->end = this_query_end; query_to_execute_end = isSyncInsertWithData(*insert_ast, client_context) ? insert_ast->data : this_query_end; } @@ -2237,7 +2248,10 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) size_t test_tags_length = getTestTagsLength(all_queries_text); /// Several queries separated by ';'. - /// INSERT data is ended by the end of line, not ';'. + /// INSERT data is ended by the empty line (\n\n), not ';'. + /// Unnecessary semicolons may cause data to be parsed containing ';' + /// e.g. 'insert into xx format csv val;' will insert "val;" instead of "val" + /// 'insert into xx format csv val\n;' will insert "val" and ";" /// An exception is VALUES format where we also support semicolon in /// addition to end of line. const char * this_query_begin = all_queries_text.data() + test_tags_length; @@ -2248,8 +2262,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) String query_to_execute; ASTPtr parsed_query; std::unique_ptr current_exception; - size_t retries_count = 0; + bool is_first = true; while (true) { @@ -2258,16 +2272,24 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) switch (stage) { case MultiQueryProcessingStage::QUERIES_END: + { + /// Compatible with old version when run interactive, e.g. 
"", "\ld" + if (is_first && is_interactive) + processTextAsSingleQuery(all_queries_text); + return true; + } case MultiQueryProcessingStage::PARSING_FAILED: { return true; } case MultiQueryProcessingStage::CONTINUE_PARSING: { + is_first = false; continue; } case MultiQueryProcessingStage::PARSING_EXCEPTION: { + is_first = false; this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end); // Try to find test hint for syntax error. We don't know where @@ -2297,6 +2319,7 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) } case MultiQueryProcessingStage::EXECUTE_QUERY: { + is_first = false; full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin); if (query_fuzzer_runs) { @@ -2306,6 +2329,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) this_query_begin = this_query_end; continue; } + if (suggest) + updateSuggest(parsed_query); // Now we know for sure where the query ends. // Look for the hint in the text of query + insert data + trailing @@ -2456,14 +2481,6 @@ bool ClientBase::processQueryText(const String & text) return processMultiQueryFromFile(file_name); } - if (!is_multiquery) - { - assert(!query_fuzzer_runs); - processTextAsSingleQuery(text); - - return true; - } - if (query_fuzzer_runs) { processWithFuzzing(text); @@ -2901,9 +2918,9 @@ void ClientBase::init(int argc, char ** argv) ("config-file,C", po::value(), "config-file path") - ("query,q", po::value>()->multitoken(), R"(query; can be specified multiple times (--query "SELECT 1" --query "SELECT 2"...))") + ("query,q", po::value>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") - ("multiquery,n", "If specified, multiple queries separated by semicolons can be listed after --query. 
For convenience, it is also possible to omit --query and pass the queries directly after --multiquery.") + ("multiquery,n", "Obsolete, does nothing") ("multiline,m", "If specified, allow multiline queries (do not send the query on Enter)") ("database,d", po::value(), "database") ("query_kind", po::value()->default_value("initial_query"), "One of initial_query/secondary_query/no_query") @@ -2932,7 +2949,7 @@ void ClientBase::init(int argc, char ** argv) ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command") ("highlight", po::value()->default_value(true), "enable or disable basic syntax highlight in interactive command line") - ("ignore-error", "do not stop processing in multiquery mode") + ("ignore-error", "do not stop processing when an error occurs") ("stacktrace", "print stack traces of exceptions") ("hardware-utilization", "print hardware utilization information in progress bar") ("print-profile-events", po::value(&profile_events.print)->zero_tokens(), "Printing ProfileEvents packets") @@ -3025,8 +3042,6 @@ void ClientBase::init(int argc, char ** argv) queries_files = options["queries-file"].as>(); if (options.count("multiline")) getClientConfiguration().setBool("multiline", true); - if (options.count("multiquery")) - getClientConfiguration().setBool("multiquery", true); if (options.count("ignore-error")) getClientConfiguration().setBool("ignore-error", true); if (options.count("format")) diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index e3d5fe8dc97..7689744a373 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -156,8 +156,6 @@ protected: void setInsertionTable(const ASTInsertQuery & insert_query); - void addMultiquery(std::string_view query, Arguments & common_arguments) const; - private: void receiveResult(ASTPtr parsed_query, Int32 signals_before_stop, bool partial_result_on_first_cancel); bool receiveAndProcessPacket(ASTPtr parsed_query, bool cancelled_); @@ -229,7 +227,6 @@ protected: std::unique_ptr signal_listener; bool is_interactive = false; /// Use either interactive line editing interface or batch mode. - bool is_multiquery = false; bool delayed_interactive = false; bool echo_queries = false; /// Print queries before execution in batch mode. 
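To make the INSERT delimiting rules from the ClientBase changes above concrete: with inline data, a VALUES statement is cut at the trailing semicolon, while every other format is cut at the first empty line. The sketch below only illustrates those two rules under simplifying assumptions; the function name is made up, it ignores quoting, escaping and test hints, and the real client drives ValuesBlockInputFormat::skipToNextRow instead of a plain character search.

#include <cstddef>
#include <string_view>

// Hypothetical helper, for illustration only.
// data_begin points just past "FORMAT <name>" inside the full multi-query text.
static size_t findInlineInsertDataEnd(std::string_view all_queries, size_t data_begin, std::string_view format)
{
    if (format == "Values")
    {
        // VALUES: the statement ends at the trailing semicolon
        // (the real code skips whole rows, so semicolons inside literals are not a problem).
        size_t pos = all_queries.find(';', data_begin);
        return pos == std::string_view::npos ? all_queries.size() : pos;
    }
    // Any other format (CSV, TSV, ...): the inline data ends at an empty line.
    size_t pos = all_queries.find("\n\n", data_begin);
    return pos == std::string_view::npos ? all_queries.size() : pos;
}

For example, "INSERT INTO t FORMAT CSV" followed by inline data must be terminated by an empty line before the next statement, whereas "INSERT INTO t VALUES (1), (2); SELECT 1" is split at the semicolon.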
diff --git a/src/Common/TimerDescriptor.cpp b/src/Common/TimerDescriptor.cpp index 248febe226e..b36ea4059cb 100644 --- a/src/Common/TimerDescriptor.cpp +++ b/src/Common/TimerDescriptor.cpp @@ -1,11 +1,12 @@ #if defined(OS_LINUX) + #include #include #include -#include #include + namespace DB { @@ -13,21 +14,18 @@ namespace ErrorCodes { extern const int CANNOT_CREATE_TIMER; extern const int CANNOT_SET_TIMER_PERIOD; - extern const int CANNOT_FCNTL; extern const int CANNOT_READ_FROM_SOCKET; } -TimerDescriptor::TimerDescriptor(int clockid, int flags) +TimerDescriptor::TimerDescriptor() { - timer_fd = timerfd_create(clockid, flags); + timer_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC); if (timer_fd == -1) - throw Exception(ErrorCodes::CANNOT_CREATE_TIMER, "Cannot create timer_fd descriptor"); - - if (-1 == fcntl(timer_fd, F_SETFL, O_NONBLOCK)) - throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set O_NONBLOCK for timer_fd"); + throw ErrnoException(ErrorCodes::CANNOT_CREATE_TIMER, "Cannot create timer_fd descriptor"); } -TimerDescriptor::TimerDescriptor(TimerDescriptor && other) noexcept : timer_fd(other.timer_fd) +TimerDescriptor::TimerDescriptor(TimerDescriptor && other) noexcept + : timer_fd(other.timer_fd) { other.timer_fd = -1; } @@ -40,21 +38,19 @@ TimerDescriptor & TimerDescriptor::operator=(DB::TimerDescriptor && other) noexc TimerDescriptor::~TimerDescriptor() { - /// Do not check for result cause cannot throw exception. if (timer_fd != -1) { - int err = close(timer_fd); - chassert(!err || errno == EINTR); + if (0 != ::close(timer_fd)) + std::terminate(); } } void TimerDescriptor::reset() const { - itimerspec spec; - spec.it_interval.tv_nsec = 0; - spec.it_interval.tv_sec = 0; - spec.it_value.tv_sec = 0; - spec.it_value.tv_nsec = 0; + if (timer_fd == -1) + return; + + itimerspec spec{}; if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr)) throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Cannot reset timer_fd"); @@ -66,25 +62,46 @@ void TimerDescriptor::reset() const void TimerDescriptor::drain() const { + if (timer_fd == -1) + return; + /// It is expected that socket returns 8 bytes when readable. /// Read in loop anyway cause signal may interrupt read call. + + /// man timerfd_create: + /// If the timer has already expired one or more times since its settings were last modified using timerfd_settime(), + /// or since the last successful read(2), then the buffer given to read(2) returns an unsigned 8-byte integer (uint64_t) + /// containing the number of expirations that have occurred. + /// (The returned value is in host byte order—that is, the native byte order for integers on the host machine.) uint64_t buf; while (true) { ssize_t res = ::read(timer_fd, &buf, sizeof(buf)); if (res < 0) { + /// man timerfd_create: + /// If no timer expirations have occurred at the time of the read(2), + /// then the call either blocks until the next timer expiration, or fails with the error EAGAIN + /// if the file descriptor has been made nonblocking + /// (via the use of the fcntl(2) F_SETFL operation to set the O_NONBLOCK flag). if (errno == EAGAIN) break; - if (errno != EINTR) - throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd"); + /// A signal happened, need to retry. 
+ if (errno == EINTR) + continue; + + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd"); } + + chassert(res == sizeof(buf)); } } void TimerDescriptor::setRelative(uint64_t usec) const { + chassert(timer_fd >= 0); + static constexpr uint32_t TIMER_PRECISION = 1e6; itimerspec spec; @@ -103,4 +120,5 @@ void TimerDescriptor::setRelative(Poco::Timespan timespan) const } } + #endif diff --git a/src/Common/TimerDescriptor.h b/src/Common/TimerDescriptor.h index 0292f85d770..8d3b22868ac 100644 --- a/src/Common/TimerDescriptor.h +++ b/src/Common/TimerDescriptor.h @@ -12,7 +12,7 @@ private: int timer_fd; public: - explicit TimerDescriptor(int clockid = CLOCK_MONOTONIC, int flags = 0); + TimerDescriptor(); ~TimerDescriptor(); TimerDescriptor(const TimerDescriptor &) = delete; diff --git a/src/Core/InterpolateDescription.cpp b/src/Core/InterpolateDescription.cpp index d828c2e85e9..86681fdb591 100644 --- a/src/Core/InterpolateDescription.cpp +++ b/src/Core/InterpolateDescription.cpp @@ -13,10 +13,10 @@ namespace DB { - InterpolateDescription::InterpolateDescription(ActionsDAGPtr actions_, const Aliases & aliases) - : actions(actions_) + InterpolateDescription::InterpolateDescription(ActionsDAG actions_, const Aliases & aliases) + : actions(std::move(actions_)) { - for (const auto & name_type : actions->getRequiredColumns()) + for (const auto & name_type : actions.getRequiredColumns()) { if (const auto & p = aliases.find(name_type.name); p != aliases.end()) required_columns_map[p->second->getColumnName()] = name_type; @@ -24,7 +24,7 @@ namespace DB required_columns_map[name_type.name] = name_type; } - for (const ColumnWithTypeAndName & column : actions->getResultColumns()) + for (const ColumnWithTypeAndName & column : actions.getResultColumns()) { std::string name = column.name; if (const auto & p = aliases.find(name); p != aliases.end()) diff --git a/src/Core/InterpolateDescription.h b/src/Core/InterpolateDescription.h index 62d7120508b..eeead71d780 100644 --- a/src/Core/InterpolateDescription.h +++ b/src/Core/InterpolateDescription.h @@ -5,21 +5,20 @@ #include #include #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; using Aliases = std::unordered_map; /// Interpolate description struct InterpolateDescription { - explicit InterpolateDescription(ActionsDAGPtr actions, const Aliases & aliases); + explicit InterpolateDescription(ActionsDAG actions, const Aliases & aliases); - ActionsDAGPtr actions; + ActionsDAG actions; std::unordered_map required_columns_map; /// input column name -> {alias, type} std::unordered_set result_columns_set; /// result block columns diff --git a/src/Core/Settings.h b/src/Core/Settings.h index c7a1a7e2739..4fc2034b855 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -186,7 +186,7 @@ class IColumn; M(Bool, allow_suspicious_ttl_expressions, false, "Reject TTL expressions that don't depend on any of table's columns. It indicates a user error most of the time.", 0) \ M(Bool, allow_suspicious_variant_types, false, "In CREATE TABLE statement allows specifying Variant type with similar variant types (for example, with different numeric or date types). Enabling this setting may introduce some ambiguity when working with values with similar types.", 0) \ M(Bool, allow_suspicious_primary_key, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. 
SimpleAggregateFunction)", 0) \ - M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \ + M(Bool, compile_expressions, false, "Compile some scalar functions and operators to native code. Due to a bug in the LLVM compiler infrastructure, on AArch64 machines, it is known to lead to a nullptr dereference and, consequently, server crash. Do not enable this setting.", 0) \ M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ @@ -1156,7 +1156,6 @@ class IColumn; M(Bool, input_format_values_interpret_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression.", 0) \ M(Bool, input_format_values_deduce_templates_of_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows.", 0) \ M(Bool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \ - M(Bool, input_format_values_allow_data_after_semicolon, false, "For Values format: allow extra data after semicolon (used by client to interpret comments).", 0) \ M(Bool, input_format_avro_allow_missing_fields, false, "For Avro/AvroConfluent format: when field is not found in schema use default value instead of error", 0) \ /** This setting is obsolete and do nothing, left for compatibility reasons. */ \ M(Bool, input_format_avro_null_as_default, false, "For Avro/AvroConfluent format: insert default in case of null and non Nullable column", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 87eaeff0ca9..9faf77e9087 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,7 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { - {"24.8", {{"compile_expressions", false, true, "We believe that the LLVM infrastructure behind the JIT compiler is stable enough to enable this setting by default."}}}, {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, @@ -81,7 +80,7 @@ static std::initializer_listgetID()); for (size_t i = argument_types_start_idx; i < arguments->children.size(); ++i) argument_types.push_back(DataTypeFactory::instance().get(arguments->children[i])); diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index eb3bc973857..6f7dcd65b83 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include #include @@ -22,7 +22,6 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int UNKNOWN_TYPE; - extern const int ILLEGAL_SYNTAX_FOR_DATA_TYPE; extern const int UNEXPECTED_AST_STRUCTURE; extern const int DATA_TYPE_CANNOT_HAVE_ARGUMENTS; } @@ -83,15 +82,9 @@ DataTypePtr DataTypeFactory::tryGet(const ASTPtr & ast) const template DataTypePtr DataTypeFactory::getImpl(const ASTPtr & ast) const { - if (const auto * func = ast->as()) + if (const auto * type = ast->as()) { - if (func->parameters) - { - if constexpr (nullptr_on_error) - return nullptr; - throw Exception(ErrorCodes::ILLEGAL_SYNTAX_FOR_DATA_TYPE, "Data type cannot have multiple parenthesized parameters."); - } - return getImpl(func->name, func->arguments); + return getImpl(type->name, type->arguments); } if (const auto * ident = ast->as()) @@ -107,7 +100,7 @@ DataTypePtr DataTypeFactory::getImpl(const ASTPtr & ast) const if constexpr (nullptr_on_error) return nullptr; - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST element for data type."); + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST element for data type: {}.", ast->getID()); } DataTypePtr DataTypeFactory::get(const String & family_name_param, const ASTPtr & parameters) const diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 5636a46373f..91b9bfcb2a5 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -4,9 +4,10 @@ #include #include -#include +#include #include + namespace DB { @@ -53,13 +54,13 @@ static DataTypePtr create(const ASTPtr & arguments) ASTPtr schema_argument = arguments->children[0]; bool is_nullable = false; - if (const auto * func = schema_argument->as()) + if (const auto * type = schema_argument->as()) { - if (func->name != "Nullable" || func->arguments->children.size() != 1) + if (type->name != "Nullable" || type->arguments->children.size() != 1) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, - "Expected 'Nullable()' as parameter for type Object (function: {})", func->name); + "Expected 'Nullable()' as parameter for type Object (function: {})", type->name); - schema_argument = func->arguments->children[0]; + schema_argument = type->arguments->children[0]; is_nullable = true; } diff --git 
a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 21218e095bf..f127ccbc224 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -647,12 +647,13 @@ LoadTaskPtr DatabaseReplicated::startupDatabaseAsync(AsyncLoader & async_loader, { std::lock_guard lock{ddl_worker_mutex}; ddl_worker = std::make_unique(this, getContext()); + ddl_worker->startup(); + ddl_worker_initialized = true; } - ddl_worker->startup(); - ddl_worker_initialized = true; }); std::scoped_lock lock(mutex); - return startup_replicated_database_task = makeLoadTask(async_loader, {job}); + startup_replicated_database_task = makeLoadTask(async_loader, {job}); + return startup_replicated_database_task; } void DatabaseReplicated::waitDatabaseStarted() const @@ -1530,8 +1531,11 @@ void DatabaseReplicated::stopReplication() void DatabaseReplicated::shutdown() { stopReplication(); - ddl_worker_initialized = false; - ddl_worker = nullptr; + { + std::lock_guard lock{ddl_worker_mutex}; + ddl_worker_initialized = false; + ddl_worker = nullptr; + } DatabaseAtomic::shutdown(); } @@ -1679,6 +1683,7 @@ bool DatabaseReplicated::canExecuteReplicatedMetadataAlter() const /// It may update the metadata digest (both locally and in ZooKeeper) /// before DatabaseReplicatedDDLWorker::initializeReplication() has finished. /// We should not update metadata until the database is initialized. + std::lock_guard lock{ddl_worker_mutex}; return ddl_worker_initialized && ddl_worker->isCurrentlyActive(); } diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 0f901538c33..27ab262d1f1 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -155,7 +155,7 @@ private: std::atomic_bool is_recovering = false; std::atomic_bool ddl_worker_initialized = false; std::unique_ptr ddl_worker; - std::mutex ddl_worker_mutex; + mutable std::mutex ddl_worker_mutex; UInt32 max_log_ptr_at_creation = 0; /// Usually operation with metadata are single-threaded because of the way replication works, diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index b8e9231f5c6..d8151cf340a 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -149,7 +149,7 @@ ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_ columns = metadata_ptr->columns.getAll(); for (const auto & column_name_and_type: columns) { - const auto & ast_column_declaration = std::make_shared(); + const auto ast_column_declaration = std::make_shared(); ast_column_declaration->name = column_name_and_type.name; /// parser typename { @@ -164,7 +164,7 @@ ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_ if (!parser.parse(pos, ast_type, expected)) { if (throw_on_error) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parser metadata of {}.{}", + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parse metadata of {}.{}", backQuote(table_id.database_name), backQuote(table_id.table_name)); else return nullptr; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index a846e23cd4f..032fc33ea16 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -12,9 +12,9 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -25,6 +25,7 @@ #include #include + namespace fs = std::filesystem; namespace DB @@ -432,7 
+433,7 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co auto metadata_snapshot = storage->getInMemoryMetadataPtr(); for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) { - const auto & column_declaration = std::make_shared(); + const auto column_declaration = std::make_shared(); column_declaration->name = column_type_and_name.name; column_declaration->type = getColumnDeclaration(column_type_and_name.type); columns_expression_list->children.emplace_back(column_declaration); @@ -470,17 +471,15 @@ ASTPtr DatabasePostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) c WhichDataType which(data_type); if (which.isNullable()) - return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); if (which.isArray()) - return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); if (which.isDateTime64()) - { - return makeASTFunction("DateTime64", std::make_shared(static_cast(6))); - } + return makeASTDataType("DateTime64", std::make_shared(static_cast(6))); - return std::make_shared(data_type->getName()); + return makeASTDataType(data_type->getName()); } void registerDatabasePostgreSQL(DatabaseFactory & factory) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 198f6c0ea04..c928d25c7b8 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -810,6 +810,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() { last_caller_id = FileSegment::getCallerId(); + chassert(file_offset_of_buffer_end <= read_until_position); if (file_offset_of_buffer_end == read_until_position) return false; @@ -1051,7 +1052,11 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() if (download_current_segment && download_current_segment_succeeded) chassert(file_segment.getCurrentWriteOffset() >= file_offset_of_buffer_end); - chassert(file_offset_of_buffer_end <= read_until_position); + + chassert( + file_offset_of_buffer_end <= read_until_position, + fmt::format("Expected {} <= {} (size: {}, read range: {})", + file_offset_of_buffer_end, read_until_position, size, current_read_range.toString())); } swap(*implementation_buffer); diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 501343d2ca9..efd467abb37 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -215,7 +215,6 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.tsv.allow_variable_number_of_columns = settings.input_format_tsv_allow_variable_number_of_columns; format_settings.tsv.crlf_end_of_line_input = settings.input_format_tsv_crlf_end_of_line; format_settings.values.accurate_types_of_literals = settings.input_format_values_accurate_types_of_literals; - format_settings.values.allow_data_after_semicolon = settings.input_format_values_allow_data_after_semicolon; format_settings.values.deduce_templates_of_expressions = settings.input_format_values_deduce_templates_of_expressions; format_settings.values.interpret_expressions = settings.input_format_values_interpret_expressions; format_settings.values.escape_quote_with_quote = 
settings.output_format_values_escape_quote_with_quote; diff --git a/src/Functions/CRC.cpp b/src/Functions/CRC.cpp index 0ba976669a3..73318d523b5 100644 --- a/src/Functions/CRC.cpp +++ b/src/Functions/CRC.cpp @@ -81,46 +81,43 @@ struct CRCFunctionWrapper static constexpr auto is_fixed_to_constant = true; using ReturnType = typename Impl::ReturnType; - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); - ColumnString::Offset prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = doCRC(data, prev_offset, offsets[i] - prev_offset - 1); prev_offset = offsets[i]; } } - static void vectorFixedToConstant(const ColumnString::Chars & data, size_t n, ReturnType & res) { res = doCRC(data, 0, n); } - - static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res) + static void vectorFixedToConstant(const ColumnString::Chars & data, size_t n, ReturnType & res, size_t) { - size_t size = data.size() / n; - - for (size_t i = 0; i < size; ++i) - { - res[i] = doCRC(data, i * n, n); - } + res = doCRC(data, 0, n); } - [[noreturn]] static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray & /*res*/) + static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res, size_t input_rows_count) + { + for (size_t i = 0; i < input_rows_count; ++i) + res[i] = doCRC(data, i * n, n); + } + + [[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to Array argument", std::string(Impl::name)); } - [[noreturn]] static void uuid(const ColumnUUID::Container & /*offsets*/, size_t /*n*/, PaddedPODArray & /*res*/) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to UUID argument", std::string(Impl::name)); } - [[noreturn]] static void ipv6(const ColumnIPv6::Container & /*offsets*/, size_t /*n*/, PaddedPODArray & /*res*/) + [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv6 argument", std::string(Impl::name)); } - [[noreturn]] static void ipv4(const ColumnIPv4::Container & /*offsets*/, size_t /*n*/, PaddedPODArray & /*res*/) + [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv4 argument", std::string(Impl::name)); } diff --git a/src/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h index 75fb3c32f16..5b4de765362 100644 --- a/src/Functions/CustomWeekTransforms.h +++ b/src/Functions/CustomWeekTransforms.h @@ -32,13 +32,12 @@ struct WeekTransformer {} template - void vector(const FromVectorType & vec_from, ToVectorType & vec_to, UInt8 week_mode, const DateLUTImpl & time_zone) const + void vector(const FromVectorType & vec_from, ToVectorType & vec_to, UInt8 week_mode, const DateLUTImpl & time_zone, size_t input_rows_count) const { using ValueType = typename ToVectorType::value_type; - size_t size = vec_from.size(); - 
vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { if constexpr (is_extended_result) vec_to[i] = static_cast(transform.executeExtendedResult(vec_from[i], week_mode, time_zone)); @@ -56,7 +55,7 @@ template - static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/, Transform transform = {}) + static ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count, Transform transform = {}) { const auto op = WeekTransformer{transform}; @@ -77,9 +76,9 @@ struct CustomWeekTransformImpl const auto * sources = checkAndGetColumn(source_col.get()); auto col_to = ToDataType::ColumnType::create(); - col_to->getData().resize(sources->size()); + col_to->getData().resize(input_rows_count); - for (size_t i = 0; i < sources->size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { DateTime64 dt64; ReadBufferFromString buf(sources->getDataAt(i).toView()); @@ -92,7 +91,7 @@ struct CustomWeekTransformImpl else if (const auto * sources = checkAndGetColumn(source_col.get())) { auto col_to = ToDataType::ColumnType::create(); - op.vector(sources->getData(), col_to->getData(), week_mode, time_zone); + op.vector(sources->getData(), col_to->getData(), week_mode, time_zone, input_rows_count); return col_to; } else diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 5f745f3ccad..a7bd398cdaa 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -24,7 +24,7 @@ namespace DB static constexpr auto millisecond_multiplier = 1'000; static constexpr auto microsecond_multiplier = 1'000'000; -static constexpr auto nanosecond_multiplier = 1'000'000'000; +static constexpr auto nanosecond_multiplier = 1'000'000'000; static constexpr FormatSettings::DateTimeOverflowBehavior default_date_time_overflow_behavior = FormatSettings::DateTimeOverflowBehavior::Ignore; @@ -2134,13 +2134,12 @@ struct Transformer { template static void vector(const FromTypeVector & vec_from, ToTypeVector & vec_to, const DateLUTImpl & time_zone, const Transform & transform, - [[maybe_unused]] ColumnUInt8::Container * vec_null_map_to) + [[maybe_unused]] ColumnUInt8::Container * vec_null_map_to, size_t input_rows_count) { using ValueType = typename ToTypeVector::value_type; - size_t size = vec_from.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { if constexpr (std::is_same_v || std::is_same_v) { @@ -2178,7 +2177,7 @@ struct DateTimeTransformImpl { template static ColumnPtr execute( - const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/, const Transform & transform = {}) + const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, const Transform & transform = {}) { using Op = Transformer; @@ -2200,7 +2199,7 @@ struct DateTimeTransformImpl if (result_data_type.isDateTime() || result_data_type.isDateTime64()) { const auto & time_zone = dynamic_cast(*result_type).getTimeZone(); - Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to); + Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to, input_rows_count); } else { @@ -2209,15 +2208,13 @@ struct DateTimeTransformImpl time_zone_argument_position = 2; const DateLUTImpl & time_zone = 
extractTimeZoneFromFunctionArguments(arguments, time_zone_argument_position, 0); - Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to); + Op::vector(sources->getData(), col_to->getData(), time_zone, transform, vec_null_map_to, input_rows_count); } if constexpr (std::is_same_v) { if (vec_null_map_to) - { return ColumnNullable::create(std::move(mutable_result_col), std::move(col_null_map_to)); - } } return mutable_result_col; diff --git a/src/Functions/EmptyImpl.h b/src/Functions/EmptyImpl.h index d3b2dda024b..03675254eb6 100644 --- a/src/Functions/EmptyImpl.h +++ b/src/Functions/EmptyImpl.h @@ -21,11 +21,10 @@ struct EmptyImpl /// If the function will return constant value for FixedString data type. static constexpr auto is_fixed_to_constant = false; - static void vector(const ColumnString::Chars & /*data*/, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(const ColumnString::Chars & /*data*/, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); ColumnString::Offset prev_offset = 1; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = negative ^ (offsets[i] == prev_offset); prev_offset = offsets[i] + 1; @@ -33,42 +32,40 @@ struct EmptyImpl } /// Only make sense if is_fixed_to_constant. - static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t /*n*/, UInt8 & /*res*/) + static void vectorFixedToConstant(const ColumnString::Chars &, size_t, UInt8 &, size_t) { throw Exception(ErrorCodes::LOGICAL_ERROR, "'vectorFixedToConstant method' is called"); } - static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res) + static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res, size_t input_rows_count) { - size_t size = data.size() / n; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) res[i] = negative ^ memoryIsZeroSmallAllowOverflow15(data.data() + i * n, n); } - static void array(const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void array(const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); ColumnString::Offset prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = negative ^ (offsets[i] == prev_offset); prev_offset = offsets[i]; } } - static void uuid(const ColumnUUID::Container & container, size_t n, PaddedPODArray & res) + static void uuid(const ColumnUUID::Container & container, size_t n, PaddedPODArray & res, size_t) { for (size_t i = 0; i < n; ++i) res[i] = negative ^ (container[i].toUnderType() == 0); } - static void ipv6(const ColumnIPv6::Container & container, size_t n, PaddedPODArray & res) + static void ipv6(const ColumnIPv6::Container & container, size_t n, PaddedPODArray & res, size_t) { for (size_t i = 0; i < n; ++i) res[i] = negative ^ (container[i].toUnderType() == 0); } - static void ipv4(const ColumnIPv4::Container & container, size_t n, PaddedPODArray & res) + static void ipv4(const ColumnIPv4::Container & container, size_t n, PaddedPODArray & res, size_t) { for (size_t i = 0; i < n; ++i) res[i] = negative ^ (container[i].toUnderType() == 0); diff --git a/src/Functions/ExtractString.h b/src/Functions/ExtractString.h index 6beb8be830a..9f039f86b18 100644 --- a/src/Functions/ExtractString.h +++ b/src/Functions/ExtractString.h @@ 
-20,7 +20,7 @@ namespace DB // includes extracting ASCII ngram, UTF8 ngram, ASCII word and UTF8 word struct ExtractStringImpl { - static ALWAYS_INLINE inline const UInt8 * readOneWord(const UInt8 *& pos, const UInt8 * end) + static const UInt8 * readOneWord(const UInt8 *& pos, const UInt8 * end) { // jump separators while (pos < end && isUTF8Sep(*pos)) diff --git a/src/Functions/FunctionBitTestMany.h b/src/Functions/FunctionBitTestMany.h index 950e4ab4ea8..514b78ce59f 100644 --- a/src/Functions/FunctionBitTestMany.h +++ b/src/Functions/FunctionBitTestMany.h @@ -46,7 +46,7 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of first argument of function {}", first_arg->getName(), getName()); - for (const auto i : collections::range(1, arguments.size())) + for (size_t i = 1; i < arguments.size(); ++i) { const auto & pos_arg = arguments[i]; @@ -57,19 +57,19 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { const auto * value_col = arguments.front().column.get(); ColumnPtr res; - if (!((res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)) - || (res = execute(arguments, result_type, value_col)))) + if (!((res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)) + || (res = execute(arguments, result_type, value_col, input_rows_count)))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", value_col->getName(), getName()); return res; @@ -79,28 +79,28 @@ private: template ColumnPtr execute( const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, - const IColumn * const value_col_untyped) const + const IColumn * const value_col_untyped, + size_t input_rows_count) const { if (const auto value_col = checkAndGetColumn>(value_col_untyped)) { - const auto size = value_col->size(); bool is_const; const auto const_mask = createConstMaskIfConst(arguments, is_const); const auto & val = value_col->getData(); - auto out_col = ColumnVector::create(size); + auto out_col = ColumnVector::create(input_rows_count); auto & out = out_col->getData(); if (is_const) { - for (const auto i : collections::range(0, size)) + for (size_t i = 0; i < input_rows_count; ++i) out[i] = Impl::apply(val[i], const_mask); } else { - const auto mask = createMask(size, arguments); + const auto mask = createMask(input_rows_count, arguments); - for (const auto i : collections::range(0, size)) + for (size_t i = 0; i < input_rows_count; ++i) out[i] = Impl::apply(val[i], mask[i]); } @@ -108,23 +108,22 @@ private: 
} else if (const auto value_col_const = checkAndGetColumnConst>(value_col_untyped)) { - const auto size = value_col_const->size(); bool is_const; const auto const_mask = createConstMaskIfConst(arguments, is_const); const auto val = value_col_const->template getValue(); if (is_const) { - return result_type->createColumnConst(size, toField(Impl::apply(val, const_mask))); + return result_type->createColumnConst(input_rows_count, toField(Impl::apply(val, const_mask))); } else { - const auto mask = createMask(size, arguments); - auto out_col = ColumnVector::create(size); + const auto mask = createMask(input_rows_count, arguments); + auto out_col = ColumnVector::create(input_rows_count); auto & out = out_col->getData(); - for (const auto i : collections::range(0, size)) + for (size_t i = 0; i < input_rows_count; ++i) out[i] = Impl::apply(val, mask[i]); return out_col; @@ -140,7 +139,7 @@ private: out_is_const = true; ValueType mask = 0; - for (const auto i : collections::range(1, arguments.size())) + for (size_t i = 1; i < arguments.size(); ++i) { if (auto pos_col_const = checkAndGetColumnConst>(arguments[i].column.get())) { @@ -166,7 +165,7 @@ private: { PaddedPODArray mask(size, ValueType{}); - for (const auto i : collections::range(1, arguments.size())) + for (size_t i = 1; i < arguments.size(); ++i) { const auto * pos_col = arguments[i].column.get(); @@ -187,7 +186,7 @@ private: { const auto & pos = pos_col->getData(); - for (const auto i : collections::range(0, mask.size())) + for (size_t i = 0; i < mask.size(); ++i) if (pos[i] < 8 * sizeof(ValueType)) mask[i] = mask[i] | (ValueType(1) << pos[i]); else @@ -205,7 +204,7 @@ private: const auto new_mask = ValueType(1) << pos; - for (const auto i : collections::range(0, mask.size())) + for (size_t i = 0; i < mask.size(); ++i) mask[i] = mask[i] | new_mask; return true; diff --git a/src/Functions/FunctionChar.cpp b/src/Functions/FunctionChar.cpp index 79e346a3ea4..dbdb7692177 100644 --- a/src/Functions/FunctionChar.cpp +++ b/src/Functions/FunctionChar.cpp @@ -103,14 +103,11 @@ private: const ColumnVector * src_data_concrete = checkAndGetColumn>(&src_data); if (!src_data_concrete) - { return false; - } for (size_t row = 0; row < rows; ++row) - { out_vec[row * size_per_row + column_idx] = static_cast(src_data_concrete->getInt(row)); - } + return true; } }; diff --git a/src/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h index f50b1415622..25231b8887b 100644 --- a/src/Functions/FunctionDateOrDateTimeAddInterval.h +++ b/src/Functions/FunctionDateOrDateTimeAddInterval.h @@ -428,19 +428,17 @@ struct Processor {} template - void NO_INLINE vectorConstant(const FromColumnType & col_from, ToColumnType & col_to, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale) const + void NO_INLINE vectorConstant(const FromColumnType & col_from, ToColumnType & col_to, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale, size_t input_rows_count) const { static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); if constexpr (std::is_same_v) { - const auto & offsets_from = col_from.getOffsets(); auto & vec_to = col_to.getData(); - size_t size = offsets_from.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0 ; i < size; ++i) + for (size_t i = 0 ; i < input_rows_count; ++i) { std::string_view from = col_from.getDataAt(i).toView(); vec_to[i] = transform.execute(from, checkOverflow(delta), time_zone, utc_time_zone, scale); @@ -451,32 +449,31 @@ struct Processor const auto 
& vec_from = col_from.getData(); auto & vec_to = col_to.getData(); - size_t size = vec_from.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = transform.execute(vec_from[i], checkOverflow(delta), time_zone, utc_time_zone, scale); } } template - void vectorVector(const FromColumnType & col_from, ToColumnType & col_to, const IColumn & delta, const DateLUTImpl & time_zone, UInt16 scale) const + void vectorVector(const FromColumnType & col_from, ToColumnType & col_to, const IColumn & delta, const DateLUTImpl & time_zone, UInt16 scale, size_t input_rows_count) const { castTypeToEither< ColumnUInt8, ColumnUInt16, ColumnUInt32, ColumnUInt64, ColumnInt8, ColumnInt16, ColumnInt32, ColumnInt64, ColumnFloat32, ColumnFloat64>( - &delta, [&](const auto & column){ vectorVector(col_from, col_to, column, time_zone, scale); return true; }); + &delta, [&](const auto & column){ vectorVector(col_from, col_to, column, time_zone, scale, input_rows_count); return true; }); } template - void constantVector(const FromType & from, ToColumnType & col_to, const IColumn & delta, const DateLUTImpl & time_zone, UInt16 scale) const + void constantVector(const FromType & from, ToColumnType & col_to, const IColumn & delta, const DateLUTImpl & time_zone, UInt16 scale, size_t input_rows_count) const { castTypeToEither< ColumnUInt8, ColumnUInt16, ColumnUInt32, ColumnUInt64, ColumnInt8, ColumnInt16, ColumnInt32, ColumnInt64, ColumnFloat32, ColumnFloat64>( - &delta, [&](const auto & column){ constantVector(from, col_to, column, time_zone, scale); return true; }); + &delta, [&](const auto & column){ constantVector(from, col_to, column, time_zone, scale, input_rows_count); return true; }); } private: @@ -491,19 +488,17 @@ private: template NO_INLINE NO_SANITIZE_UNDEFINED void vectorVector( - const FromColumnType & col_from, ToColumnType & col_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, UInt16 scale) const + const FromColumnType & col_from, ToColumnType & col_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, UInt16 scale, size_t input_rows_count) const { static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); if constexpr (std::is_same_v) { - const auto & offsets_from = col_from.getOffsets(); auto & vec_to = col_to.getData(); - size_t size = offsets_from.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0 ; i < size; ++i) + for (size_t i = 0 ; i < input_rows_count; ++i) { std::string_view from = col_from.getDataAt(i).toView(); vec_to[i] = transform.execute(from, checkOverflow(delta.getData()[i]), time_zone, utc_time_zone, scale); @@ -514,26 +509,24 @@ private: const auto & vec_from = col_from.getData(); auto & vec_to = col_to.getData(); - size_t size = vec_from.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = transform.execute(vec_from[i], checkOverflow(delta.getData()[i]), time_zone, utc_time_zone, scale); } } template NO_INLINE NO_SANITIZE_UNDEFINED void constantVector( - const FromType & from, ToColumnType & col_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, UInt16 scale) const + const FromType & from, ToColumnType & col_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, UInt16 scale, size_t input_rows_count) const { static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); auto & 
vec_to = col_to.getData(); - size_t size = delta.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = transform.execute(from, checkOverflow(delta.getData()[i]), time_zone, utc_time_zone, scale); } }; @@ -542,7 +535,7 @@ private: template struct DateTimeAddIntervalImpl { - static ColumnPtr execute(Transform transform, const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, UInt16 scale) + static ColumnPtr execute(Transform transform, const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, UInt16 scale, size_t input_rows_count) { using FromValueType = typename FromDataType::FieldType; using FromColumnType = typename FromDataType::ColumnType; @@ -561,15 +554,15 @@ struct DateTimeAddIntervalImpl if (const auto * sources = checkAndGetColumn(&source_column)) { if (const auto * delta_const_column = typeid_cast(&delta_column)) - processor.vectorConstant(*sources, *col_to, delta_const_column->getInt(0), time_zone, scale); + processor.vectorConstant(*sources, *col_to, delta_const_column->getInt(0), time_zone, scale, input_rows_count); else - processor.vectorVector(*sources, *col_to, delta_column, time_zone, scale); + processor.vectorVector(*sources, *col_to, delta_column, time_zone, scale, input_rows_count); } else if (const auto * sources_const = checkAndGetColumnConst(&source_column)) { processor.constantVector( sources_const->template getValue(), - *col_to, delta_column, time_zone, scale); + *col_to, delta_column, time_zone, scale, input_rows_count); } else { @@ -708,25 +701,25 @@ public: bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {2}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { const IDataType * from_type = arguments[0].type.get(); WhichDataType which(from_type); if (which.isDate()) - return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, 0); + return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, 0, input_rows_count); else if (which.isDate32()) - return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, 0); + return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, 0, input_rows_count); else if (which.isDateTime()) - return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, 0); + return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, 0, input_rows_count); else if (which.isDateTime64()) { const auto * datetime64_type = assert_cast(from_type); auto from_scale = datetime64_type->getScale(); - return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, from_scale); + return DateTimeAddIntervalImpl, Transform>::execute(Transform{}, arguments, result_type, from_scale, input_rows_count); } else if (which.isString()) - return DateTimeAddIntervalImpl::execute(Transform{}, arguments, result_type, 3); + return DateTimeAddIntervalImpl::execute(Transform{}, arguments, result_type, 3, input_rows_count); else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of first argument of 
function {}", arguments[0].type->getName(), getName()); } diff --git a/src/Functions/FunctionMathBinaryFloat64.h b/src/Functions/FunctionMathBinaryFloat64.h index 1b75ee688f4..82ff6ae7fbf 100644 --- a/src/Functions/FunctionMathBinaryFloat64.h +++ b/src/Functions/FunctionMathBinaryFloat64.h @@ -54,7 +54,7 @@ private: } template - static ColumnPtr executeTyped(const ColumnConst * left_arg, const IColumn * right_arg) + static ColumnPtr executeTyped(const ColumnConst * left_arg, const IColumn * right_arg, size_t input_rows_count) { if (const auto right_arg_typed = checkAndGetColumn>(right_arg)) { @@ -63,12 +63,11 @@ private: LeftType left_src_data[Impl::rows_per_iteration]; std::fill(std::begin(left_src_data), std::end(left_src_data), left_arg->template getValue()); const auto & right_src_data = right_arg_typed->getData(); - const auto src_size = right_src_data.size(); auto & dst_data = dst->getData(); - dst_data.resize(src_size); + dst_data.resize(input_rows_count); - const auto rows_remaining = src_size % Impl::rows_per_iteration; - const auto rows_size = src_size - rows_remaining; + const auto rows_remaining = input_rows_count % Impl::rows_per_iteration; + const auto rows_size = input_rows_count - rows_remaining; for (size_t i = 0; i < rows_size; i += Impl::rows_per_iteration) Impl::execute(left_src_data, &right_src_data[i], &dst_data[i]); @@ -92,7 +91,7 @@ private: } template - static ColumnPtr executeTyped(const ColumnVector * left_arg, const IColumn * right_arg) + static ColumnPtr executeTyped(const ColumnVector * left_arg, const IColumn * right_arg, size_t input_rows_count) { if (const auto right_arg_typed = checkAndGetColumn>(right_arg)) { @@ -100,12 +99,11 @@ private: const auto & left_src_data = left_arg->getData(); const auto & right_src_data = right_arg_typed->getData(); - const auto src_size = left_src_data.size(); auto & dst_data = dst->getData(); - dst_data.resize(src_size); + dst_data.resize(input_rows_count); - const auto rows_remaining = src_size % Impl::rows_per_iteration; - const auto rows_size = src_size - rows_remaining; + const auto rows_remaining = input_rows_count % Impl::rows_per_iteration; + const auto rows_size = input_rows_count - rows_remaining; for (size_t i = 0; i < rows_size; i += Impl::rows_per_iteration) Impl::execute(&left_src_data[i], &right_src_data[i], &dst_data[i]); @@ -136,12 +134,11 @@ private: const auto & left_src_data = left_arg->getData(); RightType right_src_data[Impl::rows_per_iteration]; std::fill(std::begin(right_src_data), std::end(right_src_data), right_arg_typed->template getValue()); - const auto src_size = left_src_data.size(); auto & dst_data = dst->getData(); - dst_data.resize(src_size); + dst_data.resize(input_rows_count); - const auto rows_remaining = src_size % Impl::rows_per_iteration; - const auto rows_size = src_size - rows_remaining; + const auto rows_remaining = input_rows_count % Impl::rows_per_iteration; + const auto rows_size = input_rows_count - rows_remaining; for (size_t i = 0; i < rows_size; i += Impl::rows_per_iteration) Impl::execute(&left_src_data[i], right_src_data, &dst_data[i]); @@ -165,7 +162,7 @@ private: return nullptr; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & col_left = arguments[0]; const ColumnWithTypeAndName & col_right = arguments[1]; @@ -202,7 +199,7 @@ private: 
if (const auto left_arg_typed = checkAndGetColumn(left_arg)) { - if ((res = executeTyped(left_arg_typed, right_arg))) + if ((res = executeTyped(left_arg_typed, right_arg, input_rows_count))) return true; throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of second argument of function {}", @@ -210,7 +207,7 @@ private: } if (const auto left_arg_typed = checkAndGetColumnConst(left_arg)) { - if ((res = executeTyped(left_arg_typed, right_arg))) + if ((res = executeTyped(left_arg_typed, right_arg, input_rows_count))) return true; throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of second argument of function {}", diff --git a/src/Functions/FunctionMathUnary.h b/src/Functions/FunctionMathUnary.h index 9f400932356..3cc8bf391b4 100644 --- a/src/Functions/FunctionMathUnary.h +++ b/src/Functions/FunctionMathUnary.h @@ -106,42 +106,40 @@ private: } template - static ColumnPtr execute(const ColumnVector * col) + static ColumnPtr execute(const ColumnVector * col, size_t input_rows_count) { const auto & src_data = col->getData(); - const size_t size = src_data.size(); auto dst = ColumnVector::create(); auto & dst_data = dst->getData(); - dst_data.resize(size); + dst_data.resize(input_rows_count); - executeInIterations(src_data.data(), dst_data.data(), size); + executeInIterations(src_data.data(), dst_data.data(), input_rows_count); return dst; } template - static ColumnPtr execute(const ColumnDecimal * col) + static ColumnPtr execute(const ColumnDecimal * col, size_t input_rows_count) { const auto & src_data = col->getData(); - const size_t size = src_data.size(); UInt32 scale = col->getScale(); auto dst = ColumnVector::create(); auto & dst_data = dst->getData(); - dst_data.resize(size); + dst_data.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) dst_data[i] = DecimalUtils::convertTo(src_data[i], scale); - executeInIterations(dst_data.data(), dst_data.data(), size); + executeInIterations(dst_data.data(), dst_data.data(), input_rows_count); return dst; } bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & col = arguments[0]; ColumnPtr res; @@ -156,7 +154,7 @@ private: const auto col_vec = checkAndGetColumn(col.column.get()); if (col_vec == nullptr) return false; - return (res = execute(col_vec)) != nullptr; + return (res = execute(col_vec, input_rows_count)) != nullptr; }; if (!callOnBasicType(col.type->getTypeId(), call)) diff --git a/src/Functions/FunctionNumericPredicate.h b/src/Functions/FunctionNumericPredicate.h index fd495d7e8e7..97a3639734b 100644 --- a/src/Functions/FunctionNumericPredicate.h +++ b/src/Functions/FunctionNumericPredicate.h @@ -53,39 +53,37 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto * in = arguments.front().column.get(); ColumnPtr res; - if (!((res = execute(in)) - || (res = execute(in)) - || (res = execute(in)) - || (res = execute(in)) - || (res = execute(in)) - || (res = 
execute(in)) - || (res = execute(in)) - || (res = execute(in)) - || (res = execute(in)) - || (res = execute(in)))) + if (!((res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)) + || (res = execute(in, input_rows_count)))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", in->getName(), getName()); return res; } template - ColumnPtr execute(const IColumn * in_untyped) const + ColumnPtr execute(const IColumn * in_untyped, size_t input_rows_count) const { if (const auto in = checkAndGetColumn>(in_untyped)) { - const auto size = in->size(); - - auto out = ColumnUInt8::create(size); + auto out = ColumnUInt8::create(input_rows_count); const auto & in_data = in->getData(); auto & out_data = out->getData(); - for (const auto i : collections::range(0, size)) + for (size_t i = 0; i < input_rows_count; ++i) out_data[i] = Impl::execute(in_data[i]); return out; diff --git a/src/Functions/FunctionSpaceFillingCurve.h b/src/Functions/FunctionSpaceFillingCurve.h index ac9215f88e1..76f6678e847 100644 --- a/src/Functions/FunctionSpaceFillingCurve.h +++ b/src/Functions/FunctionSpaceFillingCurve.h @@ -132,9 +132,7 @@ public: } DataTypes types(tuple_size); for (size_t i = 0; i < tuple_size; i++) - { types[i] = std::make_shared(); - } return std::make_shared(types); } }; diff --git a/src/Functions/FunctionStringOrArrayToT.h b/src/Functions/FunctionStringOrArrayToT.h index 26c740f1fac..40f780d82a8 100644 --- a/src/Functions/FunctionStringOrArrayToT.h +++ b/src/Functions/FunctionStringOrArrayToT.h @@ -71,7 +71,7 @@ public: typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(col->size()); - Impl::vector(col->getChars(), col->getOffsets(), vec_res); + Impl::vector(col->getChars(), col->getOffsets(), vec_res, input_rows_count); return col_res; } @@ -80,7 +80,7 @@ public: if (Impl::is_fixed_to_constant) { ResultType res = 0; - Impl::vectorFixedToConstant(col_fixed->getChars(), col_fixed->getN(), res); + Impl::vectorFixedToConstant(col_fixed->getChars(), col_fixed->getN(), res, input_rows_count); return result_type->createColumnConst(col_fixed->size(), toField(res)); } @@ -90,7 +90,7 @@ public: typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(col_fixed->size()); - Impl::vectorFixedToVector(col_fixed->getChars(), col_fixed->getN(), vec_res); + Impl::vectorFixedToVector(col_fixed->getChars(), col_fixed->getN(), vec_res, input_rows_count); return col_res; } @@ -101,7 +101,7 @@ public: typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(col_arr->size()); - Impl::array(col_arr->getOffsets(), vec_res); + Impl::array(col_arr->getOffsets(), vec_res, input_rows_count); return col_res; } @@ -112,7 +112,7 @@ public: vec_res.resize(col_map->size()); const auto & col_nested = col_map->getNestedColumn(); - Impl::array(col_nested.getOffsets(), vec_res); + Impl::array(col_nested.getOffsets(), vec_res, input_rows_count); return col_res; } else if (const ColumnUUID * col_uuid = checkAndGetColumn(column.get())) @@ -120,7 +120,7 @@ public: auto col_res = ColumnVector::create(); typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(col_uuid->size()); - 
Impl::uuid(col_uuid->getData(), input_rows_count, vec_res); + Impl::uuid(col_uuid->getData(), input_rows_count, vec_res, input_rows_count); return col_res; } else if (const ColumnIPv6 * col_ipv6 = checkAndGetColumn(column.get())) @@ -128,7 +128,7 @@ public: auto col_res = ColumnVector::create(); typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(col_ipv6->size()); - Impl::ipv6(col_ipv6->getData(), input_rows_count, vec_res); + Impl::ipv6(col_ipv6->getData(), input_rows_count, vec_res, input_rows_count); return col_res; } else if (const ColumnIPv4 * col_ipv4 = checkAndGetColumn(column.get())) @@ -136,7 +136,7 @@ public: auto col_res = ColumnVector::create(); typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(col_ipv4->size()); - Impl::ipv4(col_ipv4->getData(), input_rows_count, vec_res); + Impl::ipv4(col_ipv4->getData(), input_rows_count, vec_res, input_rows_count); return col_res; } else diff --git a/src/Functions/FunctionStringReplace.h b/src/Functions/FunctionStringReplace.h index b4bcfa514a8..432e03bfe9e 100644 --- a/src/Functions/FunctionStringReplace.h +++ b/src/Functions/FunctionStringReplace.h @@ -45,7 +45,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { ColumnPtr column_haystack = arguments[0].column; column_haystack = column_haystack->convertToFullColumnIfConst(); @@ -70,7 +70,8 @@ public: col_haystack->getChars(), col_haystack->getOffsets(), col_needle_const->getValue(), col_replacement_const->getValue(), - col_res->getChars(), col_res->getOffsets()); + col_res->getChars(), col_res->getOffsets(), + input_rows_count); return col_res; } else if (col_haystack && col_needle_vector && col_replacement_const) @@ -79,7 +80,8 @@ public: col_haystack->getChars(), col_haystack->getOffsets(), col_needle_vector->getChars(), col_needle_vector->getOffsets(), col_replacement_const->getValue(), - col_res->getChars(), col_res->getOffsets()); + col_res->getChars(), col_res->getOffsets(), + input_rows_count); return col_res; } else if (col_haystack && col_needle_const && col_replacement_vector) @@ -88,7 +90,8 @@ public: col_haystack->getChars(), col_haystack->getOffsets(), col_needle_const->getValue(), col_replacement_vector->getChars(), col_replacement_vector->getOffsets(), - col_res->getChars(), col_res->getOffsets()); + col_res->getChars(), col_res->getOffsets(), + input_rows_count); return col_res; } else if (col_haystack && col_needle_vector && col_replacement_vector) @@ -97,7 +100,8 @@ public: col_haystack->getChars(), col_haystack->getOffsets(), col_needle_vector->getChars(), col_needle_vector->getOffsets(), col_replacement_vector->getChars(), col_replacement_vector->getOffsets(), - col_res->getChars(), col_res->getOffsets()); + col_res->getChars(), col_res->getOffsets(), + input_rows_count); return col_res; } else if (col_haystack_fixed && col_needle_const && col_replacement_const) @@ -106,7 +110,8 @@ public: col_haystack_fixed->getChars(), col_haystack_fixed->getN(), col_needle_const->getValue(), col_replacement_const->getValue(), - col_res->getChars(), col_res->getOffsets()); + col_res->getChars(), col_res->getOffsets(), + input_rows_count); return col_res; } else diff --git a/src/Functions/FunctionsBinaryRepresentation.cpp b/src/Functions/FunctionsBinaryRepresentation.cpp index 
f77d2f1f350..b3861f0394d 100644 --- a/src/Functions/FunctionsBinaryRepresentation.cpp +++ b/src/Functions/FunctionsBinaryRepresentation.cpp @@ -632,7 +632,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr & column = arguments[0].column; @@ -646,11 +646,10 @@ public: const ColumnString::Chars & in_vec = col->getChars(); const ColumnString::Offsets & in_offsets = col->getOffsets(); - size_t size = in_offsets.size(); - out_offsets.resize(size); + out_offsets.resize(input_rows_count); size_t max_out_len = 0; - for (size_t i = 0; i < in_offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const size_t len = in_offsets[i] - (i == 0 ? 0 : in_offsets[i - 1]) - /* trailing zero symbol that is always added in ColumnString and that is ignored while decoding */ 1; @@ -662,7 +661,7 @@ public: char * pos = begin; size_t prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t new_offset = in_offsets[i]; @@ -691,15 +690,14 @@ public: const ColumnString::Chars & in_vec = col_fix_string->getChars(); const size_t n = col_fix_string->getN(); - size_t size = col_fix_string->size(); - out_offsets.resize(size); - out_vec.resize(((n + word_size - 1) / word_size + /* trailing zero symbol that is always added by Impl::decode */ 1) * size); + out_offsets.resize(input_rows_count); + out_vec.resize(((n + word_size - 1) / word_size + /* trailing zero symbol that is always added by Impl::decode */ 1) * input_rows_count); char * begin = reinterpret_cast(out_vec.data()); char * pos = begin; size_t prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t new_offset = prev_offset + n; diff --git a/src/Functions/FunctionsBitToArray.cpp b/src/Functions/FunctionsBitToArray.cpp index beaaccad6db..0a14516268a 100644 --- a/src/Functions/FunctionsBitToArray.cpp +++ b/src/Functions/FunctionsBitToArray.cpp @@ -60,17 +60,17 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { ColumnPtr res; - if (!((res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)))) + if (!((res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", arguments[0].column->getName(), getName()); @@ -98,7 +98,7 @@ private: } template - ColumnPtr executeType(const 
ColumnsWithTypeAndName & columns) const + ColumnPtr executeType(const ColumnsWithTypeAndName & columns, size_t input_rows_count) const { if (const ColumnVector * col_from = checkAndGetColumn>(columns[0].column.get())) { @@ -107,13 +107,12 @@ private: const typename ColumnVector::Container & vec_from = col_from->getData(); ColumnString::Chars & data_to = col_to->getChars(); ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = vec_from.size(); - data_to.resize(size * 2); - offsets_to.resize(size); + data_to.resize(input_rows_count * 2); + offsets_to.resize(input_rows_count); WriteBufferFromVector buf_to(data_to); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { writeBitmask(vec_from[i], buf_to); writeChar(0, buf_to); @@ -244,7 +243,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } template - ColumnPtr executeType(const IColumn * column) const + ColumnPtr executeType(const IColumn * column, size_t input_rows_count) const { const ColumnVector * col_from = checkAndGetColumn>(column); if (!col_from) @@ -257,13 +256,12 @@ public: auto & result_array_offsets_data = result_array_offsets->getData(); auto & vec_from = col_from->getData(); - size_t size = vec_from.size(); - result_array_offsets_data.resize(size); - result_array_values_data.reserve(size * 2); + result_array_offsets_data.resize(input_rows_count); + result_array_values_data.reserve(input_rows_count * 2); using UnsignedType = make_unsigned_t; - for (size_t row = 0; row < size; ++row) + for (size_t row = 0; row < input_rows_count; ++row) { UnsignedType x = static_cast(vec_from[row]); @@ -302,24 +300,24 @@ public: return result_column; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const IColumn * in_column = arguments[0].column.get(); ColumnPtr result_column; - if (!((result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)) - || (result_column = executeType(in_column)))) + if (!((result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)) + || (result_column = executeType(in_column, input_rows_count)))) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column 
{} of first argument of function {}", diff --git a/src/Functions/FunctionsBitmap.h b/src/Functions/FunctionsBitmap.h index 92ec71a3118..12b2b1a662a 100644 --- a/src/Functions/FunctionsBitmap.h +++ b/src/Functions/FunctionsBitmap.h @@ -155,7 +155,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /* input_rows_count */) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const IDataType * from_type = arguments[0].type.get(); const auto * array_type = typeid_cast(from_type); @@ -165,21 +165,21 @@ public: WhichDataType which(nested_type); if (which.isUInt8()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isUInt16()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isUInt32()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isUInt64()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isInt8()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isInt16()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isInt32()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else if (which.isInt64()) - return executeBitmapData(argument_types, arguments); + return executeBitmapData(argument_types, arguments, input_rows_count); else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Unexpected type {} of argument of function {}", from_type->getName(), getName()); @@ -187,7 +187,7 @@ public: private: template - ColumnPtr executeBitmapData(DataTypes & argument_types, const ColumnsWithTypeAndName & arguments) const + ColumnPtr executeBitmapData(DataTypes & argument_types, const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const { // input data const ColumnArray * array = typeid_cast(arguments[0].column.get()); @@ -203,10 +203,10 @@ private: AggregateFunctionPtr bitmap_function = AggregateFunctionFactory::instance().get( AggregateFunctionGroupBitmapData::name(), action, argument_types, params_row, properties); auto col_to = ColumnAggregateFunction::create(bitmap_function); - col_to->reserve(offsets.size()); + col_to->reserve(input_rows_count); size_t pos = 0; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { col_to->insertDefault(); AggregateFunctionGroupBitmapData & bitmap_data diff --git a/src/Functions/FunctionsCharsetClassification.cpp b/src/Functions/FunctionsCharsetClassification.cpp index 0a332ab70a9..f01ede64cc0 100644 --- a/src/Functions/FunctionsCharsetClassification.cpp +++ b/src/Functions/FunctionsCharsetClassification.cpp @@ -23,7 +23,7 @@ namespace constexpr size_t max_string_size = 1UL << 15; template - ALWAYS_INLINE inline Float64 naiveBayes( + Float64 naiveBayes( const FrequencyHolder::EncodingMap & standard, const ModelMap & model, Float64 max_result) @@ -51,7 +51,7 @@ namespace 
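// --- An illustrative sketch, not part of the patch: several helpers in the hunks above and below
// --- (naiveBayes, calculateStats, and similar) simply drop the ALWAYS_INLINE marker. In ClickHouse
// --- that macro is commonly a force-inline attribute; for large helpers a plain function lets the
// --- optimizer decide. The macro and function names here are hypothetical, not ClickHouse API.
#if defined(__GNUC__) || defined(__clang__)
#    define MY_ALWAYS_INLINE __attribute__((__always_inline__)) inline
#else
#    define MY_ALWAYS_INLINE inline
#endif

// Before: inlining is forced at every call site, even where it only bloats code.
MY_ALWAYS_INLINE double scoreOld(double x) { return x * x; }

// After: an ordinary function; the compiler still inlines it when profitable.
double scoreNew(double x) { return x * x; }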
/// Count how many times each bigram occurs in the text. template - ALWAYS_INLINE inline void calculateStats( + void calculateStats( const UInt8 * data, const size_t size, ModelMap & model) @@ -77,24 +77,25 @@ struct CharsetClassificationImpl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { const auto & encodings_freq = FrequencyHolder::getInstance().getEncodingsFrequency(); if constexpr (detect_language) /// 2 chars for ISO code + 1 zero byte - res_data.reserve(offsets.size() * 3); + res_data.reserve(input_rows_count * 3); else /// Mean charset length is 8 - res_data.reserve(offsets.size() * 8); + res_data.reserve(input_rows_count * 8); - res_offsets.resize(offsets.size()); + res_offsets.resize(input_rows_count); size_t current_result_offset = 0; double zero_frequency_log = log(zero_frequency); - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const UInt8 * str = data.data() + offsets[i - 1]; const size_t str_len = offsets[i] - offsets[i - 1] - 1; diff --git a/src/Functions/FunctionsCodingIP.cpp b/src/Functions/FunctionsCodingIP.cpp index 0a97d029f84..0416df8f462 100644 --- a/src/Functions/FunctionsCodingIP.cpp +++ b/src/Functions/FunctionsCodingIP.cpp @@ -341,7 +341,7 @@ class FunctionIPv4NumToString : public IFunction { private: template - ColumnPtr executeTyped(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const + ColumnPtr executeTyped(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const { using ColumnType = ColumnVector; @@ -356,12 +356,12 @@ private: ColumnString::Chars & vec_res = col_res->getChars(); ColumnString::Offsets & offsets_res = col_res->getOffsets(); - vec_res.resize(vec_in.size() * (IPV4_MAX_TEXT_LENGTH + 1)); /// the longest value is: 255.255.255.255\0 - offsets_res.resize(vec_in.size()); + vec_res.resize(input_rows_count * (IPV4_MAX_TEXT_LENGTH + 1)); /// the longest value is: 255.255.255.255\0 + offsets_res.resize(input_rows_count); char * begin = reinterpret_cast(vec_res.data()); char * pos = begin; - for (size_t i = 0; i < vec_in.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { DB::formatIPv4(reinterpret_cast(&vec_in[i]), sizeof(ArgType), pos, mask_tail_octets, "xxx"); offsets_res[i] = pos - begin; @@ -532,7 +532,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; @@ -542,11 +542,11 @@ public: auto col_res = ColumnIPv6::create(); auto & vec_res = col_res->getData(); - vec_res.resize(col_in->size()); + vec_res.resize(input_rows_count); const auto & vec_in = col_in->getData(); - for (size_t i = 0; i < vec_res.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) mapIPv4ToIPv6(vec_in[i], reinterpret_cast(&vec_res[i].toUnderType())); return col_res; @@ -557,7 +557,7 @@ public: auto col_res = ColumnFixedString::create(IPV6_BINARY_LENGTH); auto & vec_res = col_res->getChars(); - vec_res.resize(col_in->size() * IPV6_BINARY_LENGTH); + vec_res.resize(input_rows_count * 
IPV6_BINARY_LENGTH); const auto & vec_in = col_in->getData(); @@ -742,7 +742,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr & column = arguments[0].column; @@ -751,13 +751,13 @@ public: auto col_res = ColumnUInt64::create(); ColumnUInt64::Container & vec_res = col_res->getData(); - vec_res.resize(col->size()); + vec_res.resize(input_rows_count); const ColumnString::Chars & vec_src = col->getChars(); const ColumnString::Offsets & offsets_src = col->getOffsets(); size_t prev_offset = 0; - for (size_t i = 0; i < vec_res.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t current_offset = offsets_src[i]; size_t string_size = current_offset - prev_offset - 1; /// mind the terminating zero byte @@ -1054,7 +1054,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnString * input_column = checkAndGetColumn(arguments[0].column.get()); @@ -1067,14 +1067,14 @@ public: auto col_res = ColumnUInt8::create(); ColumnUInt8::Container & vec_res = col_res->getData(); - vec_res.resize(input_column->size()); + vec_res.resize(input_rows_count); const ColumnString::Chars & vec_src = input_column->getChars(); const ColumnString::Offsets & offsets_src = input_column->getOffsets(); size_t prev_offset = 0; UInt32 result = 0; - for (size_t i = 0; i < vec_res.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { vec_res[i] = DB::parseIPv4whole(reinterpret_cast(&vec_src[prev_offset]), reinterpret_cast(&result)); prev_offset = offsets_src[i]; @@ -1110,7 +1110,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnString * input_column = checkAndGetColumn(arguments[0].column.get()); if (!input_column) @@ -1122,14 +1122,14 @@ public: auto col_res = ColumnUInt8::create(); ColumnUInt8::Container & vec_res = col_res->getData(); - vec_res.resize(input_column->size()); + vec_res.resize(input_rows_count); const ColumnString::Chars & vec_src = input_column->getChars(); const ColumnString::Offsets & offsets_src = input_column->getOffsets(); size_t prev_offset = 0; char buffer[IPV6_BINARY_LENGTH]; - for (size_t i = 0; i < vec_res.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { vec_res[i] = DB::parseIPv6whole(reinterpret_cast(&vec_src[prev_offset]), reinterpret_cast(&vec_src[offsets_src[i] - 1]), diff --git a/src/Functions/FunctionsCodingUUID.cpp b/src/Functions/FunctionsCodingUUID.cpp index 83fdcbc4af9..179ba1bf97a 100644 --- a/src/Functions/FunctionsCodingUUID.cpp +++ b/src/Functions/FunctionsCodingUUID.cpp @@ -177,7 +177,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const 
DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; @@ -189,21 +189,20 @@ public: "Illegal type {} of column {} argument of function {}, expected FixedString({})", col_type_name.type->getName(), col_in->getName(), getName(), uuid_bytes_length); - const auto size = col_in->size(); const auto & vec_in = col_in->getChars(); auto col_res = ColumnString::create(); ColumnString::Chars & vec_res = col_res->getChars(); ColumnString::Offsets & offsets_res = col_res->getOffsets(); - vec_res.resize(size * (uuid_text_length + 1)); - offsets_res.resize(size); + vec_res.resize(input_rows_count * (uuid_text_length + 1)); + offsets_res.resize(input_rows_count); size_t src_offset = 0; size_t dst_offset = 0; const UUIDSerializer uuid_serializer(variant); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { uuid_serializer.deserialize(&vec_in[src_offset], &vec_res[dst_offset]); src_offset += uuid_bytes_length; @@ -256,7 +255,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; @@ -266,17 +265,16 @@ public: { const auto & vec_in = col_in->getChars(); const auto & offsets_in = col_in->getOffsets(); - const size_t size = offsets_in.size(); auto col_res = ColumnFixedString::create(uuid_bytes_length); ColumnString::Chars & vec_res = col_res->getChars(); - vec_res.resize(size * uuid_bytes_length); + vec_res.resize(input_rows_count * uuid_bytes_length); size_t src_offset = 0; size_t dst_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { /// If string has incorrect length - then return zero UUID. /// If string has correct length but contains something not like UUID - implementation specific behaviour. 
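// --- An illustrative sketch, not part of the patch: the recurring change in these hunks is that
// --- executeImpl-style code now sizes its result and bounds its row loop with the input_rows_count
// --- argument instead of re-deriving the count from an input column. For full columns the two
// --- presumably compute the same value; the explicit count keeps every code path uniform.
// --- The types below are hypothetical stand-ins (std::vector instead of ClickHouse columns).
#include <cstddef>
#include <cstdint>
#include <vector>

using Column = std::vector<uint64_t>;

// Before: the helper asks the input column for its size.
Column executeOld(const Column & in)
{
    Column out;
    out.resize(in.size());
    for (size_t i = 0; i < in.size(); ++i)
        out[i] = in[i] * 2;
    return out;
}

// After: the caller passes the block's row count and the helper uses it everywhere.
Column executeNew(const Column & in, size_t input_rows_count)
{
    Column out;
    out.resize(input_rows_count);
    for (size_t i = 0; i < input_rows_count; ++i)
        out[i] = in[i] * 2;
    return out;
}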
@@ -300,18 +298,17 @@ public: "Illegal type {} of column {} argument of function {}, expected FixedString({})", col_type_name.type->getName(), col_in_fixed->getName(), getName(), uuid_text_length); - const auto size = col_in_fixed->size(); const auto & vec_in = col_in_fixed->getChars(); auto col_res = ColumnFixedString::create(uuid_bytes_length); ColumnString::Chars & vec_res = col_res->getChars(); - vec_res.resize(size * uuid_bytes_length); + vec_res.resize(input_rows_count * uuid_bytes_length); size_t src_offset = 0; size_t dst_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { uuid_serializer.serialize(&vec_in[src_offset], &vec_res[dst_offset]); src_offset += uuid_text_length; @@ -359,7 +356,7 @@ public: return std::make_shared(uuid_bytes_length); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; @@ -370,16 +367,15 @@ public: { const auto & vec_in = col_in->getData(); const UUID * uuids = vec_in.data(); - const size_t size = vec_in.size(); auto col_res = ColumnFixedString::create(uuid_bytes_length); ColumnString::Chars & vec_res = col_res->getChars(); - vec_res.resize(size * uuid_bytes_length); + vec_res.resize(input_rows_count * uuid_bytes_length); size_t dst_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { uint64_t hiBytes = DB::UUIDHelpers::getHighBytes(uuids[i]); uint64_t loBytes = DB::UUIDHelpers::getLowBytes(uuids[i]); @@ -448,7 +444,7 @@ public: return std::make_shared(datetime_scale, timezone); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; @@ -457,12 +453,11 @@ public: { const auto & vec_in = col_in->getData(); const UUID * uuids = vec_in.data(); - const size_t size = vec_in.size(); - auto col_res = ColumnDateTime64::create(size, datetime_scale); + auto col_res = ColumnDateTime64::create(input_rows_count, datetime_scale); auto & vec_res = col_res->getData(); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const uint64_t hiBytes = DB::UUIDHelpers::getHighBytes(uuids[i]); const uint64_t ms = ((hiBytes & 0xf000) == 0x7000) ? 
(hiBytes >> 16) : 0; diff --git a/src/Functions/FunctionsDecimalArithmetics.h b/src/Functions/FunctionsDecimalArithmetics.h index e26ad7362b3..9b9045f7c69 100644 --- a/src/Functions/FunctionsDecimalArithmetics.h +++ b/src/Functions/FunctionsDecimalArithmetics.h @@ -151,36 +151,36 @@ struct Processor template void NO_INLINE vectorConstant(const FirstArgVectorType & vec_first, const SecondArgType second_value, - PaddedPODArray & vec_to, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale) const + PaddedPODArray & vec_to, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale, + size_t input_rows_count) const { - size_t size = vec_first.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = transform.execute(vec_first[i], second_value, scale_a, scale_b, result_scale); } template void NO_INLINE vectorVector(const FirstArgVectorType & vec_first, const SecondArgVectorType & vec_second, - PaddedPODArray & vec_to, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale) const + PaddedPODArray & vec_to, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale, + size_t input_rows_count) const { - size_t size = vec_first.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = transform.execute(vec_first[i], vec_second[i], scale_a, scale_b, result_scale); } template void NO_INLINE constantVector(const FirstArgType & first_value, const SecondArgVectorType & vec_second, - PaddedPODArray & vec_to, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale) const + PaddedPODArray & vec_to, UInt16 scale_a, UInt16 scale_b, UInt16 result_scale, + size_t input_rows_count) const { - size_t size = vec_second.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = transform.execute(first_value, vec_second[i], scale_a, scale_b, result_scale); } }; @@ -189,7 +189,7 @@ struct Processor template struct DecimalArithmeticsImpl { - static ColumnPtr execute(Transform transform, const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) + static ColumnPtr execute(Transform transform, const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) { using FirstArgValueType = typename FirstArgType::FieldType; using FirstArgColumnType = typename FirstArgType::ColumnType; @@ -214,13 +214,13 @@ struct DecimalArithmeticsImpl if (first_col) { if (second_col_const) - op.vectorConstant(first_col->getData(), second_col_const->template getValue(), col_to->getData(), scale_a, scale_b, result_scale); + op.vectorConstant(first_col->getData(), second_col_const->template getValue(), col_to->getData(), scale_a, scale_b, result_scale, input_rows_count); else - op.vectorVector(first_col->getData(), second_col->getData(), col_to->getData(), scale_a, scale_b, result_scale); + op.vectorVector(first_col->getData(), second_col->getData(), col_to->getData(), scale_a, scale_b, result_scale, input_rows_count); } else if (first_col_const) { - op.constantVector(first_col_const->template getValue(), second_col->getData(), col_to->getData(), scale_a, scale_b, result_scale); + op.constantVector(first_col_const->template getValue(), second_col->getData(), col_to->getData(), scale_a, scale_b, result_scale, input_rows_count); } else { @@ -293,14 +293,14 @@ public: bool useDefaultImplementationForConstants() 
const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {2}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { - return resolveOverload(arguments, result_type); + return resolveOverload(arguments, result_type, input_rows_count); } private: // long resolver to call proper templated func - ColumnPtr resolveOverload(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const + ColumnPtr resolveOverload(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { WhichDataType which_dividend(arguments[0].type.get()); WhichDataType which_divisor(arguments[1].type.get()); @@ -309,26 +309,26 @@ private: { using DividendType = DataTypeDecimal32; if (which_divisor.isDecimal32()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal64()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal128()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal256()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); } else if (which_dividend.isDecimal64()) { using DividendType = DataTypeDecimal64; if (which_divisor.isDecimal32()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal64()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal128()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal256()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); } @@ -336,13 +336,13 @@ private: { using DividendType = DataTypeDecimal128; if (which_divisor.isDecimal32()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal64()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal128()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal256()) - return 
DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); } @@ -350,13 +350,13 @@ private: { using DividendType = DataTypeDecimal256; if (which_divisor.isDecimal32()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal64()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal128()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); else if (which_divisor.isDecimal256()) - return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type); + return DecimalArithmeticsImpl::execute(Transform{}, arguments, result_type, input_rows_count); } diff --git a/src/Functions/FunctionsEmbeddedDictionaries.h b/src/Functions/FunctionsEmbeddedDictionaries.h index 2f270bf999a..f27934ce5a9 100644 --- a/src/Functions/FunctionsEmbeddedDictionaries.h +++ b/src/Functions/FunctionsEmbeddedDictionaries.h @@ -181,7 +181,7 @@ public: bool isDeterministic() const override { return false; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { /// The dictionary key that defines the "point of view". std::string dict_key; @@ -205,10 +205,9 @@ public: const typename ColumnVector::Container & vec_from = col_from->getData(); typename ColumnVector::Container & vec_to = col_to->getData(); - size_t size = vec_from.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = Transform::apply(vec_from[i], dict); return col_to; @@ -273,7 +272,7 @@ public: bool isDeterministic() const override { return false; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { /// The dictionary key that defines the "point of view". 
std::string dict_key; @@ -303,10 +302,9 @@ public: const typename ColumnVector::Container & vec_from1 = col_vec1->getData(); const typename ColumnVector::Container & vec_from2 = col_vec2->getData(); typename ColumnUInt8::Container & vec_to = col_to->getData(); - size_t size = vec_from1.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = Transform::apply(vec_from1[i], vec_from2[i], dict); return col_to; @@ -318,10 +316,9 @@ public: const typename ColumnVector::Container & vec_from1 = col_vec1->getData(); const T const_from2 = col_const2->template getValue(); typename ColumnUInt8::Container & vec_to = col_to->getData(); - size_t size = vec_from1.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = Transform::apply(vec_from1[i], const_from2, dict); return col_to; @@ -333,10 +330,9 @@ public: const T const_from1 = col_const1->template getValue(); const typename ColumnVector::Container & vec_from2 = col_vec2->getData(); typename ColumnUInt8::Container & vec_to = col_to->getData(); - size_t size = vec_from2.size(); - vec_to.resize(size); + vec_to.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) vec_to[i] = Transform::apply(const_from1, vec_from2[i], dict); return col_to; @@ -405,7 +401,7 @@ public: bool isDeterministic() const override { return false; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { /// The dictionary key that defines the "point of view". 
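// --- An illustrative sketch, not part of the patch: the embedded-dictionary hunks above apply the
// --- same change to all three argument shapes (vector/vector, vector/constant, constant/vector);
// --- each branch now sizes its output with the shared input_rows_count rather than one argument's
// --- size. Names and the `+` stand-in for Transform::apply are hypothetical, not ClickHouse API.
#include <cstddef>
#include <cstdint>
#include <vector>

using Vec = std::vector<uint32_t>;

Vec applyVectorVector(const Vec & a, const Vec & b, size_t input_rows_count)
{
    Vec res(input_rows_count);
    for (size_t i = 0; i < input_rows_count; ++i)
        res[i] = a[i] + b[i];   // stand-in for Transform::apply(a[i], b[i], dict)
    return res;
}

Vec applyVectorConstant(const Vec & a, uint32_t b, size_t input_rows_count)
{
    Vec res(input_rows_count);
    for (size_t i = 0; i < input_rows_count; ++i)
        res[i] = a[i] + b;
    return res;
}

Vec applyConstantVector(uint32_t a, const Vec & b, size_t input_rows_count)
{
    Vec res(input_rows_count);
    for (size_t i = 0; i < input_rows_count; ++i)
        res[i] = a + b[i];
    return res;
}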
std::string dict_key; @@ -432,11 +428,10 @@ public: auto & res_values = col_values->getData(); const typename ColumnVector::Container & vec_from = col_from->getData(); - size_t size = vec_from.size(); - res_offsets.resize(size); - res_values.reserve(size * 4); + res_offsets.resize(input_rows_count); + res_values.reserve(input_rows_count * 4); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { T cur = vec_from[i]; for (size_t depth = 0; cur && depth < DBMS_HIERARCHICAL_DICTIONARY_MAX_DEPTH; ++depth) diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 848856c500f..c35df8ba72d 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -125,7 +125,7 @@ public: } String error; - for (const auto i : collections::range(0, input_rows_count)) + for (size_t i = 0; i < input_rows_count; ++i) { if (!col_json_const) { @@ -314,7 +314,7 @@ private: static size_t calculateMaxSize(const ColumnString::Offsets & offsets) { size_t max_size = 0; - for (const auto i : collections::range(0, offsets.size())) + for (size_t i = 0; i < offsets.size(); ++i) { size_t size = offsets[i] - offsets[i - 1]; max_size = std::max(max_size, size); diff --git a/src/Functions/FunctionsLanguageClassification.cpp b/src/Functions/FunctionsLanguageClassification.cpp index 55485d41ce0..410eea0f437 100644 --- a/src/Functions/FunctionsLanguageClassification.cpp +++ b/src/Functions/FunctionsLanguageClassification.cpp @@ -31,7 +31,7 @@ extern const int SUPPORT_IS_DISABLED; struct FunctionDetectLanguageImpl { - static ALWAYS_INLINE inline std::string_view codeISO(std::string_view code_string) + static std::string_view codeISO(std::string_view code_string) { if (code_string.ends_with("-Latn")) code_string.remove_suffix(code_string.size() - 5); @@ -63,16 +63,17 @@ struct FunctionDetectLanguageImpl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { /// Constant 3 is based on the fact that in general we need 2 characters for ISO code + 1 zero byte - res_data.reserve(offsets.size() * 3); - res_offsets.resize(offsets.size()); + res_data.reserve(input_rows_count * 3); + res_offsets.resize(input_rows_count); bool is_reliable; size_t res_offset = 0; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const UInt8 * str = data.data() + offsets[i - 1]; const size_t str_len = offsets[i] - offsets[i - 1] - 1; diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index e1814150da6..65d7473b945 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -48,7 +48,7 @@ using UInt8Container = ColumnUInt8::Container; using UInt8ColumnPtrs = std::vector; -MutableColumnPtr buildColumnFromTernaryData(const UInt8Container & ternary_data, const bool make_nullable) +MutableColumnPtr buildColumnFromTernaryData(const UInt8Container & ternary_data, bool make_nullable) { const size_t rows_count = ternary_data.size(); diff --git a/src/Functions/FunctionsProgrammingClassification.cpp b/src/Functions/FunctionsProgrammingClassification.cpp index c01e47ad0d7..bbedef024d5 100644 --- a/src/Functions/FunctionsProgrammingClassification.cpp +++ b/src/Functions/FunctionsProgrammingClassification.cpp @@ -40,17 +40,18 @@ struct FunctionDetectProgrammingLanguageImpl const ColumnString::Chars & data, const 
ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { const auto & programming_freq = FrequencyHolder::getInstance().getProgrammingFrequency(); /// Constant 5 is arbitrary - res_data.reserve(offsets.size() * 5); - res_offsets.resize(offsets.size()); + res_data.reserve(input_rows_count * 5); + res_offsets.resize(input_rows_count); size_t res_offset = 0; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const UInt8 * str = data.data() + offsets[i - 1]; const size_t str_len = offsets[i] - offsets[i - 1] - 1; diff --git a/src/Functions/FunctionsRandom.h b/src/Functions/FunctionsRandom.h index 36448c6f689..83075ca01cb 100644 --- a/src/Functions/FunctionsRandom.h +++ b/src/Functions/FunctionsRandom.h @@ -80,8 +80,7 @@ public: auto col_to = ColumnVector::create(); typename ColumnVector::Container & vec_to = col_to->getData(); - size_t size = input_rows_count; - vec_to.resize(size); + vec_to.resize(input_rows_count); RandImpl::execute(reinterpret_cast(vec_to.data()), vec_to.size() * sizeof(ToType)); return col_to; diff --git a/src/Functions/FunctionsStringDistance.cpp b/src/Functions/FunctionsStringDistance.cpp index 48f4aaf4e09..5aae92a8141 100644 --- a/src/Functions/FunctionsStringDistance.cpp +++ b/src/Functions/FunctionsStringDistance.cpp @@ -37,12 +37,12 @@ struct FunctionStringDistanceImpl const ColumnString::Offsets & haystack_offsets, const ColumnString::Chars & needle_data, const ColumnString::Offsets & needle_offsets, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { - size_t size = res.size(); const char * haystack = reinterpret_cast(haystack_data.data()); const char * needle = reinterpret_cast(needle_data.data()); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = Op::process( haystack + haystack_offsets[i - 1], @@ -56,13 +56,13 @@ struct FunctionStringDistanceImpl const String & haystack, const ColumnString::Chars & needle_data, const ColumnString::Offsets & needle_offsets, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { const char * haystack_data = haystack.data(); size_t haystack_size = haystack.size(); const char * needle = reinterpret_cast(needle_data.data()); - size_t size = res.size(); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = Op::process(haystack_data, haystack_size, needle + needle_offsets[i - 1], needle_offsets[i] - needle_offsets[i - 1] - 1); @@ -73,9 +73,10 @@ struct FunctionStringDistanceImpl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, const String & needle, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { - constantVector(needle, data, offsets, res); + constantVector(needle, data, offsets, res, input_rows_count); } }; diff --git a/src/Functions/FunctionsStringHash.cpp b/src/Functions/FunctionsStringHash.cpp index 0bf6e39e651..bd7d45d781a 100644 --- a/src/Functions/FunctionsStringHash.cpp +++ b/src/Functions/FunctionsStringHash.cpp @@ -315,9 +315,9 @@ struct SimHashImpl return getSimHash(finger_vec); } - static void apply(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, size_t shingle_size, PaddedPODArray & res) + static void apply(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, size_t shingle_size, PaddedPODArray & res, size_t input_rows_count) { - for 
(size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const UInt8 * one_data = &data[offsets[i - 1]]; const size_t data_size = offsets[i] - offsets[i - 1] - 1; @@ -543,12 +543,13 @@ struct MinHashImpl PaddedPODArray * res1, PaddedPODArray * res2, ColumnTuple * res1_strings, - ColumnTuple * res2_strings) + ColumnTuple * res2_strings, + size_t input_rows_count) { MinHeap min_heap; MaxHeap max_heap; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const UInt8 * one_data = &data[offsets[i - 1]]; const size_t data_size = offsets[i] - offsets[i - 1] - 1; diff --git a/src/Functions/FunctionsStringHash.h b/src/Functions/FunctionsStringHash.h index fcd4c970a47..f790c660e21 100644 --- a/src/Functions/FunctionsStringHash.h +++ b/src/Functions/FunctionsStringHash.h @@ -135,7 +135,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr & column = arguments[0].column; @@ -152,9 +152,9 @@ public: { auto col_res = ColumnVector::create(); auto & vec_res = col_res->getData(); - vec_res.resize(column->size()); + vec_res.resize(input_rows_count); const ColumnString & col_str_vector = checkAndGetColumn(*column); - Impl::apply(col_str_vector.getChars(), col_str_vector.getOffsets(), shingle_size, vec_res); + Impl::apply(col_str_vector.getChars(), col_str_vector.getOffsets(), shingle_size, vec_res, input_rows_count); return col_res; } else if constexpr (is_arg) // Min hash arg @@ -171,7 +171,7 @@ public: auto max_tuple = ColumnTuple::create(std::move(max_columns)); const ColumnString & col_str_vector = checkAndGetColumn(*column); - Impl::apply(col_str_vector.getChars(), col_str_vector.getOffsets(), shingle_size, num_hashes, nullptr, nullptr, min_tuple.get(), max_tuple.get()); + Impl::apply(col_str_vector.getChars(), col_str_vector.getOffsets(), shingle_size, num_hashes, nullptr, nullptr, min_tuple.get(), max_tuple.get(), input_rows_count); MutableColumns tuple_columns; tuple_columns.emplace_back(std::move(min_tuple)); @@ -184,10 +184,10 @@ public: auto col_h2 = ColumnVector::create(); auto & vec_h1 = col_h1->getData(); auto & vec_h2 = col_h2->getData(); - vec_h1.resize(column->size()); - vec_h2.resize(column->size()); + vec_h1.resize(input_rows_count); + vec_h2.resize(input_rows_count); const ColumnString & col_str_vector = checkAndGetColumn(*column); - Impl::apply(col_str_vector.getChars(), col_str_vector.getOffsets(), shingle_size, num_hashes, &vec_h1, &vec_h2, nullptr, nullptr); + Impl::apply(col_str_vector.getChars(), col_str_vector.getOffsets(), shingle_size, num_hashes, &vec_h1, &vec_h2, nullptr, nullptr, input_rows_count); MutableColumns tuple_columns; tuple_columns.emplace_back(std::move(col_h1)); tuple_columns.emplace_back(std::move(col_h2)); diff --git a/src/Functions/FunctionsStringHashFixedString.cpp b/src/Functions/FunctionsStringHashFixedString.cpp index 01e989a7f2c..9474fe2629e 100644 --- a/src/Functions/FunctionsStringHashFixedString.cpp +++ b/src/Functions/FunctionsStringHashFixedString.cpp @@ -224,7 +224,7 @@ public: bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, 
size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { if (const ColumnString * col_from = checkAndGetColumn(arguments[0].column.get())) { @@ -233,11 +233,10 @@ public: const typename ColumnString::Chars & data = col_from->getChars(); const typename ColumnString::Offsets & offsets = col_from->getOffsets(); auto & chars_to = col_to->getChars(); - const auto size = offsets.size(); - chars_to.resize(size * Impl::length); + chars_to.resize(input_rows_count * Impl::length); ColumnString::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { Impl::apply( reinterpret_cast(&data[current_offset]), @@ -253,11 +252,10 @@ public: { auto col_to = ColumnFixedString::create(Impl::length); const typename ColumnFixedString::Chars & data = col_from_fix->getChars(); - const auto size = col_from_fix->size(); auto & chars_to = col_to->getChars(); const auto length = col_from_fix->getN(); - chars_to.resize(size * Impl::length); - for (size_t i = 0; i < size; ++i) + chars_to.resize(input_rows_count * Impl::length); + for (size_t i = 0; i < input_rows_count; ++i) { Impl::apply( reinterpret_cast(&data[i * length]), length, reinterpret_cast(&chars_to[i * Impl::length])); @@ -268,11 +266,10 @@ public: { auto col_to = ColumnFixedString::create(Impl::length); const typename ColumnIPv6::Container & data = col_from_ip->getData(); - const auto size = col_from_ip->size(); auto & chars_to = col_to->getChars(); const auto length = sizeof(IPv6::UnderlyingType); - chars_to.resize(size * Impl::length); - for (size_t i = 0; i < size; ++i) + chars_to.resize(input_rows_count * Impl::length); + for (size_t i = 0; i < input_rows_count; ++i) { Impl::apply( reinterpret_cast(&data[i]), length, reinterpret_cast(&chars_to[i * Impl::length])); diff --git a/src/Functions/FunctionsStringSimilarity.cpp b/src/Functions/FunctionsStringSimilarity.cpp index 7b3f2337c89..5e26e4ad482 100644 --- a/src/Functions/FunctionsStringSimilarity.cpp +++ b/src/Functions/FunctionsStringSimilarity.cpp @@ -90,7 +90,7 @@ struct NgramDistanceImpl ((cont[Offset + I] = std::tolower(cont[Offset + I])), ...); } - static ALWAYS_INLINE size_t readASCIICodePoints(CodePoint * code_points, const char *& pos, const char * end) + static size_t readASCIICodePoints(CodePoint * code_points, const char *& pos, const char * end) { /// Offset before which we copy some data. constexpr size_t padding_offset = default_padding - N + 1; @@ -120,7 +120,7 @@ struct NgramDistanceImpl return default_padding; } - static ALWAYS_INLINE size_t readUTF8CodePoints(CodePoint * code_points, const char *& pos, const char * end) + static size_t readUTF8CodePoints(CodePoint * code_points, const char *& pos, const char * end) { /// The same copying as described in the function above. 
memcpy(code_points, code_points + default_padding - N + 1, roundUpToPowerOfTwoOrZero(N - 1) * sizeof(CodePoint)); @@ -195,7 +195,7 @@ struct NgramDistanceImpl } template - static ALWAYS_INLINE inline size_t calculateNeedleStats( + static inline size_t calculateNeedleStats( const char * data, const size_t size, NgramCount * ngram_stats, @@ -228,7 +228,7 @@ struct NgramDistanceImpl } template - static ALWAYS_INLINE inline UInt64 calculateHaystackStatsAndMetric( + static inline UInt64 calculateHaystackStatsAndMetric( const char * data, const size_t size, NgramCount * ngram_stats, @@ -318,9 +318,9 @@ struct NgramDistanceImpl const ColumnString::Offsets & haystack_offsets, const ColumnString::Chars & needle_data, const ColumnString::Offsets & needle_offsets, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { - const size_t haystack_offsets_size = haystack_offsets.size(); size_t prev_haystack_offset = 0; size_t prev_needle_offset = 0; @@ -331,7 +331,7 @@ struct NgramDistanceImpl std::unique_ptr needle_ngram_storage(new UInt16[max_string_size]); std::unique_ptr haystack_ngram_storage(new UInt16[max_string_size]); - for (size_t i = 0; i < haystack_offsets_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * haystack = reinterpret_cast(&haystack_data[prev_haystack_offset]); const size_t haystack_size = haystack_offsets[i] - prev_haystack_offset - 1; @@ -391,12 +391,13 @@ struct NgramDistanceImpl std::string haystack, const ColumnString::Chars & needle_data, const ColumnString::Offsets & needle_offsets, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { /// For symmetric version it is better to use vector_constant if constexpr (symmetric) { - vectorConstant(needle_data, needle_offsets, std::move(haystack), res); + vectorConstant(needle_data, needle_offsets, std::move(haystack), res, input_rows_count); } else { @@ -404,7 +405,6 @@ struct NgramDistanceImpl haystack.resize(haystack_size + default_padding); /// For logic explanation see vector_vector function. 
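// --- An illustrative sketch, not part of the patch: many hunks in this section walk a ColumnString
// --- by pairing its flat character buffer with an offsets array. Row i occupies
// --- [prev_offset, offsets[i]) and ends with a terminating zero byte, hence the recurring
// --- "offsets[i] - prev_offset - 1"; the loop bound is now the explicit input_rows_count.
// --- The container types below are hypothetical stand-ins, not ClickHouse API.
#include <cstddef>
#include <string_view>
#include <vector>

std::vector<std::string_view> splitRows(const std::vector<char> & chars,
                                        const std::vector<size_t> & offsets,
                                        size_t input_rows_count)
{
    std::vector<std::string_view> rows;
    rows.reserve(input_rows_count);

    size_t prev_offset = 0;
    for (size_t i = 0; i < input_rows_count; ++i)
    {
        // Bytes of row i are chars[prev_offset .. offsets[i] - 2]; chars[offsets[i] - 1] is the zero byte.
        rows.emplace_back(chars.data() + prev_offset, offsets[i] - prev_offset - 1);
        prev_offset = offsets[i];
    }
    return rows;
}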
- const size_t needle_offsets_size = needle_offsets.size(); size_t prev_offset = 0; std::unique_ptr common_stats{new NgramCount[map_size]{}}; @@ -412,7 +412,7 @@ struct NgramDistanceImpl std::unique_ptr needle_ngram_storage(new UInt16[max_string_size]); std::unique_ptr haystack_ngram_storage(new UInt16[max_string_size]); - for (size_t i = 0; i < needle_offsets_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * needle = reinterpret_cast(&needle_data[prev_offset]); const size_t needle_size = needle_offsets[i] - prev_offset - 1; @@ -456,7 +456,8 @@ struct NgramDistanceImpl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, std::string needle, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { /// zeroing our map std::unique_ptr common_stats{new NgramCount[map_size]{}}; @@ -472,7 +473,7 @@ struct NgramDistanceImpl size_t distance = needle_stats_size; size_t prev_offset = 0; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const UInt8 * haystack = &data[prev_offset]; const size_t haystack_size = offsets[i] - prev_offset - 1; diff --git a/src/Functions/FunctionsStringSimilarity.h b/src/Functions/FunctionsStringSimilarity.h index e148730054d..c2cf7137286 100644 --- a/src/Functions/FunctionsStringSimilarity.h +++ b/src/Functions/FunctionsStringSimilarity.h @@ -57,7 +57,7 @@ public: return std::make_shared>(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { using ResultType = typename Impl::ResultType; @@ -90,7 +90,7 @@ public: auto col_res = ColumnVector::create(); typename ColumnVector::Container & vec_res = col_res->getData(); - vec_res.resize(column_haystack->size()); + vec_res.resize(input_rows_count); const ColumnString * col_haystack_vector = checkAndGetColumn(&*column_haystack); const ColumnString * col_needle_vector = checkAndGetColumn(&*column_needle); @@ -110,7 +110,7 @@ public: Impl::max_string_size); } } - Impl::vectorConstant(col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), needle, vec_res); + Impl::vectorConstant(col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), needle, vec_res, input_rows_count); } else if (col_haystack_vector && col_needle_vector) { @@ -119,7 +119,8 @@ public: col_haystack_vector->getOffsets(), col_needle_vector->getChars(), col_needle_vector->getOffsets(), - vec_res); + vec_res, + input_rows_count); } else if (col_haystack_const && col_needle_vector) { @@ -136,7 +137,7 @@ public: Impl::max_string_size); } } - Impl::constantVector(haystack, col_needle_vector->getChars(), col_needle_vector->getOffsets(), vec_res); + Impl::constantVector(haystack, col_needle_vector->getChars(), col_needle_vector->getOffsets(), vec_res, input_rows_count); } else { diff --git a/src/Functions/FunctionsTextClassification.h b/src/Functions/FunctionsTextClassification.h index 90e8af06ccc..d5cba690f81 100644 --- a/src/Functions/FunctionsTextClassification.h +++ b/src/Functions/FunctionsTextClassification.h @@ -55,7 +55,7 @@ public: return arguments[0]; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, 
size_t input_rows_count) const override { const ColumnPtr & column = arguments[0].column; const ColumnString * col = checkAndGetColumn(column.get()); @@ -65,7 +65,7 @@ public: arguments[0].column->getName(), getName()); auto col_res = ColumnString::create(); - Impl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets()); + Impl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets(), input_rows_count); return col_res; } }; @@ -104,7 +104,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override { const ColumnPtr & column = arguments[0].column; const ColumnString * col = checkAndGetColumn(column.get()); @@ -115,9 +115,9 @@ public: auto col_res = ColumnVector::create(); ColumnVector::Container & vec_res = col_res->getData(); - vec_res.resize(col->size()); + vec_res.resize(input_rows_count); - Impl::vector(col->getChars(), col->getOffsets(), vec_res); + Impl::vector(col->getChars(), col->getOffsets(), vec_res, input_rows_count); return col_res; } }; diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index f93a885ee65..77d740803be 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -130,7 +130,7 @@ struct TimeWindowImpl static DataTypePtr getReturnType(const ColumnsWithTypeAndName & arguments, const String & function_name); - static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name); + static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count); }; template @@ -196,7 +196,7 @@ struct TimeWindowImpl return std::make_shared(DataTypes{data_type, data_type}); } - static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto & interval_column = arguments[1]; @@ -214,38 +214,37 @@ struct TimeWindowImpl { /// TODO: add proper support for fractional seconds case IntervalKind::Kind::Second: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Minute: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Hour: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Day: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Week: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Month: - return executeTumble(*time_column_vec, 
std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Quarter: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); case IntervalKind::Kind::Year: - return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + return executeTumble(*time_column_vec, std::get<1>(interval), time_zone, input_rows_count); default: throw Exception(ErrorCodes::SYNTAX_ERROR, "Fraction seconds are unsupported by windows yet"); } } template - static ColumnPtr executeTumble(const ColumnDateTime & time_column, UInt64 num_units, const DateLUTImpl & time_zone) + static ColumnPtr executeTumble(const ColumnDateTime & time_column, UInt64 num_units, const DateLUTImpl & time_zone, size_t input_rows_count) { const auto & time_data = time_column.getData(); - size_t size = time_column.size(); auto start = ColumnVector::create(); auto end = ColumnVector::create(); auto & start_data = start->getData(); auto & end_data = end->getData(); - start_data.resize(size); - end_data.resize(size); - for (size_t i = 0; i != size; ++i) + start_data.resize(input_rows_count); + end_data.resize(input_rows_count); + for (size_t i = 0; i != input_rows_count; ++i) { start_data[i] = ToStartOfTransform::execute(time_data[i], num_units, time_zone); end_data[i] = AddTime::execute(start_data[i], num_units, time_zone); @@ -283,7 +282,7 @@ struct TimeWindowImpl } } - [[maybe_unused]] static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + [[maybe_unused]] static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto which_type = WhichDataType(time_column.type); @@ -296,7 +295,7 @@ struct TimeWindowImpl result_column = time_column.column; } else - result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name); + result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name, input_rows_count); return executeWindowBound(result_column, 0, function_name); } }; @@ -311,7 +310,7 @@ struct TimeWindowImpl return TimeWindowImpl::getReturnType(arguments, function_name); } - [[maybe_unused]] static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String& function_name) + [[maybe_unused]] static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String& function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto which_type = WhichDataType(time_column.type); @@ -324,7 +323,7 @@ struct TimeWindowImpl result_column = time_column.column; } else - result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name); + result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name, input_rows_count); return executeWindowBound(result_column, 1, function_name); } }; @@ -372,7 +371,7 @@ struct TimeWindowImpl return std::make_shared(DataTypes{data_type, data_type}); } - static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto & hop_interval_column = arguments[1]; @@ -396,28 +395,28 @@ struct 
TimeWindowImpl /// TODO: add proper support for fractional seconds case IntervalKind::Kind::Second: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Minute: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Hour: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Day: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Week: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Month: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Quarter: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Year: return executeHop( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); default: throw Exception(ErrorCodes::SYNTAX_ERROR, "Fraction seconds are unsupported by windows yet"); } @@ -425,18 +424,17 @@ struct TimeWindowImpl template static ColumnPtr - executeHop(const ColumnDateTime & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone) + executeHop(const ColumnDateTime & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone, size_t input_rows_count) { const auto & time_data = time_column.getData(); - size_t size = time_column.size(); auto start = ColumnVector::create(); auto end = ColumnVector::create(); auto & start_data = start->getData(); auto & end_data = end->getData(); - start_data.resize(size); - end_data.resize(size); + start_data.resize(input_rows_count); + end_data.resize(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { ToType wstart = ToStartOfTransform::execute(time_data[i], hop_num_units, time_zone); ToType wend = AddTime::execute(wstart, hop_num_units, time_zone); @@ -509,7 +507,7 @@ struct TimeWindowImpl return std::make_shared(); } - static ColumnPtr dispatchForHopColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForHopColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto & hop_interval_column = arguments[1]; @@ -533,28 +531,28 @@ struct TimeWindowImpl /// 
TODO: add proper support for fractional seconds case IntervalKind::Kind::Second: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Minute: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Hour: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Day: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Week: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Month: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Quarter: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); case IntervalKind::Kind::Year: return executeHopSlice( - *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone, input_rows_count); default: throw Exception(ErrorCodes::SYNTAX_ERROR, "Fraction seconds are unsupported by windows yet"); } @@ -563,17 +561,16 @@ struct TimeWindowImpl template static ColumnPtr - executeHopSlice(const ColumnDateTime & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone) + executeHopSlice(const ColumnDateTime & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone, size_t input_rows_count) { Int64 gcd_num_units = std::gcd(hop_num_units, window_num_units); const auto & time_data = time_column.getData(); - size_t size = time_column.size(); auto end = ColumnVector::create(); auto & end_data = end->getData(); - end_data.resize(size); - for (size_t i = 0; i < size; ++i) + end_data.resize(input_rows_count); + for (size_t i = 0; i < input_rows_count; ++i) { ToType wstart = ToStartOfTransform::execute(time_data[i], hop_num_units, time_zone); ToType wend = AddTime::execute(wstart, hop_num_units, time_zone); @@ -593,23 +590,23 @@ struct TimeWindowImpl return end; } - static ColumnPtr dispatchForTumbleColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForTumbleColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { - ColumnPtr column = TimeWindowImpl::dispatchForColumns(arguments, function_name); + ColumnPtr column = TimeWindowImpl::dispatchForColumns(arguments, function_name, input_rows_count); return 
executeWindowBound(column, 1, function_name); } - static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { if (arguments.size() == 2) - return dispatchForTumbleColumns(arguments, function_name); + return dispatchForTumbleColumns(arguments, function_name, input_rows_count); else { const auto & third_column = arguments[2]; if (arguments.size() == 3 && WhichDataType(third_column.type).isString()) - return dispatchForTumbleColumns(arguments, function_name); + return dispatchForTumbleColumns(arguments, function_name, input_rows_count); else - return dispatchForHopColumns(arguments, function_name); + return dispatchForHopColumns(arguments, function_name, input_rows_count); } } }; @@ -639,7 +636,7 @@ struct TimeWindowImpl } } - static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto which_type = WhichDataType(time_column.type); @@ -652,7 +649,7 @@ struct TimeWindowImpl result_column = time_column.column; } else - result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name); + result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name, input_rows_count); return executeWindowBound(result_column, 0, function_name); } }; @@ -667,7 +664,7 @@ struct TimeWindowImpl return TimeWindowImpl::getReturnType(arguments, function_name); } - static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name) + static ColumnPtr dispatchForColumns(const ColumnsWithTypeAndName & arguments, const String & function_name, size_t input_rows_count) { const auto & time_column = arguments[0]; const auto which_type = WhichDataType(time_column.type); @@ -680,7 +677,7 @@ struct TimeWindowImpl result_column = time_column.column; } else - result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name); + result_column = TimeWindowImpl::dispatchForColumns(arguments, function_name, input_rows_count); return executeWindowBound(result_column, 1, function_name); } @@ -693,9 +690,9 @@ DataTypePtr FunctionTimeWindow::getReturnTypeImpl(const ColumnsWithTypeAnd } template -ColumnPtr FunctionTimeWindow::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const +ColumnPtr FunctionTimeWindow::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const { - return TimeWindowImpl::dispatchForColumns(arguments, name); + return TimeWindowImpl::dispatchForColumns(arguments, name, input_rows_count); } } diff --git a/src/Functions/FunctionsTonalityClassification.cpp b/src/Functions/FunctionsTonalityClassification.cpp index a9321819a26..7627c68c057 100644 --- a/src/Functions/FunctionsTonalityClassification.cpp +++ b/src/Functions/FunctionsTonalityClassification.cpp @@ -18,7 +18,7 @@ namespace DB */ struct FunctionDetectTonalityImpl { - static ALWAYS_INLINE inline Float32 detectTonality( + static Float32 detectTonality( const UInt8 * str, const size_t str_len, const FrequencyHolder::Map & emotional_dict) @@ -63,13 +63,13 @@ struct FunctionDetectTonalityImpl static void vector( const ColumnString::Chars & data, const 
ColumnString::Offsets & offsets, - PaddedPODArray & res) + PaddedPODArray & res, + size_t input_rows_count) { const auto & emotional_dict = FrequencyHolder::getInstance().getEmotionalDict(); - size_t size = offsets.size(); size_t prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = detectTonality(data.data() + prev_offset, offsets[i] - 1 - prev_offset, emotional_dict); prev_offset = offsets[i]; diff --git a/src/Functions/Kusto/KqlArraySort.cpp b/src/Functions/Kusto/KqlArraySort.cpp index 11157aa53e6..fb3e6259ee4 100644 --- a/src/Functions/Kusto/KqlArraySort.cpp +++ b/src/Functions/Kusto/KqlArraySort.cpp @@ -73,13 +73,11 @@ public: size_t array_count = arguments.size(); const auto & last_arg = arguments[array_count - 1]; - size_t input_rows_count_local = input_rows_count; - bool null_last = true; if (!isArray(last_arg.type)) { --array_count; - null_last = check_condition(last_arg, context, input_rows_count_local); + null_last = check_condition(last_arg, context, input_rows_count); } ColumnsWithTypeAndName new_args; @@ -119,11 +117,11 @@ public: } auto zipped - = FunctionFactory::instance().get("arrayZip", context)->build(new_args)->execute(new_args, result_type, input_rows_count_local); + = FunctionFactory::instance().get("arrayZip", context)->build(new_args)->execute(new_args, result_type, input_rows_count); ColumnsWithTypeAndName sort_arg({{zipped, std::make_shared(result_type), "zipped"}}); auto sorted_tuple - = FunctionFactory::instance().get(sort_function, context)->build(sort_arg)->execute(sort_arg, result_type, input_rows_count_local); + = FunctionFactory::instance().get(sort_function, context)->build(sort_arg)->execute(sort_arg, result_type, input_rows_count); auto null_type = std::make_shared(std::make_shared()); @@ -139,10 +137,10 @@ public: = std::make_shared(makeNullable(nested_types[i])); ColumnsWithTypeAndName null_array_arg({ - {null_type->createColumnConstWithDefaultValue(input_rows_count_local), null_type, "NULL"}, + {null_type->createColumnConstWithDefaultValue(input_rows_count), null_type, "NULL"}, }); - tuple_columns[i] = fun_array->build(null_array_arg)->execute(null_array_arg, arg_type, input_rows_count_local); + tuple_columns[i] = fun_array->build(null_array_arg)->execute(null_array_arg, arg_type, input_rows_count); tuple_columns[i] = tuple_columns[i]->convertToFullColumnIfConst(); } else @@ -153,7 +151,7 @@ public: auto tuple_coulmn = FunctionFactory::instance() .get("tupleElement", context) ->build(untuple_args) - ->execute(untuple_args, result_type, input_rows_count_local); + ->execute(untuple_args, result_type, input_rows_count); auto out_tmp = ColumnArray::create(nested_types[i]->createColumn()); @@ -183,7 +181,7 @@ public: auto inside_null_type = nested_types[0]; ColumnsWithTypeAndName indexof_args({ arg_of_index, - {inside_null_type->createColumnConstWithDefaultValue(input_rows_count_local), inside_null_type, "NULL"}, + {inside_null_type->createColumnConstWithDefaultValue(input_rows_count), inside_null_type, "NULL"}, }); auto null_index_datetype = std::make_shared(); @@ -192,7 +190,7 @@ public: slice_index.column = FunctionFactory::instance() .get("indexOf", context) ->build(indexof_args) - ->execute(indexof_args, result_type, input_rows_count_local); + ->execute(indexof_args, result_type, input_rows_count); auto null_index_in_array = slice_index.column->get64(0); if (null_index_in_array > 0) @@ -220,15 +218,15 @@ public: ColumnsWithTypeAndName slice_args_right( {{ColumnWithTypeAndName(tuple_columns[i], 
arg_type, "array")}, slice_index}); ColumnWithTypeAndName arr_left{ - fun_slice->build(slice_args_left)->execute(slice_args_left, arg_type, input_rows_count_local), arg_type, ""}; + fun_slice->build(slice_args_left)->execute(slice_args_left, arg_type, input_rows_count), arg_type, ""}; ColumnWithTypeAndName arr_right{ - fun_slice->build(slice_args_right)->execute(slice_args_right, arg_type, input_rows_count_local), arg_type, ""}; + fun_slice->build(slice_args_right)->execute(slice_args_right, arg_type, input_rows_count), arg_type, ""}; ColumnsWithTypeAndName arr_cancat({arr_right, arr_left}); auto out_tmp = FunctionFactory::instance() .get("arrayConcat", context) ->build(arr_cancat) - ->execute(arr_cancat, arg_type, input_rows_count_local); + ->execute(arr_cancat, arg_type, input_rows_count); adjusted_columns[i] = std::move(out_tmp); } } diff --git a/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h index 57f1243537d..bf8241774a6 100644 --- a/src/Functions/PolygonUtils.h +++ b/src/Functions/PolygonUtils.h @@ -124,7 +124,7 @@ public: bool hasEmptyBound() const { return has_empty_bound; } - inline bool ALWAYS_INLINE contains(CoordinateType x, CoordinateType y) const + inline bool contains(CoordinateType x, CoordinateType y) const { Point point(x, y); @@ -167,7 +167,7 @@ public: UInt64 getAllocatedBytes() const; - inline bool ALWAYS_INLINE contains(CoordinateType x, CoordinateType y) const; + bool contains(CoordinateType x, CoordinateType y) const; private: enum class CellType : uint8_t @@ -199,7 +199,7 @@ private: } /// Inner part of the HalfPlane is the left side of initialized vector. - bool ALWAYS_INLINE contains(CoordinateType x, CoordinateType y) const { return a * x + b * y + c >= 0; } + bool contains(CoordinateType x, CoordinateType y) const { return a * x + b * y + c >= 0; } }; struct Cell @@ -233,7 +233,7 @@ private: void calcGridAttributes(Box & box); template - T ALWAYS_INLINE getCellIndex(T row, T col) const { return row * grid_size + col; } + T getCellIndex(T row, T col) const { return row * grid_size + col; } /// Complex case. Will check intersection directly. 
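The PolygonUtils.h hunk above only drops ALWAYS_INLINE from the containment helpers; the test they wrap is the usual signed half-plane check, a*x + b*y + c >= 0 for the inner (left) side of the oriented line, as the retained comment in the diff states. A minimal standalone illustration, with a name that mirrors the diff but is not taken from the ClickHouse header:

#include <cstdio>

// A point (x, y) lies in the inner half-plane of the oriented line a*x + b*y + c = 0
// exactly when the signed expression is non-negative.
struct HalfPlaneSketch
{
    double a, b, c;
    bool contains(double x, double y) const { return a * x + b * y + c >= 0; }
};

int main()
{
    HalfPlaneSketch upper{0.0, 1.0, 0.0};   // the half-plane y >= 0
    std::printf("%d %d\n",
                static_cast<int>(upper.contains(1.0, 2.0)),
                static_cast<int>(upper.contains(1.0, -2.0)));   // prints "1 0"
}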
inline void addComplexPolygonCell(size_t index, const Box & box); diff --git a/src/Functions/ReplaceRegexpImpl.h b/src/Functions/ReplaceRegexpImpl.h index f5fb08f71d2..14f5a2d7932 100644 --- a/src/Functions/ReplaceRegexpImpl.h +++ b/src/Functions/ReplaceRegexpImpl.h @@ -201,15 +201,15 @@ struct ReplaceRegexpImpl const String & needle, const String & replacement, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { if (needle.empty()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Length of the pattern argument in function {} must be greater than 0.", name); ColumnString::Offset res_offset = 0; res_data.reserve(haystack_data.size()); - size_t haystack_size = haystack_offsets.size(); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); re2::RE2::Options regexp_options; regexp_options.set_log_errors(false); /// don't write error messages to stderr @@ -232,13 +232,13 @@ struct ReplaceRegexpImpl case ReplaceRegexpTraits::Replace::All: return ReplaceStringTraits::Replace::All; } }; - ReplaceStringImpl::vectorConstantConstant(haystack_data, haystack_offsets, needle, replacement, res_data, res_offsets); + ReplaceStringImpl::vectorConstantConstant(haystack_data, haystack_offsets, needle, replacement, res_data, res_offsets, input_rows_count); return; } Instructions instructions = createInstructions(replacement, num_captures); - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t from = i > 0 ? haystack_offsets[i - 1] : 0; @@ -257,19 +257,19 @@ struct ReplaceRegexpImpl const ColumnString::Offsets & needle_offsets, const String & replacement, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { assert(haystack_offsets.size() == needle_offsets.size()); ColumnString::Offset res_offset = 0; res_data.reserve(haystack_data.size()); - size_t haystack_size = haystack_offsets.size(); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); re2::RE2::Options regexp_options; regexp_options.set_log_errors(false); /// don't write error messages to stderr - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t hs_from = i > 0 ? haystack_offsets[i - 1] : 0; const char * hs_data = reinterpret_cast(haystack_data.data() + hs_from); @@ -302,7 +302,8 @@ struct ReplaceRegexpImpl const ColumnString::Chars & replacement_data, const ColumnString::Offsets & replacement_offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { assert(haystack_offsets.size() == replacement_offsets.size()); @@ -311,8 +312,7 @@ struct ReplaceRegexpImpl ColumnString::Offset res_offset = 0; res_data.reserve(haystack_data.size()); - size_t haystack_size = haystack_offsets.size(); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); re2::RE2::Options regexp_options; regexp_options.set_log_errors(false); /// don't write error messages to stderr @@ -323,7 +323,7 @@ struct ReplaceRegexpImpl int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures); - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t hs_from = i > 0 ? 
haystack_offsets[i - 1] : 0; const char * hs_data = reinterpret_cast(haystack_data.data() + hs_from); @@ -349,20 +349,20 @@ struct ReplaceRegexpImpl const ColumnString::Chars & replacement_data, const ColumnString::Offsets & replacement_offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { assert(haystack_offsets.size() == needle_offsets.size()); assert(needle_offsets.size() == replacement_offsets.size()); ColumnString::Offset res_offset = 0; res_data.reserve(haystack_data.size()); - size_t haystack_size = haystack_offsets.size(); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); re2::RE2::Options regexp_options; regexp_options.set_log_errors(false); /// don't write error messages to stderr - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t hs_from = i > 0 ? haystack_offsets[i - 1] : 0; const char * hs_data = reinterpret_cast(haystack_data.data() + hs_from); @@ -399,15 +399,15 @@ struct ReplaceRegexpImpl const String & needle, const String & replacement, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { if (needle.empty()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Length of the pattern argument in function {} must be greater than 0.", name); ColumnString::Offset res_offset = 0; - size_t haystack_size = haystack_data.size() / n; res_data.reserve(haystack_data.size()); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); re2::RE2::Options regexp_options; regexp_options.set_log_errors(false); /// don't write error messages to stderr @@ -419,7 +419,7 @@ struct ReplaceRegexpImpl int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures); Instructions instructions = createInstructions(replacement, num_captures); - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t from = i * n; const char * hs_data = reinterpret_cast(haystack_data.data() + from); diff --git a/src/Functions/ReplaceStringImpl.h b/src/Functions/ReplaceStringImpl.h index de3942acbd8..7c56d657b3e 100644 --- a/src/Functions/ReplaceStringImpl.h +++ b/src/Functions/ReplaceStringImpl.h @@ -35,7 +35,8 @@ struct ReplaceStringImpl const String & needle, const String & replacement, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { if (needle.empty()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Length of the pattern argument in function {} must be greater than 0.", name); @@ -46,8 +47,7 @@ struct ReplaceStringImpl ColumnString::Offset res_offset = 0; res_data.reserve(haystack_data.size()); - const size_t haystack_size = haystack_offsets.size(); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); /// The current index in the array of strings. 
size_t i = 0; @@ -124,21 +124,20 @@ struct ReplaceStringImpl const ColumnString::Offsets & needle_offsets, const String & replacement, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { chassert(haystack_offsets.size() == needle_offsets.size()); - const size_t haystack_size = haystack_offsets.size(); - res_data.reserve(haystack_data.size()); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); ColumnString::Offset res_offset = 0; size_t prev_haystack_offset = 0; size_t prev_needle_offset = 0; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * const cur_haystack_data = &haystack_data[prev_haystack_offset]; const size_t cur_haystack_length = haystack_offsets[i] - prev_haystack_offset - 1; @@ -195,24 +194,23 @@ struct ReplaceStringImpl const ColumnString::Chars & replacement_data, const ColumnString::Offsets & replacement_offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { chassert(haystack_offsets.size() == replacement_offsets.size()); if (needle.empty()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Length of the pattern argument in function {} must be greater than 0.", name); - const size_t haystack_size = haystack_offsets.size(); - res_data.reserve(haystack_data.size()); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); ColumnString::Offset res_offset = 0; size_t prev_haystack_offset = 0; size_t prev_replacement_offset = 0; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * const cur_haystack_data = &haystack_data[prev_haystack_offset]; const size_t cur_haystack_length = haystack_offsets[i] - prev_haystack_offset - 1; @@ -267,15 +265,14 @@ struct ReplaceStringImpl const ColumnString::Chars & replacement_data, const ColumnString::Offsets & replacement_offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { chassert(haystack_offsets.size() == needle_offsets.size()); chassert(needle_offsets.size() == replacement_offsets.size()); - const size_t haystack_size = haystack_offsets.size(); - res_data.reserve(haystack_data.size()); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); ColumnString::Offset res_offset = 0; @@ -283,7 +280,7 @@ struct ReplaceStringImpl size_t prev_needle_offset = 0; size_t prev_replacement_offset = 0; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * const cur_haystack_data = &haystack_data[prev_haystack_offset]; const size_t cur_haystack_length = haystack_offsets[i] - prev_haystack_offset - 1; @@ -345,7 +342,8 @@ struct ReplaceStringImpl const String & needle, const String & replacement, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { if (needle.empty()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Length of the pattern argument in function {} must be greater than 0.", name); @@ -355,9 +353,8 @@ struct ReplaceStringImpl const UInt8 * pos = begin; ColumnString::Offset res_offset = 0; - size_t haystack_size = haystack_data.size() / n; res_data.reserve(haystack_data.size()); - res_offsets.resize(haystack_size); + res_offsets.resize(input_rows_count); /// 
The current index in the string array. size_t i = 0; @@ -384,13 +381,13 @@ struct ReplaceStringImpl /// Copy skipped strings without any changes but /// add zero byte to the end of each string. - while (i < haystack_size && begin + n * (i + 1) <= match) + while (i < input_rows_count && begin + n * (i + 1) <= match) { COPY_REST_OF_CURRENT_STRING(); } /// If you have reached the end, it's time to stop - if (i == haystack_size) + if (i == input_rows_count) break; /// Copy unchanged part of current string. diff --git a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h index 68582198ea3..22fec17654c 100644 --- a/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h +++ b/src/Functions/URL/FirstSignificantSubdomainCustomImpl.h @@ -64,7 +64,7 @@ public: return arguments[0].type; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override { const ColumnConst * column_tld_list_name = checkAndGetColumnConstStringOrFixedString(arguments[1].column.get()); FirstSignificantSubdomainCustomLookup tld_lookup(column_tld_list_name->getValue()); @@ -72,7 +72,7 @@ public: if (const ColumnString * col = checkAndGetColumn(&*arguments[0].column)) { auto col_res = ColumnString::create(); - vector(tld_lookup, col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets()); + vector(tld_lookup, col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets(), input_rows_count); return col_res; } else @@ -82,11 +82,11 @@ public: static void vector(FirstSignificantSubdomainCustomLookup & tld_lookup, const ColumnString::Chars & data, const ColumnString::Offsets & offsets, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - size_t size = offsets.size(); - res_offsets.resize(size); - res_data.reserve(size * Extractor::getReserveLengthForElement()); + res_offsets.resize(input_rows_count); + res_data.reserve(input_rows_count * Extractor::getReserveLengthForElement()); size_t prev_offset = 0; size_t res_offset = 0; @@ -95,7 +95,7 @@ public: Pos start; size_t length; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { Extractor::execute(tld_lookup, reinterpret_cast(&data[prev_offset]), offsets[i] - prev_offset - 1, start, length); diff --git a/src/Functions/URL/cutURLParameter.cpp b/src/Functions/URL/cutURLParameter.cpp index 7a2b96ec874..3ab9cad1ea7 100644 --- a/src/Functions/URL/cutURLParameter.cpp +++ b/src/Functions/URL/cutURLParameter.cpp @@ -44,7 +44,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr column = arguments[0].column; const ColumnPtr column_needle = arguments[1].column; @@ -71,7 +71,7 @@ public: ColumnString::Chars & vec_res = col_res->getChars(); ColumnString::Offsets & offsets_res = col_res->getOffsets(); - vector(col->getChars(), col->getOffsets(), col_needle, col_needle_const_array, vec_res, offsets_res); + vector(col->getChars(), col->getOffsets(), col_needle, 
col_needle_const_array, vec_res, offsets_res, input_rows_count); return col_res; } else @@ -130,7 +130,8 @@ public: const ColumnString::Offsets & offsets, const ColumnConst * col_needle, const ColumnArray * col_needle_const_array, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t input_rows_count) { res_data.reserve(data.size()); res_offsets.resize(offsets.size()); @@ -141,7 +142,7 @@ public: size_t res_offset = 0; size_t cur_res_offset; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { cur_offset = offsets[i]; cur_len = cur_offset - prev_offset; diff --git a/src/Functions/URL/port.cpp b/src/Functions/URL/port.cpp index fac46281604..7492ebcb4e9 100644 --- a/src/Functions/URL/port.cpp +++ b/src/Functions/URL/port.cpp @@ -46,7 +46,7 @@ struct FunctionPortImpl : public IFunction } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { UInt16 default_port = 0; if (arguments.size() == 2) @@ -64,7 +64,7 @@ struct FunctionPortImpl : public IFunction typename ColumnVector::Container & vec_res = col_res->getData(); vec_res.resize(url_column->size()); - vector(default_port, url_strs->getChars(), url_strs->getOffsets(), vec_res); + vector(default_port, url_strs->getChars(), url_strs->getOffsets(), vec_res, input_rows_count); return col_res; } else @@ -73,12 +73,10 @@ struct FunctionPortImpl : public IFunction } private: - static void vector(UInt16 default_port, const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(UInt16 default_port, const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); - ColumnString::Offset prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = extractPort(default_port, data, prev_offset, offsets[i] - prev_offset - 1); prev_offset = offsets[i]; diff --git a/src/Functions/UTCTimestampTransform.cpp b/src/Functions/UTCTimestampTransform.cpp index 36ec520068f..35015188078 100644 --- a/src/Functions/UTCTimestampTransform.cpp +++ b/src/Functions/UTCTimestampTransform.cpp @@ -67,7 +67,7 @@ namespace return date_time_type; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { if (arguments.size() != 2) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {}'s arguments number must be 2.", name); @@ -81,11 +81,10 @@ namespace if (WhichDataType(arg1.type).isDateTime()) { const auto & date_time_col = checkAndGetColumn(*arg1.column); - size_t col_size = date_time_col.size(); using ColVecTo = DataTypeDateTime::ColumnType; - typename ColVecTo::MutablePtr result_column = ColVecTo::create(col_size); + typename ColVecTo::MutablePtr result_column = ColVecTo::create(input_rows_count); typename ColVecTo::Container & result_data = result_column->getData(); - for (size_t i = 0; i < col_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { UInt32 date_time_val = date_time_col.getElement(i); LocalDateTime date_time(date_time_val, Name::to ? 
utc_time_zone : DateLUT::instance(time_zone_val)); @@ -97,14 +96,13 @@ namespace else if (WhichDataType(arg1.type).isDateTime64()) { const auto & date_time_col = checkAndGetColumn(*arg1.column); - size_t col_size = date_time_col.size(); const DataTypeDateTime64 * date_time_type = static_cast(arg1.type.get()); UInt32 col_scale = date_time_type->getScale(); Int64 scale_multiplier = DecimalUtils::scaleMultiplier(col_scale); using ColDecimalTo = DataTypeDateTime64::ColumnType; - typename ColDecimalTo::MutablePtr result_column = ColDecimalTo::create(col_size, col_scale); + typename ColDecimalTo::MutablePtr result_column = ColDecimalTo::create(input_rows_count, col_scale); typename ColDecimalTo::Container & result_data = result_column->getData(); - for (size_t i = 0; i < col_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { DateTime64 date_time_val = date_time_col.getElement(i); Int64 seconds = date_time_val.value / scale_multiplier; diff --git a/src/Functions/array/arrayAUC.cpp b/src/Functions/array/arrayAUC.cpp index 3e2a3bf6863..7a61c9d368f 100644 --- a/src/Functions/array/arrayAUC.cpp +++ b/src/Functions/array/arrayAUC.cpp @@ -143,13 +143,13 @@ private: const IColumn & scores, const IColumn & labels, const ColumnArray::Offsets & offsets, - PaddedPODArray & result) + PaddedPODArray & result, + size_t input_rows_count) { - size_t size = offsets.size(); - result.resize(size); + result.resize(input_rows_count); ColumnArray::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { auto next_offset = offsets[i]; result[i] = apply(scores, labels, current_offset, next_offset); @@ -179,7 +179,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { ColumnPtr col1 = arguments[0].column->convertToFullColumnIfConst(); ColumnPtr col2 = arguments[1].column->convertToFullColumnIfConst(); @@ -203,7 +203,8 @@ public: col_array1->getData(), col_array2->getData(), col_array1->getOffsets(), - col_res->getData()); + col_res->getData(), + input_rows_count); return col_res; } diff --git a/src/Functions/array/arrayConcat.cpp b/src/Functions/array/arrayConcat.cpp index cdb361b73b9..768877bac99 100644 --- a/src/Functions/array/arrayConcat.cpp +++ b/src/Functions/array/arrayConcat.cpp @@ -40,7 +40,6 @@ ColumnPtr FunctionArrayConcat::executeImpl(const ColumnsWithTypeAndName & argume if (result_type->onlyNull()) return result_type->createColumnConstWithDefaultValue(input_rows_count); - size_t rows = input_rows_count; size_t num_args = arguments.size(); Columns preprocessed_columns(num_args); @@ -69,7 +68,7 @@ ColumnPtr FunctionArrayConcat::executeImpl(const ColumnsWithTypeAndName & argume } if (const auto * argument_column_array = typeid_cast(argument_column.get())) - sources.emplace_back(GatherUtils::createArraySource(*argument_column_array, is_const, rows)); + sources.emplace_back(GatherUtils::createArraySource(*argument_column_array, is_const, input_rows_count)); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Arguments for function {} must be arrays.", getName()); } diff --git a/src/Functions/array/arrayEnumerateRanked.h b/src/Functions/array/arrayEnumerateRanked.h index ad325fe542a..269a7db6e92 100644 --- a/src/Functions/array/arrayEnumerateRanked.h +++ b/src/Functions/array/arrayEnumerateRanked.h @@ -132,7 +132,7 @@ private: 
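The UTCTimestampTransform hunk above keeps the DateTime64 handling intact: the raw tick value is split into whole seconds and a sub-second remainder using a power-of-ten multiplier derived from the column scale, and only the result column size and loop bound switch to input_rows_count. A self-contained sketch of that arithmetic, where scaleMultiplierSketch is an illustrative stand-in for DecimalUtils::scaleMultiplier:

#include <cstdint>
#include <cstdio>

// 10^scale computed in integers; for DateTime64(3) this is 1000 (ticks are milliseconds).
static std::int64_t scaleMultiplierSketch(std::uint32_t scale)
{
    std::int64_t result = 1;
    for (std::uint32_t i = 0; i < scale; ++i)
        result *= 10;
    return result;
}

int main()
{
    const std::int64_t value = 1723629015123;   // hypothetical DateTime64(3) tick count
    const std::uint32_t scale = 3;

    const std::int64_t multiplier = scaleMultiplierSketch(scale);
    const std::int64_t seconds = value / multiplier;    // whole seconds since the epoch
    const std::int64_t fraction = value % multiplier;   // sub-second part, milliseconds here

    std::printf("seconds=%lld fraction=%lld\n",
                static_cast<long long>(seconds), static_cast<long long>(fraction));
}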
/// Hash a set of keys into a UInt128 value. -static inline UInt128 ALWAYS_INLINE hash128depths(const std::vector & indices, const ColumnRawPtrs & key_columns) +static UInt128 hash128depths(const std::vector & indices, const ColumnRawPtrs & key_columns) { SipHash hash; for (size_t j = 0, keys_size = key_columns.size(); j < keys_size; ++j) diff --git a/src/Functions/array/length.cpp b/src/Functions/array/length.cpp index d81c071b55e..760506194fa 100644 --- a/src/Functions/array/length.cpp +++ b/src/Functions/array/length.cpp @@ -16,40 +16,38 @@ struct LengthImpl { static constexpr auto is_fixed_to_constant = true; - static void vector(const ColumnString::Chars & /*data*/, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(const ColumnString::Chars & /*data*/, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) res[i] = offsets[i] - 1 - offsets[i - 1]; } - static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t n, UInt64 & res) + static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t n, UInt64 & res, size_t) { res = n; } - static void vectorFixedToVector(const ColumnString::Chars & /*data*/, size_t /*n*/, PaddedPODArray & /*res*/) + static void vectorFixedToVector(const ColumnString::Chars & /*data*/, size_t /*n*/, PaddedPODArray & /*res*/, size_t) { } - static void array(const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void array(const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) res[i] = offsets[i] - offsets[i - 1]; } - [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to UUID argument"); } - [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv6 argument"); } - [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv4 argument"); } diff --git a/src/Functions/ascii.cpp b/src/Functions/ascii.cpp index 0d50e5d203b..e65996a90e1 100644 --- a/src/Functions/ascii.cpp +++ b/src/Functions/ascii.cpp @@ -23,47 +23,43 @@ struct AsciiImpl using ReturnType = Int32; - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); - ColumnString::Offset prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = doAscii(data, prev_offset, offsets[i] - prev_offset - 1); prev_offset = offsets[i]; } } - [[noreturn]] static void vectorFixedToConstant(const ColumnString::Chars & 
/*data*/, size_t /*n*/, Int32 & /*res*/) + [[noreturn]] static void vectorFixedToConstant(const ColumnString::Chars &, size_t, Int32 &, size_t) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "vectorFixedToConstant not implemented for function {}", AsciiName::name); } - static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res) + static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res, size_t input_rows_count) { - size_t size = data.size() / n; - - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) res[i] = doAscii(data, i * n, n); } - [[noreturn]] static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray & /*res*/) + [[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to Array argument", AsciiName::name); } - [[noreturn]] static void uuid(const ColumnUUID::Container & /*offsets*/, size_t /*n*/, PaddedPODArray & /*res*/) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to UUID argument", AsciiName::name); } - [[noreturn]] static void ipv6(const ColumnIPv6::Container & /*offsets*/, size_t /*n*/, PaddedPODArray & /*res*/) + [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv6 argument", AsciiName::name); } - [[noreturn]] static void ipv4(const ColumnIPv4::Container & /*offsets*/, size_t /*n*/, PaddedPODArray & /*res*/) + [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv4 argument", AsciiName::name); } diff --git a/src/Functions/indexHint.h b/src/Functions/indexHint.h index a71e1555c78..bc788601cd0 100644 --- a/src/Functions/indexHint.h +++ b/src/Functions/indexHint.h @@ -2,14 +2,12 @@ #include #include #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - /** The `indexHint` function takes any number of any arguments and always returns one. * * This function has a special meaning (see ExpressionAnalyzer, KeyCondition) @@ -64,11 +62,11 @@ public: return DataTypeUInt8().createColumnConst(input_rows_count, 1u); } - void setActions(ActionsDAGPtr actions_) { actions = std::move(actions_); } - const ActionsDAGPtr & getActions() const { return actions; } + void setActions(ActionsDAG actions_) { actions = std::move(actions_); } + const ActionsDAG & getActions() const { return actions; } private: - ActionsDAGPtr actions; + ActionsDAG actions; }; } diff --git a/src/Functions/isValidUTF8.cpp b/src/Functions/isValidUTF8.cpp index d5f5e6a8986..1959502af06 100644 --- a/src/Functions/isValidUTF8.cpp +++ b/src/Functions/isValidUTF8.cpp @@ -219,42 +219,42 @@ SOFTWARE. 
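Several of the hunks above (length, ascii, and the isValidUTF8 change that starts here) rely on the same ColumnString convention: row contents are stored back to back, each terminated by a zero byte, and offsets[i] is the cumulative end position of row i, so the byte length of row i is offsets[i] - offsets[i - 1] - 1. A standalone illustration of that layout, using plain std::vector and std::string in place of ClickHouse column types:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
    // Rows "ab", "", "xyz" stored back to back, each followed by a terminating zero byte.
    const std::string chars = std::string("ab\0\0xyz\0", 8);
    const std::vector<std::uint64_t> offsets = {3, 4, 8};   // cumulative end positions, zero byte included

    std::uint64_t prev_offset = 0;
    for (std::size_t i = 0; i < offsets.size(); ++i)
    {
        const std::uint64_t length = offsets[i] - prev_offset - 1;   // strip the trailing zero byte
        std::printf("row %zu: length %llu, bytes \"%s\"\n",
                    i, static_cast<unsigned long long>(length), chars.substr(prev_offset, length).c_str());
        prev_offset = offsets[i];
    }
}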
static constexpr bool is_fixed_to_constant = false; - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); size_t prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = isValidUTF8(data.data() + prev_offset, offsets[i] - 1 - prev_offset); prev_offset = offsets[i]; } } - static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t /*n*/, UInt8 & /*res*/) {} - - static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res) + static void vectorFixedToConstant(const ColumnString::Chars &, size_t, UInt8 &, size_t) { - size_t size = data.size() / n; - for (size_t i = 0; i < size; ++i) + } + + static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res, size_t input_rows_count) + { + for (size_t i = 0; i < input_rows_count; ++i) res[i] = isValidUTF8(data.data() + i * n, n); } - [[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray &) + [[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to Array argument"); } - [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to UUID argument"); } - [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to IPv6 argument"); } - [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to IPv4 argument"); } diff --git a/src/Functions/keyvaluepair/extractKeyValuePairs.cpp b/src/Functions/keyvaluepair/extractKeyValuePairs.cpp index 1c5164e132d..cc9e57ac186 100644 --- a/src/Functions/keyvaluepair/extractKeyValuePairs.cpp +++ b/src/Functions/keyvaluepair/extractKeyValuePairs.cpp @@ -54,7 +54,7 @@ class ExtractKeyValuePairs : public IFunction return builder.build(); } - ColumnPtr extract(ColumnPtr data_column, std::shared_ptr extractor) const + ColumnPtr extract(ColumnPtr data_column, std::shared_ptr extractor, size_t input_rows_count) const { auto offsets = ColumnUInt64::create(); @@ -63,7 +63,7 @@ class ExtractKeyValuePairs : public IFunction uint64_t offset = 0u; - for (auto i = 0u; i < data_column->size(); i++) + for (auto i = 0u; i < input_rows_count; i++) { auto row = data_column->getDataAt(i).toView(); @@ -97,13 +97,13 @@ public: return std::make_shared(context); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { auto parsed_arguments = ArgumentExtractor::extract(arguments); auto extractor = 
getExtractor(parsed_arguments); - return extract(parsed_arguments.data_column, extractor); + return extract(parsed_arguments.data_column, extractor, input_rows_count); } DataTypePtr getReturnTypeImpl(const DataTypes &) const override diff --git a/src/Functions/lengthUTF8.cpp b/src/Functions/lengthUTF8.cpp index 59a0d532602..97a42816674 100644 --- a/src/Functions/lengthUTF8.cpp +++ b/src/Functions/lengthUTF8.cpp @@ -23,48 +23,42 @@ struct LengthUTF8Impl { static constexpr auto is_fixed_to_constant = false; - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res) + static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, PaddedPODArray & res, size_t input_rows_count) { - size_t size = offsets.size(); - ColumnString::Offset prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = UTF8::countCodePoints(&data[prev_offset], offsets[i] - prev_offset - 1); prev_offset = offsets[i]; } } - static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t /*n*/, UInt64 & /*res*/) + static void vectorFixedToConstant(const ColumnString::Chars & /*data*/, size_t /*n*/, UInt64 & /*res*/, size_t) { } - static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res) + static void vectorFixedToVector(const ColumnString::Chars & data, size_t n, PaddedPODArray & res, size_t input_rows_count) { - size_t size = data.size() / n; - - for (size_t i = 0; i < size; ++i) - { + for (size_t i = 0; i < input_rows_count; ++i) res[i] = UTF8::countCodePoints(&data[i * n], n); - } } - [[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray &) + [[noreturn]] static void array(const ColumnString::Offsets &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to Array argument"); } - [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void uuid(const ColumnUUID::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to UUID argument"); } - [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to IPv6 argument"); } - [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray &) + [[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to IPv4 argument"); } diff --git a/src/Functions/randDistribution.cpp b/src/Functions/randDistribution.cpp index 6a3dac748c1..dc2ddf2ef24 100644 --- a/src/Functions/randDistribution.cpp +++ b/src/Functions/randDistribution.cpp @@ -93,6 +93,9 @@ struct ChiSquaredDistribution static void generate(Float64 degree_of_freedom, ColumnFloat64::Container & container) { + if (degree_of_freedom <= 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument (degrees of freedom) of function {} should be greater than zero", getName()); + auto distribution = std::chi_squared_distribution<>(degree_of_freedom); for (auto & elem : container) elem = distribution(thread_local_rng); @@ -107,6 +110,9 @@ struct StudentTDistribution 
static void generate(Float64 degree_of_freedom, ColumnFloat64::Container & container) { + if (degree_of_freedom <= 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument (degrees of freedom) of function {} should be greater than zero", getName()); + auto distribution = std::student_t_distribution<>(degree_of_freedom); for (auto & elem : container) elem = distribution(thread_local_rng); @@ -121,6 +127,9 @@ struct FisherFDistribution static void generate(Float64 d1, Float64 d2, ColumnFloat64::Container & container) { + if (d1 <= 0 || d2 <= 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument (degrees of freedom) of function {} should be greater than zero", getName()); + auto distribution = std::fisher_f_distribution<>(d1, d2); for (auto & elem : container) elem = distribution(thread_local_rng); @@ -300,7 +309,7 @@ public: } else { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "More than two argument specified for function {}", getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "More than two arguments specified for function {}", getName()); } return res_column; diff --git a/src/Functions/reinterpretAs.cpp b/src/Functions/reinterpretAs.cpp index 5293b688678..81c9d20ce82 100644 --- a/src/Functions/reinterpretAs.cpp +++ b/src/Functions/reinterpretAs.cpp @@ -114,7 +114,7 @@ public: return to_type; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { auto from_type = arguments[0].type; @@ -136,9 +136,9 @@ public: ColumnFixedString * dst_concrete = assert_cast(dst.get()); if (src.isFixedAndContiguous() && src.sizeOfValueIfFixed() == dst_concrete->getN()) - executeContiguousToFixedString(src, *dst_concrete, dst_concrete->getN()); + executeContiguousToFixedString(src, *dst_concrete, dst_concrete->getN(), input_rows_count); else - executeToFixedString(src, *dst_concrete, dst_concrete->getN()); + executeToFixedString(src, *dst_concrete, dst_concrete->getN(), input_rows_count); result = std::move(dst); @@ -156,7 +156,7 @@ public: MutableColumnPtr dst = result_type->createColumn(); ColumnString * dst_concrete = assert_cast(dst.get()); - executeToString(src, *dst_concrete); + executeToString(src, *dst_concrete, input_rows_count); result = std::move(dst); @@ -174,12 +174,11 @@ public: const auto & data_from = col_from->getChars(); const auto & offsets_from = col_from->getOffsets(); - size_t size = offsets_from.size(); auto & vec_res = col_res->getData(); - vec_res.resize_fill(size); + vec_res.resize_fill(input_rows_count); size_t offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t copy_size = std::min(static_cast(sizeof(ToFieldType)), offsets_from[i] - offset - 1); if constexpr (std::endian::native == std::endian::little) @@ -209,7 +208,6 @@ public: const auto& data_from = col_from_fixed->getChars(); size_t step = col_from_fixed->getN(); - size_t size = data_from.size() / step; auto & vec_res = col_res->getData(); size_t offset = 0; @@ -217,11 +215,11 @@ public: size_t index = data_from.size() - copy_size; if (sizeof(ToFieldType) <= step) - vec_res.resize(size); + vec_res.resize(input_rows_count); else - vec_res.resize_fill(size); + vec_res.resize_fill(input_rows_count); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { if constexpr (std::endian::native == std::endian::little) 
memcpy(&vec_res[i], &data_from[offset], copy_size); @@ -251,12 +249,11 @@ public: auto & from = column_from->getData(); auto & to = column_to->getData(); - size_t size = from.size(); - to.resize_fill(size); + to.resize_fill(input_rows_count); static constexpr size_t copy_size = std::min(sizeof(From), sizeof(To)); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { if constexpr (std::endian::native == std::endian::little) memcpy(static_cast(&to[i]), static_cast(&from[i]), copy_size); @@ -307,14 +304,13 @@ private: type.isDecimal(); } - static void NO_INLINE executeToFixedString(const IColumn & src, ColumnFixedString & dst, size_t n) + static void NO_INLINE executeToFixedString(const IColumn & src, ColumnFixedString & dst, size_t n, size_t input_rows_count) { - size_t rows = src.size(); ColumnFixedString::Chars & data_to = dst.getChars(); - data_to.resize_fill(n * rows); + data_to.resize_fill(n * input_rows_count); ColumnFixedString::Offset offset = 0; - for (size_t i = 0; i < rows; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { std::string_view data = src.getDataAt(i).toView(); @@ -327,11 +323,10 @@ private: } } - static void NO_INLINE executeContiguousToFixedString(const IColumn & src, ColumnFixedString & dst, size_t n) + static void NO_INLINE executeContiguousToFixedString(const IColumn & src, ColumnFixedString & dst, size_t n, size_t input_rows_count) { - size_t rows = src.size(); ColumnFixedString::Chars & data_to = dst.getChars(); - data_to.resize(n * rows); + data_to.resize(n * input_rows_count); #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ memcpy(data_to.data(), src.getRawData().data(), data_to.size()); @@ -340,15 +335,14 @@ private: #endif } - static void NO_INLINE executeToString(const IColumn & src, ColumnString & dst) + static void NO_INLINE executeToString(const IColumn & src, ColumnString & dst, size_t input_rows_count) { - size_t rows = src.size(); ColumnString::Chars & data_to = dst.getChars(); ColumnString::Offsets & offsets_to = dst.getOffsets(); - offsets_to.resize(rows); + offsets_to.resize(input_rows_count); ColumnString::Offset offset = 0; - for (size_t i = 0; i < rows; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { StringRef data = src.getDataAt(i); diff --git a/src/Functions/visibleWidth.cpp b/src/Functions/visibleWidth.cpp index ebd4a1ff713..3e70418a456 100644 --- a/src/Functions/visibleWidth.cpp +++ b/src/Functions/visibleWidth.cpp @@ -66,9 +66,8 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & src = arguments[0]; - size_t size = input_rows_count; - auto res_col = ColumnUInt64::create(size); + auto res_col = ColumnUInt64::create(input_rows_count); auto & res_data = assert_cast(*res_col).getData(); /// For simplicity reasons, the function is implemented by serializing into temporary buffer. 
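
The Functions/ hunks above all apply the same mechanical refactoring: per-row helpers such as vector(), vectorFixedToVector() and extract() stop deriving the row count from the column they receive (offsets.size(), data.size() / n, data_column->size()) and instead take the input_rows_count that executeImpl() already gets. A minimal standalone sketch of the before/after shape, using simplified stand-in types rather than the real ClickHouse columns:

#include <cstddef>
#include <cstdint>
#include <vector>

using Offsets = std::vector<size_t>;            /// stand-in for ColumnString::Offsets

/// Before: each helper recomputed the number of rows on its own.
void vectorOld(const Offsets & offsets, std::vector<uint64_t> & res)
{
    size_t size = offsets.size();
    size_t prev_offset = 0;
    for (size_t i = 0; i < size; ++i)
    {
        res[i] = offsets[i] - prev_offset - 1;  /// per-row byte length as an example payload
        prev_offset = offsets[i];
    }
}

/// After: the caller passes input_rows_count down, so every helper iterates
/// over exactly the rows of the block being processed.
void vectorNew(const Offsets & offsets, std::vector<uint64_t> & res, size_t input_rows_count)
{
    size_t prev_offset = 0;
    for (size_t i = 0; i < input_rows_count; ++i)
    {
        res[i] = offsets[i] - prev_offset - 1;
        prev_offset = offsets[i];
    }
}
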
@@ -76,7 +75,7 @@ public: String tmp; FormatSettings format_settings; auto serialization = src.type->getDefaultSerialization(); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { { WriteBufferFromString out(tmp); diff --git a/src/Functions/widthBucket.cpp b/src/Functions/widthBucket.cpp index d007cc968f0..ba24362034f 100644 --- a/src/Functions/widthBucket.cpp +++ b/src/Functions/widthBucket.cpp @@ -166,12 +166,12 @@ class FunctionWidthBucket : public IFunction result_column->reserve(1); auto & result_data = result_column->getData(); - for (const auto row_index : collections::range(0, input_rows_count)) + for (size_t row = 0; row < input_rows_count; ++row) { - const auto operand = getValue(operands_col_const, operands_vec, row_index); - const auto low = getValue(lows_col_const, lows_vec, row_index); - const auto high = getValue(highs_col_const, highs_vec, row_index); - const auto count = getValue(counts_col_const, counts_vec, row_index); + const auto operand = getValue(operands_col_const, operands_vec, row); + const auto low = getValue(lows_col_const, lows_vec, row); + const auto high = getValue(highs_col_const, highs_vec, row); + const auto count = getValue(counts_col_const, counts_vec, row); result_data.push_back(calculate(operand, low, high, count)); } diff --git a/src/IO/AsynchronousReadBufferFromFile.cpp b/src/IO/AsynchronousReadBufferFromFile.cpp index c6fe16a7f14..4d148a5c9f9 100644 --- a/src/IO/AsynchronousReadBufferFromFile.cpp +++ b/src/IO/AsynchronousReadBufferFromFile.cpp @@ -93,7 +93,10 @@ void AsynchronousReadBufferFromFile::close() return; if (0 != ::close(fd)) + { + fd = -1; throw Exception(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file"); + } fd = -1; } diff --git a/src/IO/MMapReadBufferFromFile.cpp b/src/IO/MMapReadBufferFromFile.cpp index d3eb11c920d..d0865269f1b 100644 --- a/src/IO/MMapReadBufferFromFile.cpp +++ b/src/IO/MMapReadBufferFromFile.cpp @@ -77,7 +77,10 @@ void MMapReadBufferFromFile::close() finish(); if (0 != ::close(fd)) + { + fd = -1; throw Exception(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file"); + } fd = -1; metric_increment.destroy(); diff --git a/src/IO/MMappedFile.cpp b/src/IO/MMappedFile.cpp index 7249a25decb..26949061274 100644 --- a/src/IO/MMappedFile.cpp +++ b/src/IO/MMappedFile.cpp @@ -69,7 +69,10 @@ void MMappedFile::close() finish(); if (0 != ::close(fd)) + { + fd = -1; throw Exception(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file"); + } fd = -1; metric_increment.destroy(); diff --git a/src/IO/OpenedFile.cpp b/src/IO/OpenedFile.cpp index 4677a8259db..c51dc430b35 100644 --- a/src/IO/OpenedFile.cpp +++ b/src/IO/OpenedFile.cpp @@ -67,11 +67,13 @@ void OpenedFile::close() return; if (0 != ::close(fd)) + { + fd = -1; throw Exception(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file"); + } fd = -1; metric_increment.destroy(); } } - diff --git a/src/IO/ReadBufferFromFile.cpp b/src/IO/ReadBufferFromFile.cpp index cb987171bad..2e5b9fcf753 100644 --- a/src/IO/ReadBufferFromFile.cpp +++ b/src/IO/ReadBufferFromFile.cpp @@ -88,7 +88,10 @@ void ReadBufferFromFile::close() return; if (0 != ::close(fd)) + { + fd = -1; throw Exception(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file"); + } fd = -1; metric_increment.destroy(); diff --git a/src/IO/WithFileSize.cpp b/src/IO/WithFileSize.cpp index cbbcab83de2..54747cef8af 100644 --- a/src/IO/WithFileSize.cpp +++ b/src/IO/WithFileSize.cpp @@ -5,6 +5,7 @@ #include #include + namespace DB { @@ -88,5 +89,4 @@ size_t getDataOffsetMaybeCompressed(const ReadBuffer & in) 
return in.count(); } - } diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp index 37b1161356f..f1825ce1e22 100644 --- a/src/IO/WriteBufferFromFile.cpp +++ b/src/IO/WriteBufferFromFile.cpp @@ -116,7 +116,10 @@ void WriteBufferFromFile::close() finalize(); if (0 != ::close(fd)) + { + fd = -1; throw Exception(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file"); + } fd = -1; metric_increment.destroy(); diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 34f3e0a98bd..4aaecc491e0 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -624,9 +624,9 @@ void ActionsDAG::removeAliasesForFilter(const std::string & filter_name) } } -ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases) +ActionsDAG ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases) { - auto actions = std::make_shared(); + ActionsDAG actions; std::unordered_map copy_map; struct Frame @@ -661,21 +661,21 @@ ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool rem if (remove_aliases && frame.node->type == ActionType::ALIAS) copy_node = copy_map[frame.node->children.front()]; else - copy_node = &actions->nodes.emplace_back(*frame.node); + copy_node = &actions.nodes.emplace_back(*frame.node); if (frame.node->type == ActionType::INPUT) - actions->inputs.push_back(copy_node); + actions.inputs.push_back(copy_node); stack.pop(); } } - for (auto & node : actions->nodes) + for (auto & node : actions.nodes) for (auto & child : node.children) child = copy_map[child]; for (const auto * output : outputs) - actions->outputs.push_back(copy_map[output]); + actions.outputs.push_back(copy_map[output]); return actions; } @@ -961,9 +961,9 @@ NameSet ActionsDAG::foldActionsByProjection( } -ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_map & new_inputs, const NodeRawConstPtrs & required_outputs) +ActionsDAG ActionsDAG::foldActionsByProjection(const std::unordered_map & new_inputs, const NodeRawConstPtrs & required_outputs) { - auto dag = std::make_unique(); + ActionsDAG dag; std::unordered_map inputs_mapping; std::unordered_map mapping; struct Frame @@ -1003,9 +1003,9 @@ ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_mapresult_name != rename->result_name; const auto & input_name = should_rename ? 
rename->result_name : new_input->result_name; - mapped_input = &dag->addInput(input_name, new_input->result_type); + mapped_input = &dag.addInput(input_name, new_input->result_type); if (should_rename) - mapped_input = &dag->addAlias(*mapped_input, new_input->result_name); + mapped_input = &dag.addAlias(*mapped_input, new_input->result_name); } node = mapped_input; @@ -1034,7 +1034,7 @@ ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_mapresult_name, frame.node->result_name); - auto & node = dag->nodes.emplace_back(*frame.node); + auto & node = dag.nodes.emplace_back(*frame.node); for (auto & child : node.children) child = mapping[child]; @@ -1049,8 +1049,8 @@ ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_mapresult_name != mapped_output->result_name) - mapped_output = &dag->addAlias(*mapped_output, output->result_name); - dag->outputs.push_back(mapped_output); + mapped_output = &dag.addAlias(*mapped_output, output->result_name); + dag.outputs.push_back(mapped_output); } return dag; @@ -1246,27 +1246,31 @@ bool ActionsDAG::removeUnusedResult(const std::string & column_name) return true; } -ActionsDAGPtr ActionsDAG::clone() const +ActionsDAG ActionsDAG::clone() const { - auto actions = std::make_shared(); + std::unordered_map old_to_new_nodes; + return clone(old_to_new_nodes); +} - std::unordered_map copy_map; +ActionsDAG ActionsDAG::clone(std::unordered_map & old_to_new_nodes) const +{ + ActionsDAG actions; for (const auto & node : nodes) { - auto & copy_node = actions->nodes.emplace_back(node); - copy_map[&node] = ©_node; + auto & copy_node = actions.nodes.emplace_back(node); + old_to_new_nodes[&node] = ©_node; } - for (auto & node : actions->nodes) + for (auto & node : actions.nodes) for (auto & child : node.children) - child = copy_map[child]; + child = old_to_new_nodes[child]; for (const auto & output_node : outputs) - actions->outputs.push_back(copy_map[output_node]); + actions.outputs.push_back(old_to_new_nodes[output_node]); for (const auto & input_node : inputs) - actions->inputs.push_back(copy_map[input_node]); + actions.inputs.push_back(old_to_new_nodes[input_node]); return actions; } @@ -1404,7 +1408,7 @@ const ActionsDAG::Node & ActionsDAG::materializeNode(const Node & node) return addAlias(*func, name); } -ActionsDAGPtr ActionsDAG::makeConvertingActions( +ActionsDAG ActionsDAG::makeConvertingActions( const ColumnsWithTypeAndName & source, const ColumnsWithTypeAndName & result, MatchColumnsMode mode, @@ -1421,7 +1425,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( if (add_casted_columns && mode != MatchColumnsMode::Name) throw Exception(ErrorCodes::LOGICAL_ERROR, "Converting with add_casted_columns supported only for MatchColumnsMode::Name"); - auto actions_dag = std::make_shared(source); + ActionsDAG actions_dag(source); NodeRawConstPtrs projection(num_result_columns); FunctionOverloadResolverPtr func_builder_materialize = std::make_unique(std::make_shared()); @@ -1429,9 +1433,9 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( std::unordered_map> inputs; if (mode == MatchColumnsMode::Name) { - size_t input_nodes_size = actions_dag->inputs.size(); + size_t input_nodes_size = actions_dag.inputs.size(); for (size_t pos = 0; pos < input_nodes_size; ++pos) - inputs[actions_dag->inputs[pos]->result_name].push_back(pos); + inputs[actions_dag.inputs[pos]->result_name].push_back(pos); } for (size_t result_col_num = 0; result_col_num < num_result_columns; ++result_col_num) @@ -1444,7 +1448,7 @@ ActionsDAGPtr 
ActionsDAG::makeConvertingActions( { case MatchColumnsMode::Position: { - src_node = dst_node = actions_dag->inputs[result_col_num]; + src_node = dst_node = actions_dag.inputs[result_col_num]; break; } @@ -1455,7 +1459,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( { const auto * res_const = typeid_cast(res_elem.column.get()); if (ignore_constant_values && res_const) - src_node = dst_node = &actions_dag->addColumn(res_elem); + src_node = dst_node = &actions_dag.addColumn(res_elem); else throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Cannot find column `{}` in source stream, there are only columns: [{}]", @@ -1463,7 +1467,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( } else { - src_node = dst_node = actions_dag->inputs[input.front()]; + src_node = dst_node = actions_dag.inputs[input.front()]; input.pop_front(); } break; @@ -1476,7 +1480,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( if (const auto * src_const = typeid_cast(dst_node->column.get())) { if (ignore_constant_values) - dst_node = &actions_dag->addColumn(res_elem); + dst_node = &actions_dag.addColumn(res_elem); else if (res_const->getField() != src_const->getField()) throw Exception( ErrorCodes::ILLEGAL_COLUMN, @@ -1498,7 +1502,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( column.column = DataTypeString().createColumnConst(0, column.name); column.type = std::make_shared(); - const auto * right_arg = &actions_dag->addColumn(std::move(column)); + const auto * right_arg = &actions_dag.addColumn(std::move(column)); const auto * left_arg = dst_node; CastDiagnostic diagnostic = {dst_node->result_name, res_elem.name}; @@ -1506,13 +1510,13 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( = createInternalCastOverloadResolver(CastType::nonAccurate, std::move(diagnostic)); NodeRawConstPtrs children = { left_arg, right_arg }; - dst_node = &actions_dag->addFunction(func_builder_cast, std::move(children), {}); + dst_node = &actions_dag.addFunction(func_builder_cast, std::move(children), {}); } if (dst_node->column && isColumnConst(*dst_node->column) && !(res_elem.column && isColumnConst(*res_elem.column))) { NodeRawConstPtrs children = {dst_node}; - dst_node = &actions_dag->addFunction(func_builder_materialize, std::move(children), {}); + dst_node = &actions_dag.addFunction(func_builder_materialize, std::move(children), {}); } if (dst_node->result_name != res_elem.name) @@ -1531,7 +1535,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( } else { - dst_node = &actions_dag->addAlias(*dst_node, res_elem.name); + dst_node = &actions_dag.addAlias(*dst_node, res_elem.name); projection[result_col_num] = dst_node; } } @@ -1541,36 +1545,36 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( } } - actions_dag->outputs.swap(projection); - actions_dag->removeUnusedActions(false); + actions_dag.outputs.swap(projection); + actions_dag.removeUnusedActions(false); return actions_dag; } -ActionsDAGPtr ActionsDAG::makeAddingColumnActions(ColumnWithTypeAndName column) +ActionsDAG ActionsDAG::makeAddingColumnActions(ColumnWithTypeAndName column) { - auto adding_column_action = std::make_shared(); + ActionsDAG adding_column_action; FunctionOverloadResolverPtr func_builder_materialize = std::make_unique(std::make_shared()); auto column_name = column.name; - const auto * column_node = &adding_column_action->addColumn(std::move(column)); + const auto * column_node = &adding_column_action.addColumn(std::move(column)); NodeRawConstPtrs inputs = {column_node}; - const auto & function_node = 
adding_column_action->addFunction(func_builder_materialize, std::move(inputs), {}); - const auto & alias_node = adding_column_action->addAlias(function_node, std::move(column_name)); + const auto & function_node = adding_column_action.addFunction(func_builder_materialize, std::move(inputs), {}); + const auto & alias_node = adding_column_action.addAlias(function_node, std::move(column_name)); - adding_column_action->outputs.push_back(&alias_node); + adding_column_action.outputs.push_back(&alias_node); return adding_column_action; } -ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) +ActionsDAG ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) { first.mergeInplace(std::move(second)); /// Some actions could become unused. Do not drop inputs to preserve the header. first.removeUnusedActions(false); - return std::make_shared(std::move(first)); + return std::move(first); } void ActionsDAG::mergeInplace(ActionsDAG && second) @@ -1963,15 +1967,15 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set split second_inputs.push_back(cur.to_second); } - auto first_actions = std::make_shared(); - first_actions->nodes.swap(first_nodes); - first_actions->outputs.swap(first_outputs); - first_actions->inputs.swap(first_inputs); + ActionsDAG first_actions; + first_actions.nodes.swap(first_nodes); + first_actions.outputs.swap(first_outputs); + first_actions.inputs.swap(first_inputs); - auto second_actions = std::make_shared(); - second_actions->nodes.swap(second_nodes); - second_actions->outputs.swap(second_outputs); - second_actions->inputs.swap(second_inputs); + ActionsDAG second_actions; + second_actions.nodes.swap(second_nodes); + second_actions.outputs.swap(second_outputs); + second_actions.inputs.swap(second_inputs); std::unordered_map split_nodes_mapping; if (create_split_nodes_mapping) @@ -2091,7 +2095,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBySortingDescription(const NameS return res; } -bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header) +bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header) const { const auto * filter_node = tryFindInOutputs(filter_name); if (!filter_node) @@ -2115,7 +2119,7 @@ bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & fi input_node_name_to_default_input_column.emplace(input->result_name, std::move(constant_column_with_type_and_name)); } - ActionsDAGPtr filter_with_default_value_inputs; + std::optional filter_with_default_value_inputs; try { @@ -2297,12 +2301,12 @@ ColumnsWithTypeAndName prepareFunctionArguments(const ActionsDAG::NodeRawConstPt /// /// Result actions add single column with conjunction result (it is always first in outputs). /// No other columns are added or removed. 
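
The ActionsDAG changes above and below (cloneSubDAG, makeConvertingActions, merge, createActionsForConjunction, splitActionsForFilterPushDown, buildFilterActionsDAG) replace the shared-ownership ActionsDAGPtr with a by-value ActionsDAG, or with std::optional<ActionsDAG> where "no DAG" is a meaningful result. Call sites keep their if (dag) checks but move the value instead of copying a pointer. A hedged sketch of that calling convention, with stand-in names rather than the real API:

#include <optional>
#include <utility>

struct Dag { };                           /// stand-in for ActionsDAG
void consume(Dag &&) { }                  /// stand-in for a step that takes ownership of the DAG

/// Before: a nullable shared_ptr, where "nothing to do" was signalled by nullptr.
/// After: an empty std::optional plays that role, and the value is moved out.
std::optional<Dag> tryBuildFilter(bool something_to_push_down)
{
    if (!something_to_push_down)
        return {};                        /// was: return nullptr;
    return Dag{};                         /// was: return std::make_shared<...>(...);
}

void caller()
{
    if (auto dag = tryBuildFilter(true))  /// optional converts to bool, like the old pointer test
        consume(std::move(*dag));         /// dereference + move replaces handing out the shared_ptr
}
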
-ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs) +std::optional ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs) { if (conjunction.empty()) - return nullptr; + return {}; - auto actions = std::make_shared(); + ActionsDAG actions; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); @@ -2343,7 +2347,7 @@ ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjuncti if (cur.next_child_to_visit == cur.node->children.size()) { - auto & node = actions->nodes.emplace_back(*cur.node); + auto & node = actions.nodes.emplace_back(*cur.node); nodes_mapping[cur.node] = &node; for (auto & child : node.children) @@ -2366,33 +2370,33 @@ ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjuncti for (const auto * predicate : conjunction) args.emplace_back(nodes_mapping[predicate]); - result_predicate = &actions->addFunction(func_builder_and, std::move(args), {}); + result_predicate = &actions.addFunction(func_builder_and, std::move(args), {}); } - actions->outputs.push_back(result_predicate); + actions.outputs.push_back(result_predicate); for (const auto & col : all_inputs) { const Node * input; auto & list = required_inputs[col.name]; if (list.empty()) - input = &actions->addInput(col); + input = &actions.addInput(col); else { input = list.front(); list.pop_front(); - actions->inputs.push_back(input); + actions.inputs.push_back(input); } /// We should not add result_predicate into the outputs for the second time. if (input->result_name != result_predicate->result_name) - actions->outputs.push_back(input); + actions.outputs.push_back(input); } return actions; } -ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( +std::optional ActionsDAG::splitActionsForFilterPushDown( const std::string & filter_name, bool removes_filter, const Names & available_inputs, @@ -2408,7 +2412,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( /// If condition is constant let's do nothing. /// It means there is nothing to push down or optimization was already applied. if (predicate->type == ActionType::COLUMN) - return nullptr; + return {}; std::unordered_set allowed_nodes; @@ -2432,7 +2436,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( auto conjunction = getConjunctionNodes(predicate, allowed_nodes); if (conjunction.allowed.empty()) - return nullptr; + return {}; chassert(predicate->result_type); @@ -2444,13 +2448,13 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( && !conjunction.rejected.front()->result_type->equals(*predicate->result_type)) { /// No further optimization can be done - return nullptr; + return {}; } } auto actions = createActionsForConjunction(conjunction.allowed, all_inputs); if (!actions) - return nullptr; + return {}; /// Now, when actions are created, update the current DAG. 
removeUnusedConjunctions(std::move(conjunction.rejected), predicate, removes_filter); @@ -2555,11 +2559,11 @@ ActionsDAG::ActionsForJOINFilterPushDown ActionsDAG::splitActionsForJOINFilterPu auto left_stream_filter_to_push_down = createActionsForConjunction(left_stream_allowed_conjunctions, left_stream_header.getColumnsWithTypeAndName()); auto right_stream_filter_to_push_down = createActionsForConjunction(right_stream_allowed_conjunctions, right_stream_header.getColumnsWithTypeAndName()); - auto replace_equivalent_columns_in_filter = [](const ActionsDAGPtr & filter, + auto replace_equivalent_columns_in_filter = [](const ActionsDAG & filter, const Block & stream_header, const std::unordered_map & columns_to_replace) { - auto updated_filter = ActionsDAG::buildFilterActionsDAG({filter->getOutputs()[0]}, columns_to_replace); + auto updated_filter = ActionsDAG::buildFilterActionsDAG({filter.getOutputs()[0]}, columns_to_replace); chassert(updated_filter->getOutputs().size() == 1); /** If result filter to left or right stream has column that is one of the stream inputs, we need distinguish filter column from @@ -2580,7 +2584,7 @@ ActionsDAG::ActionsForJOINFilterPushDown ActionsDAG::splitActionsForJOINFilterPu for (const auto & input : updated_filter->getInputs()) updated_filter_inputs[input->result_name].push_back(input); - for (const auto & input : filter->getInputs()) + for (const auto & input : filter.getInputs()) { if (updated_filter_inputs.contains(input->result_name)) continue; @@ -2618,12 +2622,12 @@ ActionsDAG::ActionsForJOINFilterPushDown ActionsDAG::splitActionsForJOINFilterPu }; if (left_stream_filter_to_push_down) - left_stream_filter_to_push_down = replace_equivalent_columns_in_filter(left_stream_filter_to_push_down, + left_stream_filter_to_push_down = replace_equivalent_columns_in_filter(*left_stream_filter_to_push_down, left_stream_header, equivalent_right_stream_column_to_left_stream_column); if (right_stream_filter_to_push_down) - right_stream_filter_to_push_down = replace_equivalent_columns_in_filter(right_stream_filter_to_push_down, + right_stream_filter_to_push_down = replace_equivalent_columns_in_filter(*right_stream_filter_to_push_down, right_stream_header, equivalent_left_stream_column_to_right_stream_column); @@ -2852,13 +2856,13 @@ bool ActionsDAG::isSortingPreserved( return true; } -ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( +std::optional ActionsDAG::buildFilterActionsDAG( const NodeRawConstPtrs & filter_nodes, const std::unordered_map & node_name_to_input_node_column, bool single_output_condition_node) { if (filter_nodes.empty()) - return nullptr; + return {}; struct Frame { @@ -2866,7 +2870,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( bool visited_children = false; }; - auto result_dag = std::make_shared(); + ActionsDAG result_dag; std::unordered_map result_inputs; std::unordered_map node_to_result_node; @@ -2897,7 +2901,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( { auto & result_input = result_inputs[input_node_it->second.name]; if (!result_input) - result_input = &result_dag->addInput(input_node_it->second); + result_input = &result_dag.addInput(input_node_it->second); node_to_result_node.emplace(node, result_input); nodes_to_process.pop_back(); @@ -2924,25 +2928,25 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( { auto & result_input = result_inputs[node->result_name]; if (!result_input) - result_input = &result_dag->addInput({node->column, node->result_type, node->result_name}); + result_input = &result_dag.addInput({node->column, 
node->result_type, node->result_name}); result_node = result_input; break; } case ActionsDAG::ActionType::COLUMN: { - result_node = &result_dag->addColumn({node->column, node->result_type, node->result_name}); + result_node = &result_dag.addColumn({node->column, node->result_type, node->result_name}); break; } case ActionsDAG::ActionType::ALIAS: { const auto * child = node->children.front(); - result_node = &result_dag->addAlias(*(node_to_result_node.find(child)->second), node->result_name); + result_node = &result_dag.addAlias(*(node_to_result_node.find(child)->second), node->result_name); break; } case ActionsDAG::ActionType::ARRAY_JOIN: { const auto * child = node->children.front(); - result_node = &result_dag->addArrayJoin(*(node_to_result_node.find(child)->second), {}); + result_node = &result_dag.addArrayJoin(*(node_to_result_node.find(child)->second), {}); break; } case ActionsDAG::ActionType::FUNCTION: @@ -2960,13 +2964,11 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - ActionsDAGPtr index_hint_filter_dag; - const auto & index_hint_args = index_hint->getActions()->getOutputs(); + ActionsDAG index_hint_filter_dag; + const auto & index_hint_args = index_hint->getActions().getOutputs(); - if (index_hint_args.empty()) - index_hint_filter_dag = std::make_shared(); - else - index_hint_filter_dag = buildFilterActionsDAG(index_hint_args, + if (!index_hint_args.empty()) + index_hint_filter_dag = *buildFilterActionsDAG(index_hint_args, node_name_to_input_node_column, false /*single_output_condition_node*/); @@ -2988,7 +2990,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( auto [arguments, all_const] = getFunctionArguments(function_children); auto function_base = function_overload_resolver ? 
function_overload_resolver->build(arguments) : node->function_base; - result_node = &result_dag->addFunctionImpl( + result_node = &result_dag.addFunctionImpl( function_base, std::move(function_children), std::move(arguments), @@ -3003,7 +3005,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( nodes_to_process.pop_back(); } - auto & result_dag_outputs = result_dag->getOutputs(); + auto & result_dag_outputs = result_dag.getOutputs(); result_dag_outputs.reserve(filter_nodes_size); for (const auto & node : filter_nodes) @@ -3012,7 +3014,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( if (result_dag_outputs.size() > 1 && single_output_condition_node) { FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - result_dag_outputs = { &result_dag->addFunction(func_builder_and, result_dag_outputs, {}) }; + result_dag_outputs = { &result_dag.addFunction(func_builder_and, result_dag_outputs, {}) }; } return result_dag; @@ -3108,10 +3110,9 @@ ActionsDAG::NodeRawConstPtrs ActionsDAG::filterNodesByAllowedInputs( return nodes; } -FindOriginalNodeForOutputName::FindOriginalNodeForOutputName(const ActionsDAGPtr & actions_) - :actions(actions_) +FindOriginalNodeForOutputName::FindOriginalNodeForOutputName(const ActionsDAG & actions_) { - const auto & actions_outputs = actions->getOutputs(); + const auto & actions_outputs = actions_.getOutputs(); for (const auto * output_node : actions_outputs) { /// find input node which refers to the output node @@ -3147,10 +3148,9 @@ const ActionsDAG::Node * FindOriginalNodeForOutputName::find(const String & outp return it->second; } -FindAliasForInputName::FindAliasForInputName(const ActionsDAGPtr & actions_) - :actions(actions_) +FindAliasForInputName::FindAliasForInputName(const ActionsDAG & actions_) { - const auto & actions_outputs = actions->getOutputs(); + const auto & actions_outputs = actions_.getOutputs(); for (const auto * output_node : actions_outputs) { /// find input node which corresponds to alias diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index c9974fd849c..43c1b41a240 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -11,9 +11,6 @@ namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - class IExecutableFunction; using ExecutableFunctionPtr = std::shared_ptr; @@ -247,7 +244,7 @@ public: /// c * d e /// \ / /// c * d - e - static ActionsDAGPtr foldActionsByProjection( + static ActionsDAG foldActionsByProjection( const std::unordered_map & new_inputs, const NodeRawConstPtrs & required_outputs); @@ -261,9 +258,10 @@ public: void compileExpressions(size_t min_count_to_compile_expression, const std::unordered_set & lazy_executed_nodes = {}); #endif - ActionsDAGPtr clone() const; + ActionsDAG clone(std::unordered_map & old_to_new_nodes) const; + ActionsDAG clone() const; - static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); + static ActionsDAG cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); /// Execute actions for header. Input block must have empty columns. /// Result should be equal to the execution of ExpressionActions built from this DAG. @@ -301,7 +299,7 @@ public: /// @param ignore_constant_values - Do not check that constants are same. Use value from result_header. /// @param add_casted_columns - Create new columns with converted values instead of replacing original. /// @param new_names - Output parameter for new column names when add_casted_columns is used. 
- static ActionsDAGPtr makeConvertingActions( + static ActionsDAG makeConvertingActions( const ColumnsWithTypeAndName & source, const ColumnsWithTypeAndName & result, MatchColumnsMode mode, @@ -310,13 +308,13 @@ public: NameToNameMap * new_names = nullptr); /// Create expression which add const column and then materialize it. - static ActionsDAGPtr makeAddingColumnActions(ColumnWithTypeAndName column); + static ActionsDAG makeAddingColumnActions(ColumnWithTypeAndName column); /// Create ActionsDAG which represents expression equivalent to applying first and second actions consequently. /// Is used to replace `(first -> second)` expression chain to single `merge(first, second)` expression. /// If first.settings.project_input is set, then outputs of `first` must include inputs of `second`. /// Otherwise, any two actions may be combined. - static ActionsDAGPtr merge(ActionsDAG && first, ActionsDAG && second); + static ActionsDAG merge(ActionsDAG && first, ActionsDAG && second); /// The result is similar to merge(*this, second); /// Invariant : no nodes are removed from the first (this) DAG. @@ -327,12 +325,7 @@ public: /// *out_outputs is filled with pointers to the nodes corresponding to second.getOutputs(). void mergeNodes(ActionsDAG && second, NodeRawConstPtrs * out_outputs = nullptr); - struct SplitResult - { - ActionsDAGPtr first; - ActionsDAGPtr second; - std::unordered_map split_nodes_mapping; - }; + struct SplitResult; /// Split ActionsDAG into two DAGs, where first part contains all nodes from split_nodes and their children. /// Execution of first then second parts on block is equivalent to execution of initial DAG. @@ -360,7 +353,7 @@ public: * @param filter_name - name of filter node in current DAG. * @param input_stream_header - input stream header. */ - bool isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header); + bool isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header) const; /// Create actions which may calculate part of filter using only available_inputs. /// If nothing may be calculated, returns nullptr. @@ -379,19 +372,13 @@ public: /// columns will be transformed like `x, y, z` -> `z > 0, z, x, y` -(remove filter)-> `z, x, y`. /// To avoid it, add inputs from `all_inputs` list, /// so actions `x, y, z -> z > 0, x, y, z` -(remove filter)-> `x, y, z` will not change columns order. - ActionsDAGPtr splitActionsForFilterPushDown( + std::optional splitActionsForFilterPushDown( const std::string & filter_name, bool removes_filter, const Names & available_inputs, const ColumnsWithTypeAndName & all_inputs); - struct ActionsForJOINFilterPushDown - { - ActionsDAGPtr left_stream_filter_to_push_down; - bool left_stream_filter_removes_filter; - ActionsDAGPtr right_stream_filter_to_push_down; - bool right_stream_filter_removes_filter; - }; + struct ActionsForJOINFilterPushDown; /** Split actions for JOIN filter push down. * @@ -438,7 +425,7 @@ public: * * If single_output_condition_node = false, result dag has multiple output nodes. 
*/ - static ActionsDAGPtr buildFilterActionsDAG( + static std::optional buildFilterActionsDAG( const NodeRawConstPtrs & filter_nodes, const std::unordered_map & node_name_to_input_node_column = {}, bool single_output_condition_node = true); @@ -470,21 +457,35 @@ private: void compileFunctions(size_t min_count_to_compile_expression, const std::unordered_set & lazy_executed_nodes = {}); #endif - static ActionsDAGPtr createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs); + static std::optional createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs); void removeUnusedConjunctions(NodeRawConstPtrs rejected_conjunctions, Node * predicate, bool removes_filter); }; +struct ActionsDAG::SplitResult +{ + ActionsDAG first; + ActionsDAG second; + std::unordered_map split_nodes_mapping; +}; + +struct ActionsDAG::ActionsForJOINFilterPushDown +{ + std::optional left_stream_filter_to_push_down; + bool left_stream_filter_removes_filter; + std::optional right_stream_filter_to_push_down; + bool right_stream_filter_removes_filter; +}; + class FindOriginalNodeForOutputName { using NameToNodeIndex = std::unordered_map; public: - explicit FindOriginalNodeForOutputName(const ActionsDAGPtr & actions); + explicit FindOriginalNodeForOutputName(const ActionsDAG & actions); const ActionsDAG::Node * find(const String & output_name); private: - ActionsDAGPtr actions; NameToNodeIndex index; }; @@ -493,11 +494,10 @@ class FindAliasForInputName using NameToNodeIndex = std::unordered_map; public: - explicit FindAliasForInputName(const ActionsDAGPtr & actions); + explicit FindAliasForInputName(const ActionsDAG & actions); const ActionsDAG::Node * find(const String & name); private: - ActionsDAGPtr actions; NameToNodeIndex index; }; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index a874067e43c..e1b7e92ee5d 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1024,7 +1024,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & dag.project(args); auto index_hint = std::make_shared(); - index_hint->setActions(std::make_shared(std::move(dag))); + index_hint->setActions(std::move(dag)); // Arguments are removed. We add function instead of constant column to avoid constant folding. 
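
One consequence of FindOriginalNodeForOutputName and FindAliasForInputName now taking const ActionsDAG & and dropping their stored ActionsDAGPtr member: they no longer keep the DAG alive. They only cache raw node pointers, so the caller must ensure the DAG outlives the finder. A simplified sketch of that borrowing pattern (stand-in types, not the real classes):

#include <string>
#include <unordered_map>
#include <vector>

struct Node { std::string result_name; };     /// stand-in for ActionsDAG::Node

class Finder
{
public:
    /// Borrows the node list; previously a shared_ptr member kept it alive.
    explicit Finder(const std::vector<Node> & nodes)
    {
        for (const auto & node : nodes)
            index.emplace(node.result_name, &node);
    }

    const Node * find(const std::string & name) const
    {
        auto it = index.find(name);
        return it == index.end() ? nullptr : it->second;
    }

private:
    /// Raw pointers into the borrowed container: valid only while it lives.
    std::unordered_map<std::string, const Node *> index;
};
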
data.addFunction(std::make_unique(index_hint), {}, column_name); @@ -1287,7 +1287,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & lambda_dag.removeUnusedActions(Names(1, result_name)); auto lambda_actions = std::make_shared( - std::make_shared(std::move(lambda_dag)), + std::move(lambda_dag), ExpressionActionsSettings::fromContext(data.getContext(), CompileExpressions::yes)); DataTypePtr result_type = lambda_actions->getSampleBlock().getByName(result_name).type; diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index 46d2d60e461..5b638fc14c8 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -18,12 +18,6 @@ namespace DB class ASTExpressionList; class ASTFunction; -class ExpressionActions; -using ExpressionActionsPtr = std::shared_ptr; - -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - class IFunctionOverloadResolver; using FunctionOverloadResolverPtr = std::shared_ptr; @@ -32,7 +26,7 @@ FutureSetPtr makeExplicitSet( const ASTFunction * node, const ActionsDAG & actions, ContextPtr context, PreparedSets & prepared_sets); /** For ActionsVisitor - * A stack of ExpressionActions corresponding to nested lambda expressions. + * A stack of ActionsDAG corresponding to nested lambda expressions. * The new action should be added to the highest possible level. * For example, in the expression "select arrayMap(x -> x + column1 * column2, array1)" * calculation of the product must be done outside the lambda expression (it does not depend on x), diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index d832f568cb8..02323972102 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -49,18 +49,17 @@ namespace ErrorCodes static std::unordered_set processShortCircuitFunctions(const ActionsDAG & actions_dag, ShortCircuitFunctionEvaluation short_circuit_function_evaluation); -ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const ExpressionActionsSettings & settings_, bool project_inputs_) - : project_inputs(project_inputs_) +ExpressionActions::ExpressionActions(ActionsDAG actions_dag_, const ExpressionActionsSettings & settings_, bool project_inputs_) + : actions_dag(std::move(actions_dag_)) + , project_inputs(project_inputs_) , settings(settings_) { - actions_dag = actions_dag_->clone(); - /// It's important to determine lazy executed nodes before compiling expressions. - std::unordered_set lazy_executed_nodes = processShortCircuitFunctions(*actions_dag, settings.short_circuit_function_evaluation); + std::unordered_set lazy_executed_nodes = processShortCircuitFunctions(actions_dag, settings.short_circuit_function_evaluation); #if USE_EMBEDDED_COMPILER if (settings.can_compile_expressions && settings.compile_expressions == CompileExpressions::yes) - actions_dag->compileExpressions(settings.min_count_to_compile_expression, lazy_executed_nodes); + actions_dag.compileExpressions(settings.min_count_to_compile_expression, lazy_executed_nodes); #endif linearizeActions(lazy_executed_nodes); @@ -68,12 +67,32 @@ ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const Expressio if (settings.max_temporary_columns && num_columns > settings.max_temporary_columns) throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS, "Too many temporary columns: {}. 
Maximum: {}", - actions_dag->dumpNames(), settings.max_temporary_columns); + actions_dag.dumpNames(), settings.max_temporary_columns); } ExpressionActionsPtr ExpressionActions::clone() const { - return std::make_shared(*this); + auto copy = std::make_shared(ExpressionActions()); + + std::unordered_map copy_map; + copy->actions_dag = actions_dag.clone(copy_map); + copy->actions = actions; + for (auto & action : copy->actions) + action.node = copy_map[action.node]; + + for (const auto * input : copy->actions_dag.getInputs()) + copy->input_positions.emplace(input->result_name, input_positions.at(input->result_name)); + + copy->num_columns = num_columns; + + copy->required_columns = required_columns; + copy->result_positions = result_positions; + copy->sample_block = sample_block; + + copy->project_inputs = project_inputs; + copy->settings = settings; + + return copy; } namespace @@ -341,8 +360,8 @@ void ExpressionActions::linearizeActions(const std::unordered_setgetOutputs(); - const auto & inputs = actions_dag->getInputs(); + const auto & outputs = actions_dag.getOutputs(); + const auto & inputs = actions_dag.getInputs(); auto reverse_info = getActionsDAGReverseInfo(nodes, outputs); std::vector data; diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index ddffe022215..6ff39ee07f7 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -70,7 +70,7 @@ public: using NameToInputMap = std::unordered_map>; private: - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; Actions actions; size_t num_columns = 0; @@ -84,14 +84,13 @@ private: ExpressionActionsSettings settings; public: - ExpressionActions() = delete; - explicit ExpressionActions(ActionsDAGPtr actions_dag_, const ExpressionActionsSettings & settings_ = {}, bool project_inputs_ = false); - ExpressionActions(const ExpressionActions &) = default; - ExpressionActions & operator=(const ExpressionActions &) = default; + explicit ExpressionActions(ActionsDAG actions_dag_, const ExpressionActionsSettings & settings_ = {}, bool project_inputs_ = false); + ExpressionActions(ExpressionActions &&) = default; + ExpressionActions & operator=(ExpressionActions &&) = default; const Actions & getActions() const { return actions; } - const std::list & getNodes() const { return actions_dag->getNodes(); } - const ActionsDAG & getActionsDAG() const { return *actions_dag; } + const std::list & getNodes() const { return actions_dag.getNodes(); } + const ActionsDAG & getActionsDAG() const { return actions_dag; } const ColumnNumbers & getResultPositions() const { return result_positions; } const ExpressionActionsSettings & getSettings() const { return settings; } @@ -131,6 +130,7 @@ public: ExpressionActionsPtr clone() const; private: + ExpressionActions() = default; void checkLimits(const ColumnsWithTypeAndName & columns) const; void linearizeActions(const std::unordered_set & lazy_executed_nodes); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 16d0eb71278..d25434a515d 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -658,7 +658,7 @@ void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_, with_alias->getColumnName(), 1 /* direction */, 1 /* nulls_direction */)); - auto actions_dag = std::make_shared(aggregated_columns); + auto actions_dag = std::make_unique(aggregated_columns); getRootActions(column_ast, false, *actions_dag); 
desc.partition_by_actions.push_back(std::move(actions_dag)); } @@ -679,7 +679,7 @@ void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_, order_by_element.direction, order_by_element.nulls_direction)); - auto actions_dag = std::make_shared(aggregated_columns); + auto actions_dag = std::make_unique(aggregated_columns); getRootActions(column_ast, false, *actions_dag); desc.order_by_actions.push_back(std::move(actions_dag)); } @@ -823,13 +823,14 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAG & actions) makeWindowDescriptionFromAST(*current_context, window_descriptions, desc, &definition); + auto full_sort_description = desc.full_sort_description; + auto [it, inserted] = window_descriptions.insert( - {default_window_name, desc}); + {default_window_name, std::move(desc)}); if (!inserted) { - assert(it->second.full_sort_description - == desc.full_sort_description); + assert(it->second.full_sort_description == full_sort_description); } it->second.window_functions.push_back(window_function); @@ -927,7 +928,7 @@ JoinPtr SelectQueryExpressionAnalyzer::appendJoin( { const ColumnsWithTypeAndName & left_sample_columns = chain.getLastStep().getResultColumns(); - ActionsDAGPtr converting_actions; + std::optional converting_actions; JoinPtr join = makeJoin(*syntax->ast_join, left_sample_columns, converting_actions); if (converting_actions) @@ -1039,7 +1040,7 @@ static std::unique_ptr buildJoinedPlan( /// Actions which need to be calculated on joined block. auto joined_block_actions = analyzed_join.createJoinedBlockActions(context); NamesWithAliases required_columns_with_aliases = analyzed_join.getRequiredColumns( - Block(joined_block_actions->getResultColumns()), joined_block_actions->getRequiredColumns().getNames()); + Block(joined_block_actions.getResultColumns()), joined_block_actions.getRequiredColumns().getNames()); Names original_right_column_names; for (auto & pr : required_columns_with_aliases) @@ -1060,17 +1061,17 @@ static std::unique_ptr buildJoinedPlan( interpreter->buildQueryPlan(*joined_plan); { Block original_right_columns = interpreter->getSampleBlock(); - auto rename_dag = std::make_unique(original_right_columns.getColumnsWithTypeAndName()); + ActionsDAG rename_dag(original_right_columns.getColumnsWithTypeAndName()); for (const auto & name_with_alias : required_columns_with_aliases) { if (name_with_alias.first != name_with_alias.second && original_right_columns.has(name_with_alias.first)) { auto pos = original_right_columns.getPositionByName(name_with_alias.first); - const auto & alias = rename_dag->addAlias(*rename_dag->getInputs()[pos], name_with_alias.second); - rename_dag->getOutputs()[pos] = &alias; + const auto & alias = rename_dag.addAlias(*rename_dag.getInputs()[pos], name_with_alias.second); + rename_dag.getOutputs()[pos] = &alias; } } - rename_dag->appendInputsForUnusedColumns(joined_plan->getCurrentDataStream().header); + rename_dag.appendInputsForUnusedColumns(joined_plan->getCurrentDataStream().header); auto rename_step = std::make_unique(joined_plan->getCurrentDataStream(), std::move(rename_dag)); rename_step->setStepDescription("Rename joined columns"); joined_plan->addStep(std::move(rename_step)); @@ -1130,14 +1131,14 @@ std::shared_ptr tryKeyValueJoin(std::shared_ptr a JoinPtr SelectQueryExpressionAnalyzer::makeJoin( const ASTTablesInSelectQueryElement & join_element, const ColumnsWithTypeAndName & left_columns, - ActionsDAGPtr & left_convert_actions) + std::optional & left_convert_actions) { /// Two JOINs are not supported with the 
same subquery, but different USINGs. if (joined_plan) throw Exception(ErrorCodes::LOGICAL_ERROR, "Table join was already created for query"); - ActionsDAGPtr right_convert_actions = nullptr; + std::optional right_convert_actions; const auto & analyzed_join = syntax->analyzed_join; @@ -1145,7 +1146,7 @@ JoinPtr SelectQueryExpressionAnalyzer::makeJoin( { auto joined_block_actions = analyzed_join->createJoinedBlockActions(getContext()); NamesWithAliases required_columns_with_aliases = analyzed_join->getRequiredColumns( - Block(joined_block_actions->getResultColumns()), joined_block_actions->getRequiredColumns().getNames()); + Block(joined_block_actions.getResultColumns()), joined_block_actions.getRequiredColumns().getNames()); Names original_right_column_names; for (auto & pr : required_columns_with_aliases) @@ -1162,7 +1163,7 @@ JoinPtr SelectQueryExpressionAnalyzer::makeJoin( std::tie(left_convert_actions, right_convert_actions) = analyzed_join->createConvertingActions(left_columns, right_columns); if (right_convert_actions) { - auto converting_step = std::make_unique(joined_plan->getCurrentDataStream(), right_convert_actions); + auto converting_step = std::make_unique(joined_plan->getCurrentDataStream(), std::move(*right_convert_actions)); converting_step->setStepDescription("Convert joined columns"); joined_plan->addStep(std::move(converting_step)); } @@ -1354,10 +1355,10 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain { for (auto & child : asts) { - auto actions_dag = std::make_shared(columns_after_join); - getRootActions(child, only_types, *actions_dag); + ActionsDAG actions_dag(columns_after_join); + getRootActions(child, only_types, actions_dag); group_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); + std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); } } @@ -1471,7 +1472,7 @@ void SelectQueryExpressionAnalyzer::appendGroupByModifiers(ActionsDAG & before_a ExpressionActionsChain::Step & step = chain.addStep(before_aggregation.getNamesAndTypesList()); step.required_output = std::move(required_output); - step.actions()->dag = std::move(*ActionsDAG::makeConvertingActions(source_columns, result_columns, ActionsDAG::MatchColumnsMode::Position)); + step.actions()->dag = ActionsDAG::makeConvertingActions(source_columns, result_columns, ActionsDAG::MatchColumnsMode::Position); } void SelectQueryExpressionAnalyzer::appendSelectSkipWindowExpressions(ExpressionActionsChain::Step & step, ASTPtr const & node) @@ -1607,10 +1608,10 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr { for (const auto & child : select_query->orderBy()->children) { - auto actions_dag = std::make_shared(columns_after_join); - getRootActions(child, only_types, *actions_dag); + ActionsDAG actions_dag(columns_after_join); + getRootActions(child, only_types, actions_dag); order_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); + std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); } } @@ -1737,7 +1738,7 @@ void ExpressionAnalyzer::appendExpression(ExpressionActionsChain & chain, const step.addRequiredOutput(expr->getColumnName()); } -ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool remove_unused_result) 
+ActionsDAG ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool remove_unused_result) { ActionsDAG actions_dag(aggregated_columns); NamesWithAliases result_columns; @@ -1789,7 +1790,7 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool remove_un actions_dag.removeUnusedActions(name_set); } - return std::make_unique(std::move(actions_dag)); + return actions_dag; } ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool remove_unused_result, CompileExpressions compile_expressions) @@ -1798,17 +1799,17 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool remov getActionsDAG(add_aliases, remove_unused_result), ExpressionActionsSettings::fromContext(getContext(), compile_expressions), add_aliases && remove_unused_result); } -ActionsDAGPtr ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs) +ActionsDAG ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs) { - auto actions = std::make_shared(constant_inputs); - getRootActions(query, true /* no_makeset_for_subqueries */, *actions, true /* only_consts */); + ActionsDAG actions(constant_inputs); + getRootActions(query, true /* no_makeset_for_subqueries */, actions, true /* only_consts */); return actions; } ExpressionActionsPtr ExpressionAnalyzer::getConstActions(const ColumnsWithTypeAndName & constant_inputs) { auto actions = getConstActionsDAG(constant_inputs); - return std::make_shared(actions, ExpressionActionsSettings::fromContext(getContext())); + return std::make_shared(std::move(actions), ExpressionActionsSettings::fromContext(getContext())); } std::unique_ptr SelectQueryExpressionAnalyzer::getJoinedPlan() @@ -1879,8 +1880,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (prewhere_dag_and_flags) { - auto dag = std::make_shared(std::move(prewhere_dag_and_flags->dag)); - prewhere_info = std::make_shared(std::move(dag), query.prewhere()->getColumnName()); + prewhere_info = std::make_shared(std::move(prewhere_dag_and_flags->dag), query.prewhere()->getColumnName()); prewhere_dag_and_flags.reset(); } @@ -1923,7 +1923,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (storage && additional_filter) { - Names columns_for_additional_filter = additional_filter->actions->getRequiredColumnsNames(); + Names columns_for_additional_filter = additional_filter->actions.getRequiredColumnsNames(); additional_required_columns_after_prewhere.insert(additional_required_columns_after_prewhere.end(), columns_for_additional_filter.begin(), columns_for_additional_filter.end()); } @@ -1944,10 +1944,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( Block before_prewhere_sample = source_header; if (sanitizeBlock(before_prewhere_sample)) { - auto dag = prewhere_dag_and_flags->dag.clone(); - ExpressionActions( - dag, - ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); + before_prewhere_sample = prewhere_dag_and_flags->dag.updateHeader(before_prewhere_sample); auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); /// If the filter column is a constant, record it. 
if (column_elem.column) @@ -1979,9 +1976,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( before_where_sample = source_header; if (sanitizeBlock(before_where_sample)) { - ExpressionActions( - before_where->dag.clone(), - ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); + before_where_sample = before_where->dag.updateHeader(before_where_sample); auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); @@ -2054,7 +2049,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( auto & step = chain.lastStep(query_analyzer.aggregated_columns); auto & actions = step.actions()->dag; - actions = std::move(*ActionsDAG::merge(std::move(actions), std::move(*converting))); + actions = ActionsDAG::merge(std::move(actions), std::move(converting)); } } @@ -2235,12 +2230,11 @@ void ExpressionAnalysisResult::checkActions() const /// Check that PREWHERE doesn't contain unusual actions. Unusual actions are that can change number of rows. if (hasPrewhere()) { - auto check_actions = [](const ActionsDAGPtr & actions) + auto check_actions = [](ActionsDAG & actions) { - if (actions) - for (const auto & node : actions->getNodes()) - if (node.type == ActionsDAG::ActionType::ARRAY_JOIN) - throw Exception(ErrorCodes::ILLEGAL_PREWHERE, "PREWHERE cannot contain ARRAY JOIN action"); + for (const auto & node : actions.getNodes()) + if (node.type == ActionsDAG::ActionType::ARRAY_JOIN) + throw Exception(ErrorCodes::ILLEGAL_PREWHERE, "PREWHERE cannot contain ARRAY JOIN action"); }; check_actions(prewhere_info->prewhere_actions); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 12d6dce8f72..0c00247df85 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -38,9 +38,6 @@ using StorageMetadataPtr = std::shared_ptr; class ArrayJoinAction; using ArrayJoinActionPtr = std::shared_ptr; -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - /// Create columns in block or return false if not possible bool sanitizeBlock(Block & block, bool throw_if_cannot_create_column = false); @@ -117,12 +114,12 @@ public: /// If add_aliases, only the calculated values in the desired order and add aliases. /// If also remove_unused_result, than only aliases remain in the output block. /// Otherwise, only temporary columns will be deleted from the block. - ActionsDAGPtr getActionsDAG(bool add_aliases, bool remove_unused_result = true); + ActionsDAG getActionsDAG(bool add_aliases, bool remove_unused_result = true); ExpressionActionsPtr getActions(bool add_aliases, bool remove_unused_result = true, CompileExpressions compile_expressions = CompileExpressions::no); /// Get actions to evaluate a constant expression. The function adds constants and applies functions that depend only on constants. /// Does not execute subqueries. - ActionsDAGPtr getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs = {}); + ActionsDAG getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs = {}); ExpressionActionsPtr getConstActions(const ColumnsWithTypeAndName & constant_inputs = {}); /** Sets that require a subquery to be create. 
@@ -367,7 +364,7 @@ private: JoinPtr makeJoin( const ASTTablesInSelectQueryElement & join_element, const ColumnsWithTypeAndName & left_columns, - ActionsDAGPtr & left_convert_actions); + std::optional & left_convert_actions); const ASTSelectQuery * getAggregatingQuery() const; diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index 986134a6998..b1745ea8956 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -296,7 +296,7 @@ private: { auto joined_block_actions = data.table_join->createJoinedBlockActions(data.getContext()); NamesWithAliases required_columns_with_aliases = data.table_join->getRequiredColumns( - Block(joined_block_actions->getResultColumns()), joined_block_actions->getRequiredColumns().getNames()); + Block(joined_block_actions.getResultColumns()), joined_block_actions.getRequiredColumns().getNames()); for (auto & pr : required_columns_with_aliases) required_columns.push_back(pr.first); diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index f64b9540dbb..0034b50f02c 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -124,16 +124,16 @@ static ASTPtr parseAdditionalPostFilter(const Context & context) "additional filter", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } -static ActionsDAGPtr makeAdditionalPostFilter(ASTPtr & ast, ContextPtr context, const Block & header) +static ActionsDAG makeAdditionalPostFilter(ASTPtr & ast, ContextPtr context, const Block & header) { auto syntax_result = TreeRewriter(context).analyze(ast, header.getNamesAndTypesList()); String result_column_name = ast->getColumnName(); auto dag = ExpressionAnalyzer(ast, syntax_result, context).getActionsDAG(false, false); - const ActionsDAG::Node * result_node = &dag->findInOutputs(result_column_name); - auto & outputs = dag->getOutputs(); + const ActionsDAG::Node * result_node = &dag.findInOutputs(result_column_name); + auto & outputs = dag.getOutputs(); outputs.clear(); - outputs.reserve(dag->getInputs().size() + 1); - for (const auto * node : dag->getInputs()) + outputs.reserve(dag.getInputs().size() + 1); + for (const auto * node : dag.getInputs()) outputs.push_back(node); outputs.push_back(result_node); @@ -151,7 +151,7 @@ void IInterpreterUnionOrSelectQuery::addAdditionalPostFilter(QueryPlan & plan) c return; auto dag = makeAdditionalPostFilter(ast, context, plan.getCurrentDataStream().header); - std::string filter_name = dag->getOutputs().back()->result_name; + std::string filter_name = dag.getOutputs().back()->result_name; auto filter_step = std::make_unique( plan.getCurrentDataStream(), std::move(dag), std::move(filter_name), true); filter_step->setStepDescription("Additional result filter"); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 454cedae15c..427378decd5 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1373,8 +1373,8 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (need_add_to_database) database = DatabaseCatalog::instance().tryGetDatabase(database_name); - bool allow_heavy_create = getContext()->getSettingsRef().database_replicated_allow_heavy_create; - if (!allow_heavy_create && database && database->getEngineName() == "Replicated" && (create.select || 
create.is_populate)) + bool allow_heavy_populate = getContext()->getSettingsRef().database_replicated_allow_heavy_create && create.is_populate; + if (!allow_heavy_populate && database && database->getEngineName() == "Replicated" && (create.select || create.is_populate)) { bool is_storage_replicated = false; @@ -1392,10 +1392,18 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) const bool allow_create_select_for_replicated = (create.isView() && !create.is_populate) || create.is_create_empty || !is_storage_replicated; if (!allow_create_select_for_replicated) + { + /// POPULATE can be enabled with setting, provide hint in error message + if (create.is_populate) + throw Exception( + ErrorCodes::SUPPORT_IS_DISABLED, + "CREATE with POPULATE is not supported with Replicated databases. Consider using separate CREATE and INSERT queries. " + "Alternatively, you can enable 'database_replicated_allow_heavy_create' setting to allow this operation, use with caution"); + throw Exception( ErrorCodes::SUPPORT_IS_DISABLED, - "CREATE AS SELECT and POPULATE is not supported with Replicated databases. Consider using separate CREATE and INSERT queries. " - "Alternatively, you can enable 'database_replicated_allow_heavy_create' setting to allow this operation, use with caution"); + "CREATE AS SELECT is not supported with Replicated databases. Consider using separate CREATE and INSERT queries."); + } } if (database && database->shouldReplicateQuery(getContext(), query_ptr)) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 3906cef1995..48013d3ab00 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -390,7 +390,7 @@ Chain InterpreterInsertQuery::buildPreSinkChain( context_ptr, null_as_default); - auto adding_missing_defaults_actions = std::make_shared(adding_missing_defaults_dag); + auto adding_missing_defaults_actions = std::make_shared(std::move(adding_missing_defaults_dag)); /// Actually we don't know structure of input blocks from query/table, /// because some clients break insertion protocol (columns != header) @@ -536,7 +536,7 @@ QueryPipeline InterpreterInsertQuery::buildInsertSelectPipeline(ASTInsertQuery & pipeline.getHeader().getColumnsWithTypeAndName(), query_sample_block.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Position); - auto actions = std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); + auto actions = std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); pipeline.addSimpleTransform([&](const Block & in_header) -> ProcessorPtr { diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 0f24888cb79..41306a79198 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -177,15 +177,15 @@ FilterDAGInfoPtr generateFilterActions( /// Using separate expression analyzer to prevent any possible alias injection auto syntax_result = TreeRewriter(context).analyzeSelect(query_ast, TreeRewriterResult({}, storage, storage_snapshot)); SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot, {}, false, {}, prepared_sets); - filter_info->actions = std::make_unique(std::move(analyzer.simpleSelectActions()->dag)); + filter_info->actions = std::move(analyzer.simpleSelectActions()->dag); 
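Reviewer note on the pattern applied in this hunk and throughout the rest of the patch: filter and prewhere structures now own their ActionsDAG by value, std::optional<ActionsDAG> stands in for a possibly-absent DAG where an ActionsDAGPtr could previously be null, and the few call sites that still need an independent copy say so explicitly with clone(). The following is only a minimal sketch of that convention, using stand-in types rather than the real ClickHouse classes:

#include <optional>
#include <utility>

// Stand-in for DB::ActionsDAG: move-only, copied only through an explicit clone().
struct ActionsDAG
{
    ActionsDAG() = default;
    ActionsDAG(const ActionsDAG &) = delete;
    ActionsDAG & operator=(const ActionsDAG &) = delete;
    ActionsDAG(ActionsDAG &&) = default;
    ActionsDAG & operator=(ActionsDAG &&) = default;

    ActionsDAG clone() const { return {}; }   // deep copy in the real class
};

// Stand-in for FilterDAGInfo: the DAG is a plain member now, not a shared_ptr.
struct FilterDAGInfo
{
    ActionsDAG actions;
};

// "No filter" is expressed with an empty optional instead of a null ActionsDAGPtr.
std::optional<ActionsDAG> maybeBuildFilter(bool has_filter)
{
    if (!has_filter)
        return {};
    return ActionsDAG{};
}

// Plan steps take the DAG by value and own it afterwards.
void addStep(ActionsDAG /*dag*/) {}

void example(FilterDAGInfo & info)
{
    addStep(info.actions.clone());           // keep info.actions usable: copy explicitly
    if (auto filter = maybeBuildFilter(true))
        addStep(std::move(*filter));         // otherwise hand over ownership
}

Value ownership makes the lifetime of each DAG explicit and removes the shared mutable state that ActionsDAGPtr allowed.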
filter_info->column_name = expr_list->children.at(0)->getColumnName(); - filter_info->actions->removeUnusedActions(NameSet{filter_info->column_name}); + filter_info->actions.removeUnusedActions(NameSet{filter_info->column_name}); - for (const auto * node : filter_info->actions->getInputs()) - filter_info->actions->getOutputs().push_back(node); + for (const auto * node : filter_info->actions.getInputs()) + filter_info->actions.getOutputs().push_back(node); - auto required_columns_from_filter = filter_info->actions->getRequiredColumns(); + auto required_columns_from_filter = filter_info->actions.getRequiredColumns(); for (const auto & column : required_columns_from_filter) { @@ -937,7 +937,7 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() { { const auto & node - = query_info_copy.prewhere_info->prewhere_actions->findInOutputs(query_info_copy.prewhere_info->prewhere_column_name); + = query_info_copy.prewhere_info->prewhere_actions.findInOutputs(query_info_copy.prewhere_info->prewhere_column_name); added_filter_nodes.nodes.push_back(&node); } @@ -949,7 +949,8 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() } } - query_info_copy.filter_actions_dag = ActionsDAG::buildFilterActionsDAG(added_filter_nodes.nodes); + if (auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(added_filter_nodes.nodes)) + query_info_copy.filter_actions_dag = std::make_shared(std::move(*filter_actions_dag)); UInt64 rows_to_read = storage_merge_tree->estimateNumberOfRowsToRead(context, storage_snapshot, query_info_copy); /// Note that we treat an estimation of 0 rows as a real estimation size_t number_of_replicas_to_use = rows_to_read / settings.parallel_replicas_min_number_of_rows_per_replica; @@ -984,7 +985,7 @@ void InterpreterSelectQuery::buildQueryPlan(QueryPlan & query_plan) ActionsDAG::MatchColumnsMode::Name, true); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(convert_actions_dag)); query_plan.addStep(std::move(converting)); } @@ -1057,7 +1058,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl() if (analysis_result.prewhere_info) { - header = analysis_result.prewhere_info->prewhere_actions->updateHeader(header); + header = analysis_result.prewhere_info->prewhere_actions.updateHeader(header); if (analysis_result.prewhere_info->remove_prewhere_column) header.erase(analysis_result.prewhere_info->prewhere_column_name); } @@ -1308,12 +1309,12 @@ static InterpolateDescriptionPtr getInterpolateDescription( auto syntax_result = TreeRewriter(context).analyze(exprs, source_columns); ExpressionAnalyzer analyzer(exprs, syntax_result, context); - ActionsDAGPtr actions = analyzer.getActionsDAG(true); - ActionsDAGPtr conv_dag = ActionsDAG::makeConvertingActions(actions->getResultColumns(), + ActionsDAG actions = analyzer.getActionsDAG(true); + ActionsDAG conv_dag = ActionsDAG::makeConvertingActions(actions.getResultColumns(), result_columns, ActionsDAG::MatchColumnsMode::Position, true); - ActionsDAGPtr merge_dag = ActionsDAG::merge(std::move(*actions->clone()), std::move(*conv_dag)); + ActionsDAG merge_dag = ActionsDAG::merge(std::move(actions), std::move(conv_dag)); - interpolate_descr = std::make_shared(merge_dag, aliases); + interpolate_descr = std::make_shared(std::move(merge_dag), aliases); } return interpolate_descr; @@ -1496,7 +1497,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - expressions.filter_info->actions, + expressions.filter_info->actions.clone(), expressions.filter_info->column_name, expressions.filter_info->do_remove_column); @@ -1510,7 +1511,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - expressions.prewhere_info->row_level_filter, + expressions.prewhere_info->row_level_filter->clone(), expressions.prewhere_info->row_level_column_name, true); @@ -1520,7 +1521,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - expressions.prewhere_info->prewhere_actions, + expressions.prewhere_info->prewhere_actions.clone(), expressions.prewhere_info->prewhere_column_name, expressions.prewhere_info->remove_prewhere_column); @@ -1622,7 +1623,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - expressions.filter_info->actions, + expressions.filter_info->actions.clone(), expressions.filter_info->column_name, expressions.filter_info->do_remove_column); @@ -1630,11 +1631,11 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - new_filter_info->actions, + std::move(new_filter_info->actions), new_filter_info->column_name, new_filter_info->do_remove_column); @@ -2056,18 +2057,20 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c if (prewhere_info.row_level_filter) { + auto row_level_actions = std::make_shared(prewhere_info.row_level_filter->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, - std::make_shared(prewhere_info.row_level_filter), + row_level_actions, prewhere_info.row_level_column_name, true); }); } + auto filter_actions = std::make_shared(prewhere_info.prewhere_actions.clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( - header, std::make_shared(prewhere_info.prewhere_actions), + header, filter_actions, prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column); }); } @@ -2111,8 +2114,8 @@ void InterpreterSelectQuery::applyFiltersToPrewhereInAnalysis(ExpressionAnalysis if (does_storage_support_prewhere && shouldMoveToPrewhere()) { /// Execute row level filter in prewhere as a part of "move to prewhere" optimization. - analysis.prewhere_info = std::make_shared(analysis.filter_info->actions, analysis.filter_info->column_name); - analysis.prewhere_info->remove_prewhere_column = analysis.filter_info->do_remove_column; + analysis.prewhere_info = std::make_shared(std::move(analysis.filter_info->actions), analysis.filter_info->column_name); + analysis.prewhere_info->remove_prewhere_column = std::move(analysis.filter_info->do_remove_column); analysis.prewhere_info->need_filter = true; analysis.filter_info = nullptr; } @@ -2120,8 +2123,8 @@ void InterpreterSelectQuery::applyFiltersToPrewhereInAnalysis(ExpressionAnalysis else { /// Add row level security actions to prewhere. - analysis.prewhere_info->row_level_filter = analysis.filter_info->actions; - analysis.prewhere_info->row_level_column_name = analysis.filter_info->column_name; + analysis.prewhere_info->row_level_filter = std::move(analysis.filter_info->actions); + analysis.prewhere_info->row_level_column_name = std::move(analysis.filter_info->column_name); analysis.filter_info = nullptr; } } @@ -2154,7 +2157,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() if (prewhere_info) { /// Get some columns directly from PREWHERE expression actions - auto prewhere_required_columns = prewhere_info->prewhere_actions->getRequiredColumns().getNames(); + auto prewhere_required_columns = prewhere_info->prewhere_actions.getRequiredColumns().getNames(); columns.insert(prewhere_required_columns.begin(), prewhere_required_columns.end()); if (prewhere_info->row_level_filter) @@ -2226,7 +2229,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() if (prewhere_info) { NameSet columns_to_remove(columns_to_remove_after_prewhere.begin(), columns_to_remove_after_prewhere.end()); - Block prewhere_actions_result = prewhere_info->prewhere_actions->getResultColumns(); + Block prewhere_actions_result = prewhere_info->prewhere_actions.getResultColumns(); /// Populate required columns with the columns, added by PREWHERE actions and not removed afterwards. /// XXX: looks hacky that we already know which columns after PREWHERE we won't need for sure. @@ -2265,7 +2268,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() { /// Don't remove columns which are needed to be aliased. 
for (const auto & name : required_columns) - prewhere_info->prewhere_actions->tryRestoreColumn(name); + prewhere_info->prewhere_actions.tryRestoreColumn(name); /// Add physical columns required by prewhere actions. for (const auto & column : required_columns_from_prewhere) @@ -2323,7 +2326,7 @@ std::optional InterpreterSelectQuery::getTrivialCount(UInt64 max_paralle if (analysis_result.hasPrewhere()) { auto & prewhere_info = analysis_result.prewhere_info; - filter_nodes.push_back(&prewhere_info->prewhere_actions->findInOutputs(prewhere_info->prewhere_column_name)); + filter_nodes.push_back(&prewhere_info->prewhere_actions.findInOutputs(prewhere_info->prewhere_column_name)); if (prewhere_info->row_level_filter) filter_nodes.push_back(&prewhere_info->row_level_filter->findInOutputs(prewhere_info->row_level_column_name)); @@ -2337,7 +2340,7 @@ std::optional InterpreterSelectQuery::getTrivialCount(UInt64 max_paralle if (!filter_actions_dag) return {}; - return storage->totalRowsByPartitionPredicate(filter_actions_dag, context); + return storage->totalRowsByPartitionPredicate(*filter_actions_dag, context); } } @@ -2587,7 +2590,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc /// Aliases in table declaration. if (processing_stage == QueryProcessingStage::FetchColumns && alias_actions) { - auto table_aliases = std::make_unique(query_plan.getCurrentDataStream(), alias_actions); + auto table_aliases = std::make_unique(query_plan.getCurrentDataStream(), alias_actions->clone()); table_aliases->setStepDescription("Add table aliases"); query_plan.addStep(std::move(table_aliases)); } @@ -2597,7 +2600,7 @@ void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsA { auto dag = expression->dag.clone(); if (expression->project_input) - dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto where_step = std::make_unique( query_plan.getCurrentDataStream(), std::move(dag), getSelectQuery().where()->getColumnName(), remove_filter); @@ -2771,7 +2774,7 @@ void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const Actions { auto dag = expression->dag.clone(); if (expression->project_input) - dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto having_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(dag), getSelectQuery().having()->getColumnName(), remove_filter); @@ -2784,7 +2787,7 @@ void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const Actions void InterpreterSelectQuery::executeTotalsAndHaving( QueryPlan & query_plan, bool has_having, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter, bool overflow_row, bool final) { - ActionsDAGPtr dag; + std::optional dag; if (expression) { dag = expression->dag.clone(); @@ -2838,7 +2841,7 @@ void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const Act auto dag = expression->dag.clone(); if (expression->project_input) - dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(dag)); diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index ac1230a6eba..a01a390f3c7 100644 --- 
a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -239,7 +239,7 @@ private: Block source_header; /// Actions to calculate ALIAS if required. - ActionsDAGPtr alias_actions; + std::optional alias_actions; /// The subquery interpreter, if the subquery std::unique_ptr interpreter_subquery; diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index 16add79d226..e5549b2e539 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -1,9 +1,7 @@ #include #include -#include #include #include -#include #include #include #include diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 480c6736bc5..57ad5caa4c7 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1200,7 +1200,7 @@ void MutationsInterpreter::Source::read( const auto & names = first_stage.filter_column_names; size_t num_filters = names.size(); - ActionsDAGPtr filter; + std::optional filter; if (!first_stage.filter_column_names.empty()) { ActionsDAG::NodeRawConstPtrs nodes(num_filters); @@ -1214,7 +1214,7 @@ void MutationsInterpreter::Source::read( MergeTreeSequentialSourceType::Mutation, plan, *data, storage_snapshot, part, required_columns, - apply_deleted_mask_, filter, context_, + apply_deleted_mask_, std::move(filter), context_, getLogger("MutationsInterpreter")); } else @@ -1283,17 +1283,17 @@ QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::v { auto dag = step->actions()->dag.clone(); if (step->actions()->project_input) - dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute DELETEs. - plan.addStep(std::make_unique(plan.getCurrentDataStream(), dag, stage.filter_column_names[i], false)); + plan.addStep(std::make_unique(plan.getCurrentDataStream(), std::move(dag), stage.filter_column_names[i], false)); } else { auto dag = step->actions()->dag.clone(); if (step->actions()->project_input) - dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute UPDATE or final projection. - plan.addStep(std::make_unique(plan.getCurrentDataStream(), dag)); + plan.addStep(std::make_unique(plan.getCurrentDataStream(), std::move(dag))); } } diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 4821d607d0e..7296bdaf383 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -16,7 +17,6 @@ #include #include -#include #include #include #include @@ -29,6 +29,7 @@ #include #include + namespace DB { @@ -95,22 +96,22 @@ NamesAndTypesList getColumnsList(const ASTExpressionList * columns_definition) } ASTPtr data_type = declare_column->data_type; - auto * data_type_function = data_type->as(); + auto * data_type_node = data_type->as(); - if (data_type_function) + if (data_type_node) { - String type_name_upper = Poco::toUpper(data_type_function->name); + String type_name_upper = Poco::toUpper(data_type_node->name); if (is_unsigned) { /// For example(in MySQL): CREATE TABLE test(column_name INT NOT NULL ... 
UNSIGNED) if (type_name_upper.find("INT") != String::npos && !endsWith(type_name_upper, "SIGNED") && !endsWith(type_name_upper, "UNSIGNED")) - data_type_function->name = type_name_upper + " UNSIGNED"; + data_type_node->name = type_name_upper + " UNSIGNED"; } if (type_name_upper == "SET") - data_type_function->arguments.reset(); + data_type_node->arguments.reset(); /// Transforms MySQL ENUM's list of strings to ClickHouse string-integer pairs /// For example ENUM('a', 'b', 'c') -> ENUM('a'=1, 'b'=2, 'c'=3) @@ -119,7 +120,7 @@ NamesAndTypesList getColumnsList(const ASTExpressionList * columns_definition) if (type_name_upper.find("ENUM") != String::npos) { UInt16 i = 0; - for (ASTPtr & child : data_type_function->arguments->children) + for (ASTPtr & child : data_type_node->arguments->children) { auto new_child = std::make_shared(); new_child->name = "equals"; @@ -133,10 +134,10 @@ NamesAndTypesList getColumnsList(const ASTExpressionList * columns_definition) } if (type_name_upper == "DATE") - data_type_function->name = "Date32"; + data_type_node->name = "Date32"; } if (is_nullable) - data_type = makeASTFunction("Nullable", data_type); + data_type = makeASTDataType("Nullable", data_type); columns_name_and_type.emplace_back(declare_column->name, DataTypeFactory::instance().get(data_type)); } @@ -156,7 +157,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col /// (see git blame for details). auto column_name_and_type = columns_name_and_type.begin(); const auto * declare_column_ast = columns_definition->children.begin(); - for (; column_name_and_type != columns_name_and_type.end(); column_name_and_type++, declare_column_ast++) + for (; column_name_and_type != columns_name_and_type.end(); ++column_name_and_type, ++declare_column_ast) { const auto & declare_column = (*declare_column_ast)->as(); String comment; @@ -182,7 +183,7 @@ static NamesAndTypesList getNames(const ASTFunction & expr, ContextPtr context, ASTPtr temp_ast = expr.clone(); auto syntax = TreeRewriter(context).analyze(temp_ast, columns); - auto required_columns = ExpressionAnalyzer(temp_ast, syntax, context).getActionsDAG(false)->getRequiredColumns(); + auto required_columns = ExpressionAnalyzer(temp_ast, syntax, context).getActionsDAG(false).getRequiredColumns(); return required_columns; } @@ -482,7 +483,7 @@ ASTs InterpreterCreateImpl::getRewrittenQueries( { auto column_declaration = std::make_shared(); column_declaration->name = name; - column_declaration->type = makeASTFunction(type); + column_declaration->type = makeASTDataType(type); column_declaration->default_specifier = "MATERIALIZED"; column_declaration->default_expression = std::make_shared(default_value); column_declaration->children.emplace_back(column_declaration->type); diff --git a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp index 6d6077a0295..81e6e6a8761 100644 --- a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp +++ b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp @@ -2,12 +2,10 @@ #include -#include #include #include #include #include -#include #include #include #include @@ -26,8 +24,8 @@ static inline ASTPtr tryRewrittenCreateQuery(const String & query, ContextPtr co context, "test_database", "test_database")[0]; } -static const char MATERIALIZEDMYSQL_TABLE_COLUMNS[] = ", `_sign` Int8() MATERIALIZED 1" - ", `_version` UInt64() MATERIALIZED 1" +static const char MATERIALIZEDMYSQL_TABLE_COLUMNS[] = ", `_sign` Int8 MATERIALIZED 1" + ", `_version` 
UInt64 MATERIALIZED 1" ", INDEX _version _version TYPE minmax GRANULARITY 1"; TEST(MySQLCreateRewritten, ColumnsDataType) diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index 6191eb73fd4..c8c926db13c 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -462,19 +462,19 @@ static void makeColumnNameUnique(const ColumnsWithTypeAndName & source_columns, } } -static ActionsDAGPtr createWrapWithTupleActions( +static std::optional createWrapWithTupleActions( const ColumnsWithTypeAndName & source_columns, std::unordered_set && column_names_to_wrap, NameToNameMap & new_names) { if (column_names_to_wrap.empty()) - return nullptr; + return {}; - auto actions_dag = std::make_shared(source_columns); + ActionsDAG actions_dag(source_columns); FunctionOverloadResolverPtr func_builder = std::make_unique(std::make_shared()); - for (const auto * input_node : actions_dag->getInputs()) + for (const auto * input_node : actions_dag.getInputs()) { const auto & column_name = input_node->result_name; auto it = column_names_to_wrap.find(column_name); @@ -485,9 +485,9 @@ static ActionsDAGPtr createWrapWithTupleActions( String node_name = "__wrapNullsafe(" + column_name + ")"; makeColumnNameUnique(source_columns, node_name); - const auto & dst_node = actions_dag->addFunction(func_builder, {input_node}, node_name); + const auto & dst_node = actions_dag.addFunction(func_builder, {input_node}, node_name); new_names[column_name] = dst_node.result_name; - actions_dag->addOrReplaceInOutputs(dst_node); + actions_dag.addOrReplaceInOutputs(dst_node); } if (!column_names_to_wrap.empty()) @@ -537,21 +537,23 @@ std::pair TableJoin::getKeysForNullSafeComparion(const Columns return {left_keys_to_wrap, right_keys_to_wrap}; } -static void mergeDags(ActionsDAGPtr & result_dag, ActionsDAGPtr && new_dag) +static void mergeDags(std::optional & result_dag, std::optional && new_dag) { + if (!new_dag) + return; if (result_dag) result_dag->mergeInplace(std::move(*new_dag)); else result_dag = std::move(new_dag); } -std::pair +std::pair, std::optional> TableJoin::createConvertingActions( const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns) { - ActionsDAGPtr left_dag = nullptr; - ActionsDAGPtr right_dag = nullptr; + std::optional left_dag; + std::optional right_dag; /** If the types are not equal, we need to convert them to a common type. 
* Example: * SELECT * FROM t1 JOIN t2 ON t1.a = t2.b @@ -616,7 +618,7 @@ TableJoin::createConvertingActions( mergeDags(right_dag, std::move(new_right_dag)); } - return {left_dag, right_dag}; + return {std::move(left_dag), std::move(right_dag)}; } template @@ -693,7 +695,7 @@ void TableJoin::inferJoinKeyCommonType(const LeftNamesAndTypes & left, const Rig } } -static ActionsDAGPtr changeKeyTypes(const ColumnsWithTypeAndName & cols_src, +static std::optional changeKeyTypes(const ColumnsWithTypeAndName & cols_src, const TableJoin::NameToTypeMap & type_mapping, bool add_new_cols, NameToNameMap & key_column_rename) @@ -710,7 +712,7 @@ static ActionsDAGPtr changeKeyTypes(const ColumnsWithTypeAndName & cols_src, } } if (!has_some_to_do) - return nullptr; + return {}; return ActionsDAG::makeConvertingActions( /* source= */ cols_src, @@ -721,7 +723,7 @@ static ActionsDAGPtr changeKeyTypes(const ColumnsWithTypeAndName & cols_src, /* new_names= */ &key_column_rename); } -static ActionsDAGPtr changeTypesToNullable( +static std::optional changeTypesToNullable( const ColumnsWithTypeAndName & cols_src, const NameSet & exception_cols) { @@ -737,7 +739,7 @@ static ActionsDAGPtr changeTypesToNullable( } if (!has_some_to_do) - return nullptr; + return {}; return ActionsDAG::makeConvertingActions( /* source= */ cols_src, @@ -748,29 +750,29 @@ static ActionsDAGPtr changeTypesToNullable( /* new_names= */ nullptr); } -ActionsDAGPtr TableJoin::applyKeyConvertToTable( +std::optional TableJoin::applyKeyConvertToTable( const ColumnsWithTypeAndName & cols_src, const NameToTypeMap & type_mapping, JoinTableSide table_side, NameToNameMap & key_column_rename) { if (type_mapping.empty()) - return nullptr; + return {}; /// Create DAG to convert key columns - ActionsDAGPtr convert_dag = changeKeyTypes(cols_src, type_mapping, !hasUsing(), key_column_rename); + auto convert_dag = changeKeyTypes(cols_src, type_mapping, !hasUsing(), key_column_rename); applyRename(table_side, key_column_rename); return convert_dag; } -ActionsDAGPtr TableJoin::applyNullsafeWrapper( +std::optional TableJoin::applyNullsafeWrapper( const ColumnsWithTypeAndName & cols_src, const NameSet & columns_for_nullsafe_comparison, JoinTableSide table_side, NameToNameMap & key_column_rename) { if (columns_for_nullsafe_comparison.empty()) - return nullptr; + return {}; std::unordered_set column_names_to_wrap; for (const auto & name : columns_for_nullsafe_comparison) @@ -784,7 +786,7 @@ ActionsDAGPtr TableJoin::applyNullsafeWrapper( } /// Create DAG to wrap keys with tuple for null-safe comparison - ActionsDAGPtr null_safe_wrap_dag = createWrapWithTupleActions(cols_src, std::move(column_names_to_wrap), key_column_rename); + auto null_safe_wrap_dag = createWrapWithTupleActions(cols_src, std::move(column_names_to_wrap), key_column_rename); for (auto & clause : clauses) { for (size_t i : clause.nullsafe_compare_key_indexes) @@ -799,7 +801,7 @@ ActionsDAGPtr TableJoin::applyNullsafeWrapper( return null_safe_wrap_dag; } -ActionsDAGPtr TableJoin::applyJoinUseNullsConversion( +std::optional TableJoin::applyJoinUseNullsConversion( const ColumnsWithTypeAndName & cols_src, const NameToNameMap & key_column_rename) { @@ -809,8 +811,7 @@ ActionsDAGPtr TableJoin::applyJoinUseNullsConversion( exclude_columns.insert(it.second); /// Create DAG to make columns nullable if needed - ActionsDAGPtr add_nullable_dag = changeTypesToNullable(cols_src, exclude_columns); - return add_nullable_dag; + return changeTypesToNullable(cols_src, exclude_columns); } void 
TableJoin::setStorageJoin(std::shared_ptr storage) @@ -957,7 +958,7 @@ bool TableJoin::allowParallelHashJoin() const return true; } -ActionsDAGPtr TableJoin::createJoinedBlockActions(ContextPtr context) const +ActionsDAG TableJoin::createJoinedBlockActions(ContextPtr context) const { ASTPtr expression_list = rightKeysList(); auto syntax_result = TreeRewriter(context).analyze(expression_list, columnsFromJoinedTable()); diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index ed14fe4273c..3f2bebb5816 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -201,19 +201,19 @@ private: Names requiredJoinedNames() const; /// Create converting actions and change key column names if required - ActionsDAGPtr applyKeyConvertToTable( + std::optional applyKeyConvertToTable( const ColumnsWithTypeAndName & cols_src, const NameToTypeMap & type_mapping, JoinTableSide table_side, NameToNameMap & key_column_rename); - ActionsDAGPtr applyNullsafeWrapper( + std::optional applyNullsafeWrapper( const ColumnsWithTypeAndName & cols_src, const NameSet & columns_for_nullsafe_comparison, JoinTableSide table_side, NameToNameMap & key_column_rename); - ActionsDAGPtr applyJoinUseNullsConversion( + std::optional applyJoinUseNullsConversion( const ColumnsWithTypeAndName & cols_src, const NameToNameMap & key_column_rename); @@ -263,7 +263,7 @@ public: TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; } - ActionsDAGPtr createJoinedBlockActions(ContextPtr context) const; + ActionsDAG createJoinedBlockActions(ContextPtr context) const; const std::vector & getEnabledJoinAlgorithms() const { return join_algorithm; } @@ -378,7 +378,7 @@ public: /// Calculate converting actions, rename key columns in required /// For `USING` join we will convert key columns inplace and affect into types in the result table /// For `JOIN ON` we will create new columns with converted keys to join by. - std::pair + std::pair, std::optional> createConvertingActions( const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns); diff --git a/src/Interpreters/WindowDescription.h b/src/Interpreters/WindowDescription.h index c26e4517c9a..d51d9ca94d8 100644 --- a/src/Interpreters/WindowDescription.h +++ b/src/Interpreters/WindowDescription.h @@ -14,7 +14,6 @@ namespace DB class ASTFunction; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; struct WindowFunctionDescription { @@ -93,8 +92,8 @@ struct WindowDescription // then by ORDER BY. This field holds this combined sort order. 
SortDescription full_sort_description; - std::vector partition_by_actions; - std::vector order_by_actions; + std::vector> partition_by_actions; + std::vector> order_by_actions; WindowFrame frame; diff --git a/src/Interpreters/addMissingDefaults.cpp b/src/Interpreters/addMissingDefaults.cpp index fbf17d7efb7..27d79e86622 100644 --- a/src/Interpreters/addMissingDefaults.cpp +++ b/src/Interpreters/addMissingDefaults.cpp @@ -14,15 +14,15 @@ namespace DB { -ActionsDAGPtr addMissingDefaults( +ActionsDAG addMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, ContextPtr context, bool null_as_default) { - auto actions = std::make_shared(header.getColumnsWithTypeAndName()); - auto & index = actions->getOutputs(); + ActionsDAG actions(header.getColumnsWithTypeAndName()); + auto & index = actions.getOutputs(); /// For missing columns of nested structure, you need to create not a column of empty arrays, but a column of arrays of correct lengths. /// First, remember the offset columns for all arrays in the block. @@ -40,7 +40,7 @@ ActionsDAGPtr addMissingDefaults( if (group.empty()) group.push_back(nullptr); - group.push_back(actions->getInputs()[i]); + group.push_back(actions.getInputs()[i]); } } @@ -62,11 +62,11 @@ ActionsDAGPtr addMissingDefaults( { const auto & nested_type = array_type->getNestedType(); ColumnPtr nested_column = nested_type->createColumnConstWithDefaultValue(0); - const auto & constant = actions->addColumn({nested_column, nested_type, column.name}); + const auto & constant = actions.addColumn({nested_column, nested_type, column.name}); auto & group = nested_groups[offsets_name]; group[0] = &constant; - index.push_back(&actions->addFunction(func_builder_replicate, group, constant.result_name)); + index.push_back(&actions.addFunction(func_builder_replicate, group, constant.result_name)); continue; } @@ -75,17 +75,17 @@ ActionsDAGPtr addMissingDefaults( * it can be full (or the interpreter may decide that it is constant everywhere). */ auto new_column = column.type->createColumnConstWithDefaultValue(0); - const auto * col = &actions->addColumn({new_column, column.type, column.name}); - index.push_back(&actions->materializeNode(*col)); + const auto * col = &actions.addColumn({new_column, column.type, column.name}); + index.push_back(&actions.materializeNode(*col)); } /// Computes explicitly specified values by default and materialized columns. - if (auto dag = evaluateMissingDefaults(actions->getResultColumns(), required_columns, columns, context, true, null_as_default)) - actions = ActionsDAG::merge(std::move(*actions), std::move(*dag)); + if (auto dag = evaluateMissingDefaults(actions.getResultColumns(), required_columns, columns, context, true, null_as_default)) + actions = ActionsDAG::merge(std::move(actions), std::move(*dag)); /// Removes unused columns and reorders result. 
- actions->removeUnusedActions(required_columns.getNames(), false); - actions->addMaterializingOutputActions(); + actions.removeUnusedActions(required_columns.getNames(), false); + actions.addMaterializingOutputActions(); return actions; } diff --git a/src/Interpreters/addMissingDefaults.h b/src/Interpreters/addMissingDefaults.h index 0a3d4de478c..551583a0006 100644 --- a/src/Interpreters/addMissingDefaults.h +++ b/src/Interpreters/addMissingDefaults.h @@ -2,11 +2,6 @@ #include -#include -#include -#include - - namespace DB { @@ -15,7 +10,6 @@ class NamesAndTypesList; class ColumnsDescription; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; /** Adds three types of columns into block * 1. Columns, that are missed inside request, but present in table without defaults (missed columns) @@ -24,7 +18,7 @@ using ActionsDAGPtr = std::shared_ptr; * Also can substitute NULL with DEFAULT value in case of INSERT SELECT query (null_as_default) if according setting is 1. * All three types of columns are materialized (not constants). */ -ActionsDAGPtr addMissingDefaults( +ActionsDAG addMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, ContextPtr context, bool null_as_default = false); } diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index ac80ad2fb68..4bfc80af1fe 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -91,7 +91,7 @@ std::optional evaluateConstantExpressionImpl(c ColumnPtr result_column; DataTypePtr result_type; String result_name = ast->getColumnName(); - for (const auto & action_node : actions->getOutputs()) + for (const auto & action_node : actions.getOutputs()) { if ((action_node->result_name == result_name) && action_node->column) { @@ -679,9 +679,9 @@ std::optional evaluateExpressionOverConstantCondition( size_t max_elements) { auto inverted_dag = KeyCondition::cloneASTWithInversionPushDown({predicate}, context); - auto matches = matchTrees(expr, *inverted_dag, false); + auto matches = matchTrees(expr, inverted_dag, false); - auto predicates = analyze(inverted_dag->getOutputs().at(0), matches, context, max_elements); + auto predicates = analyze(inverted_dag.getOutputs().at(0), matches, context, max_elements); if (!predicates) return {}; diff --git a/src/Interpreters/formatWithPossiblyHidingSecrets.h b/src/Interpreters/formatWithPossiblyHidingSecrets.h index 039bcbc2bca..ea8c295b169 100644 --- a/src/Interpreters/formatWithPossiblyHidingSecrets.h +++ b/src/Interpreters/formatWithPossiblyHidingSecrets.h @@ -1,11 +1,14 @@ #pragma once -#include "Access/ContextAccess.h" -#include "Interpreters/Context.h" + +#include +#include + #include namespace DB { + struct SecretHidingFormatSettings { // We can't store const Context& as there's a dangerous usage {.ctx = *getContext()} @@ -24,4 +27,5 @@ inline String format(const SecretHidingFormatSettings & settings) return settings.query.formatWithPossiblyHidingSensitiveData(settings.max_length, settings.one_line, show_secrets); } + } diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index 239cce5b427..62f8aea86d1 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -152,22 +152,20 @@ ASTPtr convertRequiredExpressions(Block & block, const NamesAndTypesList & requi return conversion_expr_list; } -ActionsDAGPtr createExpressions( 
+std::optional createExpressions( const Block & header, ASTPtr expr_list, bool save_unneeded_columns, ContextPtr context) { if (!expr_list) - return nullptr; + return {}; auto syntax_result = TreeRewriter(context).analyze(expr_list, header.getNamesAndTypesList()); auto expression_analyzer = ExpressionAnalyzer{expr_list, syntax_result, context}; - auto dag = std::make_shared(header.getNamesAndTypesList()); + ActionsDAG dag(header.getNamesAndTypesList()); auto actions = expression_analyzer.getActionsDAG(true, !save_unneeded_columns); - dag = ActionsDAG::merge(std::move(*dag), std::move(*actions)); - - return dag; + return ActionsDAG::merge(std::move(dag), std::move(actions)); } } @@ -180,7 +178,7 @@ void performRequiredConversions(Block & block, const NamesAndTypesList & require if (auto dag = createExpressions(block, conversion_expr_list, true, context)) { - auto expression = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context)); + auto expression = std::make_shared(std::move(*dag), ExpressionActionsSettings::fromContext(context)); expression->execute(block); } } @@ -195,7 +193,7 @@ bool needConvertAnyNullToDefault(const Block & header, const NamesAndTypesList & return false; } -ActionsDAGPtr evaluateMissingDefaults( +std::optional evaluateMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, @@ -204,7 +202,7 @@ ActionsDAGPtr evaluateMissingDefaults( bool null_as_default) { if (!columns.hasDefaults() && (!null_as_default || !needConvertAnyNullToDefault(header, required_columns, columns))) - return nullptr; + return {}; ASTPtr expr_list = defaultRequiredExpressions(header, required_columns, columns, null_as_default); return createExpressions(header, expr_list, save_unneeded_columns, context); diff --git a/src/Interpreters/inplaceBlockConversions.h b/src/Interpreters/inplaceBlockConversions.h index bea44bf6db9..570eb75dd4a 100644 --- a/src/Interpreters/inplaceBlockConversions.h +++ b/src/Interpreters/inplaceBlockConversions.h @@ -5,9 +5,6 @@ #include #include -#include -#include - namespace DB { @@ -24,12 +21,11 @@ struct StorageInMemoryMetadata; using StorageMetadataPtr = std::shared_ptr; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; /// Create actions which adds missing defaults to block according to required_columns using columns description /// or substitute NULL into DEFAULT value in case of INSERT SELECT query (null_as_default) if according setting is 1. /// Return nullptr if no actions required. -ActionsDAGPtr evaluateMissingDefaults( +std::optional evaluateMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index 4a8a3d2967d..c96499095d5 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -1,8 +1,6 @@ #include #include #include -#include -#include namespace DB @@ -15,8 +13,6 @@ ASTPtr ASTColumnDeclaration::clone() const if (type) { - // Type may be an ASTFunction (e.g. `create table t (a Decimal(9,0))`), - // so we have to clone it properly as well. 
res->type = type->clone(); res->children.push_back(res->type); } diff --git a/src/Parsers/ASTDataType.cpp b/src/Parsers/ASTDataType.cpp new file mode 100644 index 00000000000..3c17ae8c380 --- /dev/null +++ b/src/Parsers/ASTDataType.cpp @@ -0,0 +1,57 @@ +#include +#include +#include + + +namespace DB +{ + +String ASTDataType::getID(char delim) const +{ + return "DataType" + (delim + name); +} + +ASTPtr ASTDataType::clone() const +{ + auto res = std::make_shared(*this); + res->children.clear(); + + if (arguments) + { + res->arguments = arguments->clone(); + res->children.push_back(res->arguments); + } + + return res; +} + +void ASTDataType::updateTreeHashImpl(SipHash & hash_state, bool) const +{ + hash_state.update(name.size()); + hash_state.update(name); + /// Children are hashed automatically. +} + +void ASTDataType::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + settings.ostr << (settings.hilite ? hilite_function : "") << name; + + if (arguments && !arguments->children.empty()) + { + settings.ostr << '(' << (settings.hilite ? hilite_none : ""); + + for (size_t i = 0, size = arguments->children.size(); i < size; ++i) + { + if (i != 0) + settings.ostr << ", "; + + arguments->children[i]->formatImpl(settings, state, frame); + } + + settings.ostr << (settings.hilite ? hilite_function : "") << ')'; + } + + settings.ostr << (settings.hilite ? hilite_none : ""); +} + +} diff --git a/src/Parsers/ASTDataType.h b/src/Parsers/ASTDataType.h new file mode 100644 index 00000000000..71d3aeaa4eb --- /dev/null +++ b/src/Parsers/ASTDataType.h @@ -0,0 +1,38 @@ +#pragma once + +#include + + +namespace DB +{ + +/// AST for data types, e.g. UInt8 or Tuple(x UInt8, y Enum(a = 1)) +class ASTDataType : public IAST +{ +public: + String name; + ASTPtr arguments; + + String getID(char delim) const override; + ASTPtr clone() const override; + void updateTreeHashImpl(SipHash & hash_state, bool ignore_aliases) const override; + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; +}; + +template +std::shared_ptr makeASTDataType(const String & name, Args &&... args) +{ + auto data_type = std::make_shared(); + data_type->name = name; + + if constexpr (sizeof...(args)) + { + data_type->arguments = std::make_shared(); + data_type->children.push_back(data_type->arguments); + data_type->arguments->children = { std::forward(args)... }; + } + + return data_type; +} + +} diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 3a94691f25d..be2b6beae54 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -46,7 +46,7 @@ public: NullsAction nulls_action = NullsAction::EMPTY; - /// do not print empty parentheses if there are no args - compatibility with new AST for data types and engine names. + /// do not print empty parentheses if there are no args - compatibility with engine names. bool no_empty_args = false; /// Specifies where this function-like expression is used. 
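For context on the new parser node introduced above: ASTDataType carries a type name plus optional arguments and only prints parentheses when arguments are present, and makeASTDataType is the variadic factory the patch switches to, e.g. for wrapping MySQL column types in Nullable. A small, hedged usage sketch follows; the includes and the UInt64/ASTLiteral argument handling are assumptions based on the definitions shown in the diff, not part of the patch itself:

#include <Parsers/ASTDataType.h>   // header added by this patch
#include <Parsers/ASTLiteral.h>

using namespace DB;

// Formats as Nullable(<nested>), mirroring the makeASTDataType("Nullable", data_type)
// call that replaces makeASTFunction in InterpretersMySQLDDLQuery.cpp.
ASTPtr makeNullable(const ASTPtr & nested_type)
{
    return makeASTDataType("Nullable", nested_type);
}

// Formats as FixedString(16); a parameterless type such as "UInt64" would take no
// arguments and, per ASTDataType::formatImpl above, print without empty parentheses.
ASTPtr makeFixedString16()
{
    return makeASTDataType("FixedString", std::make_shared<ASTLiteral>(UInt64(16)));
}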
diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index d4fc9a4bc4d..865d07faaa7 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -9,8 +9,8 @@ #include #include #include -#include "Parsers/CommonParsers.h" +#include #include #include #include @@ -725,7 +725,6 @@ bool ParserStatisticsType::parseImpl(Pos & pos, ASTPtr & node, Expected & expect function_node->name = "STATISTICS"; function_node->arguments = stat_type; function_node->children.push_back(function_node->arguments); - node = function_node; return true; } diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index f97c042e91e..d38dc6d5f37 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -2388,6 +2388,24 @@ bool ParserFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } } +bool ParserExpressionWithOptionalArguments::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + ParserIdentifier id_p; + ParserFunction func_p; + + if (ParserFunction(false, false).parse(pos, node, expected)) + return true; + + if (ParserIdentifier().parse(pos, node, expected)) + { + node = makeASTFunction(node->as()->name()); + node->as().no_empty_args = true; + return true; + } + + return false; +} + const std::vector> ParserExpressionImpl::operators_table { {"->", Operator("lambda", 1, 2, OperatorType::Lambda)}, diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 235d5782630..6ab38416f32 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ -144,6 +144,16 @@ protected: }; +/** Similar to ParserFunction (and yields ASTFunction), but can also parse identifiers without braces. + */ +class ParserExpressionWithOptionalArguments : public IParserBase +{ +protected: + const char * getName() const override { return "expression with optional parameters"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; +}; + + /** An expression with an infix binary left-associative operator. * For example, a + b - c + d. */ diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index 4f8edac8597..d70c1cd0b6c 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -271,16 +271,15 @@ public: throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown element in AST: {}", getID()); } - // Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied. - // You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience. + /// Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied. + /// You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience. String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets) const; - /* - * formatForLogging and formatForErrorMessage always hide secrets. This inconsistent - * behaviour is due to the fact such functions are called from Client which knows nothing about - * access rights and settings. Moreover, the only use case for displaying secrets are backups, - * and backup tools use only direct input and ignore logs and error messages. - */ + /** formatForLogging and formatForErrorMessage always hide secrets. This inconsistent + * behaviour is due to the fact such functions are called from Client which knows nothing about + * access rights and settings. 
Moreover, the only use case for displaying secrets are backups, + * and backup tools use only direct input and ignore logs and error messages. + */ String formatForLogging(size_t max_length = 0) const { return formatWithPossiblyHidingSensitiveData(max_length, true, false); diff --git a/src/Parsers/MySQL/tests/gtest_column_parser.cpp b/src/Parsers/MySQL/tests/gtest_column_parser.cpp index 21c37e4ee2e..3a9a0690f06 100644 --- a/src/Parsers/MySQL/tests/gtest_column_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_column_parser.cpp @@ -1,13 +1,14 @@ #include #include #include -#include +#include #include #include #include #include #include + using namespace DB; using namespace DB::MySQLParser; @@ -19,8 +20,8 @@ TEST(ParserColumn, AllNonGeneratedColumnOption) "COLUMN_FORMAT FIXED STORAGE MEMORY REFERENCES tbl_name (col_01) CHECK 1"; ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_EQ(ast->as()->name, "col_01"); - EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); - EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); + EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); + EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); ASTDeclareOptions * declare_options = ast->as()->column_options->as(); EXPECT_EQ(declare_options->changes["is_null"]->as()->value.safeGet(), 0); @@ -44,8 +45,8 @@ TEST(ParserColumn, AllGeneratedColumnOption) "REFERENCES tbl_name (col_01) CHECK 1 GENERATED ALWAYS AS (1) STORED"; ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_EQ(ast->as()->name, "col_01"); - EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); - EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); + EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); + EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); ASTDeclareOptions * declare_options = ast->as()->column_options->as(); EXPECT_EQ(declare_options->changes["is_null"]->as()->value.safeGet(), 1); diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index 28dbf781011..dbefb0cb966 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -9,8 +9,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 2fa34696c58..9ebee4cc852 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -7,9 +7,9 @@ #include #include #include -#include #include + namespace DB { @@ -21,7 +21,7 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected ParserToken close_p(TokenType::ClosingRoundBracket); ParserOrderByExpressionList order_list_p; - ParserDataType data_type_p; + ParserExpressionWithOptionalArguments type_p; ParserExpression expression_p; ParserUnsignedInteger granularity_p; @@ -68,7 +68,7 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected if (s_type.ignore(pos, expected)) { - if (!data_type_p.parse(pos, type, expected)) + if (!type_p.parse(pos, type, expected)) return false; } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 1cc565a41d1..a592975613b 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include 
#include #include @@ -76,9 +77,9 @@ bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!close.ignore(pos, expected)) return false; - auto func = std::make_shared(); + auto func = std::make_shared(); tryGetIdentifierNameInto(name, func->name); - // FIXME(ilezhankin): func->no_empty_args = true; ? + func->arguments = columns; func->children.push_back(columns); node = func; @@ -179,7 +180,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ParserKeyword s_granularity(Keyword::GRANULARITY); ParserIdentifier name_p; - ParserDataType data_type_p; + ParserExpressionWithOptionalArguments type_p; ParserExpression expression_p; ParserUnsignedInteger granularity_p; @@ -197,7 +198,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!s_type.ignore(pos, expected)) return false; - if (!data_type_p.parse(pos, type, expected)) + if (!type_p.parse(pos, type, expected)) return false; if (s_granularity.ignore(pos, expected)) @@ -231,7 +232,7 @@ bool ParserStatisticsDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & ParserKeyword s_type(Keyword::TYPE); ParserList columns_p(std::make_unique(), std::make_unique(TokenType::Comma), false); - ParserList types_p(std::make_unique(), std::make_unique(TokenType::Comma), false); + ParserList types_p(std::make_unique(), std::make_unique(TokenType::Comma), false); ASTPtr columns; ASTPtr types; @@ -751,7 +752,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe auto * table_id = table->as(); - // Shortcut for ATTACH a previously detached table + /// A shortcut for ATTACH a previously detached table. bool short_attach = attach && !from_path; if (short_attach && (!pos.isValid() || pos.get().type == TokenType::Semicolon)) { diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index bb37491a366..53a62deb22b 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -13,6 +14,7 @@ #include #include + namespace DB { @@ -101,17 +103,15 @@ class IParserColumnDeclaration : public IParserBase { public: explicit IParserColumnDeclaration(bool require_type_ = true, bool allow_null_modifiers_ = false, bool check_keywords_after_name_ = false) - : require_type(require_type_) - , allow_null_modifiers(allow_null_modifiers_) - , check_keywords_after_name(check_keywords_after_name_) + : require_type(require_type_) + , allow_null_modifiers(allow_null_modifiers_) + , check_keywords_after_name(check_keywords_after_name_) { } void enableCheckTypeKeyword() { check_type_keyword = true; } protected: - using ASTDeclarePtr = std::shared_ptr; - const char * getName() const override{ return "column declaration"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; @@ -270,9 +270,8 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E auto default_function = std::make_shared(); default_function->name = "defaultValueOfTypeName"; default_function->arguments = std::make_shared(); - // Ephemeral columns don't really have secrets but we need to format - // into a String, hence the strange call - default_function->arguments->children.emplace_back(std::make_shared(type->as()->formatForLogging())); + /// Ephemeral columns don't really have secrets but we need to format into a String, hence the strange call + 
default_function->arguments->children.emplace_back(std::make_shared(type->as()->formatForLogging())); default_expression = default_function; } diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index ad33c7e4558..2edb0141e12 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -1,8 +1,8 @@ #include #include +#include #include -#include #include #include #include @@ -46,48 +46,6 @@ private: } }; -/// Wrapper to allow mixed lists of nested and normal types. -/// Parameters are either: -/// - Nested table elements; -/// - Enum element in form of 'a' = 1; -/// - literal; -/// - Dynamic type arguments; -/// - another data type (or identifier); -class ParserDataTypeArgument : public IParserBase -{ -public: - explicit ParserDataTypeArgument(std::string_view type_name_) : type_name(type_name_) - { - } - -private: - const char * getName() const override { return "data type argument"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override - { - if (type_name == "Dynamic") - { - DynamicArgumentsParser parser; - return parser.parse(pos, node, expected); - } - - ParserNestedTable nested_parser; - ParserDataType data_type_parser; - ParserAllCollectionsOfLiterals literal_parser(false); - - const char * operators[] = {"=", "equals", nullptr}; - ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); - - if (pos->type == TokenType::BareWord && std::string_view(pos->begin, pos->size()) == "Nested") - return nested_parser.parse(pos, node, expected); - - return enum_parser.parse(pos, node, expected) - || literal_parser.parse(pos, node, expected) - || data_type_parser.parse(pos, node, expected); - } - - std::string_view type_name; -}; - } bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) @@ -198,23 +156,102 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } } - auto function_node = std::make_shared(); - function_node->name = type_name; - function_node->no_empty_args = true; + auto data_type_node = std::make_shared(); + data_type_node->name = type_name; if (pos->type != TokenType::OpeningRoundBracket) { - node = function_node; + node = data_type_node; return true; } ++pos; /// Parse optional parameters - ParserList args_parser(std::make_unique(type_name), std::make_unique(TokenType::Comma)); - ASTPtr expr_list_args; + ASTPtr expr_list_args = std::make_shared(); + + /// Allow mixed lists of nested and normal types. + /// Parameters are either: + /// - Nested table elements; + /// - Enum element in form of 'a' = 1; + /// - literal; + /// - Dynamic type arguments; + /// - another data type (or identifier); + + size_t arg_num = 0; + bool have_version_of_aggregate_function = false; + while (true) + { + if (arg_num > 0) + { + if (pos->type == TokenType::Comma) + ++pos; + else + break; + } + + ASTPtr arg; + if (type_name == "Dynamic") + { + DynamicArgumentsParser parser; + parser.parse(pos, arg, expected); + } + else if (type_name == "Nested") + { + ParserNestedTable nested_parser; + nested_parser.parse(pos, arg, expected); + } + else if (type_name == "AggregateFunction" || type_name == "SimpleAggregateFunction") + { + /// This is less trivial. + /// The first optional argument for AggregateFunction is a numeric literal, defining the version. + /// The next argument is the function name, optionally with parameters. + /// Subsequent arguments are data types. 
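The comment above describes the special-cased argument list of AggregateFunction: an optional leading numeric literal is the serialization version, the next argument is the aggregate function name (possibly with parameters), and every remaining argument is a data type. A std-only sketch of that classification over a comma-separated argument string (`splitTopLevel` and the printed labels are illustrative; the real code drives ParserUnsignedInteger, ParserFunction and ParserDataType instead):

```cpp
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

// Split "a, b(c, d), e" on top-level commas only.
std::vector<std::string> splitTopLevel(const std::string & s)
{
    std::vector<std::string> parts;
    std::string cur;
    int depth = 0;
    for (char c : s)
    {
        if (c == '(') ++depth;
        if (c == ')') --depth;
        if (c == ',' && depth == 0) { parts.push_back(cur); cur.clear(); continue; }
        cur += c;
    }
    if (!cur.empty()) parts.push_back(cur);
    return parts;
}

// Classify AggregateFunction(...) arguments the way the new loop does:
// optional leading integer = version, next argument = aggregate function
// name (possibly parameterized), everything after that = data type.
int main()
{
    std::string args = "2, quantiles(0.5, 0.9), UInt64";
    bool have_version = false;
    int arg_num = 0;
    for (std::string arg : splitTopLevel(args))
    {
        arg.erase(0, arg.find_first_not_of(' '));  // trim leading spaces

        std::string kind;
        if (arg_num == 0 && !arg.empty() && std::isdigit(static_cast<unsigned char>(arg[0])))
        {
            kind = "version";
            have_version = true;
        }
        else if (arg_num == (have_version ? 1 : 0))
            kind = "aggregate function";
        else
            kind = "data type";

        std::cout << arg << " -> " << kind << '\n';
        ++arg_num;
    }
}
```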
+ + if (arg_num == 0 && type_name == "AggregateFunction") + { + ParserUnsignedInteger version_parser; + if (version_parser.parse(pos, arg, expected)) + { + have_version_of_aggregate_function = true; + expr_list_args->children.emplace_back(std::move(arg)); + ++arg_num; + continue; + } + } + + if (arg_num == (have_version_of_aggregate_function ? 1 : 0)) + { + ParserFunction function_parser; + ParserIdentifier identifier_parser; + function_parser.parse(pos, arg, expected) + || identifier_parser.parse(pos, arg, expected); + } + else + { + ParserDataType data_type_parser; + data_type_parser.parse(pos, arg, expected); + } + } + else + { + ParserDataType data_type_parser; + ParserAllCollectionsOfLiterals literal_parser(false); + + const char * operators[] = {"=", "equals", nullptr}; + ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); + + enum_parser.parse(pos, arg, expected) + || literal_parser.parse(pos, arg, expected) + || data_type_parser.parse(pos, arg, expected); + } + + if (!arg) + break; + + expr_list_args->children.emplace_back(std::move(arg)); + ++arg_num; + } - if (!args_parser.parse(pos, expr_list_args, expected)) - return false; if (pos->type == TokenType::Comma) // ignore trailing comma inside Nested structures like Tuple(Int, Tuple(Int, String),) ++pos; @@ -222,10 +259,10 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; ++pos; - function_node->arguments = expr_list_args; - function_node->children.push_back(function_node->arguments); + data_type_node->arguments = expr_list_args; + data_type_node->children.push_back(data_type_node->arguments); - node = function_node; + node = data_type_node; return true; } diff --git a/src/Planner/CollectTableExpressionData.cpp b/src/Planner/CollectTableExpressionData.cpp index d5e39a9f123..2fe62aa9be0 100644 --- a/src/Planner/CollectTableExpressionData.cpp +++ b/src/Planner/CollectTableExpressionData.cpp @@ -88,16 +88,16 @@ public: auto column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(node); - ActionsDAGPtr alias_column_actions_dag = std::make_shared(); + ActionsDAG alias_column_actions_dag; PlannerActionsVisitor actions_visitor(planner_context, false); - auto outputs = actions_visitor.visit(*alias_column_actions_dag, column_node->getExpression()); + auto outputs = actions_visitor.visit(alias_column_actions_dag, column_node->getExpression()); if (outputs.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single output in actions dag for alias column {}. 
Actual {}", column_node->dumpTree(), outputs.size()); const auto & column_name = column_node->getColumnName(); - const auto & alias_node = alias_column_actions_dag->addAlias(*outputs[0], column_name); - alias_column_actions_dag->addOrReplaceInOutputs(alias_node); - table_expression_data.addAliasColumn(column_node->getColumn(), column_identifier, alias_column_actions_dag, select_added_columns); + const auto & alias_node = alias_column_actions_dag.addAlias(*outputs[0], column_name); + alias_column_actions_dag.addOrReplaceInOutputs(alias_node); + table_expression_data.addAliasColumn(column_node->getColumn(), column_identifier, std::move(alias_column_actions_dag), select_added_columns); } return; @@ -335,22 +335,22 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr collect_source_columns_visitor.setKeepAliasColumns(false); collect_source_columns_visitor.visit(query_node_typed.getPrewhere()); - auto prewhere_actions_dag = std::make_shared(); + ActionsDAG prewhere_actions_dag; QueryTreeNodePtr query_tree_node = query_node_typed.getPrewhere(); PlannerActionsVisitor visitor(planner_context, false /*use_column_identifier_as_action_node_name*/); - auto expression_nodes = visitor.visit(*prewhere_actions_dag, query_tree_node); + auto expression_nodes = visitor.visit(prewhere_actions_dag, query_tree_node); if (expression_nodes.size() != 1) throw Exception(ErrorCodes::ILLEGAL_PREWHERE, "Invalid PREWHERE. Expected single boolean expression. In query {}", query_node->formatASTForErrorMessage()); - prewhere_actions_dag->getOutputs().push_back(expression_nodes.back()); + prewhere_actions_dag.getOutputs().push_back(expression_nodes.back()); - for (const auto & prewhere_input_node : prewhere_actions_dag->getInputs()) + for (const auto & prewhere_input_node : prewhere_actions_dag.getInputs()) if (required_column_names_without_prewhere.contains(prewhere_input_node->result_name)) - prewhere_actions_dag->getOutputs().push_back(prewhere_input_node); + prewhere_actions_dag.getOutputs().push_back(prewhere_input_node); table_expression_data.setPrewhereFilterActions(std::move(prewhere_actions_dag)); } diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 5640d84731f..968642dc9de 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -12,7 +12,6 @@ #include #include -#include #include #include @@ -216,9 +215,11 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & if (!read_from_dummy) continue; - auto filter_actions = read_from_dummy->getFilterActionsDAG(); - const auto & table_node = dummy_storage_to_table.at(&read_from_dummy->getStorage()); - res[table_node] = FiltersForTableExpression{std::move(filter_actions), read_from_dummy->getPrewhereInfo()}; + if (auto filter_actions = read_from_dummy->detachFilterActionsDAG()) + { + const auto & table_node = dummy_storage_to_table.at(&read_from_dummy->getStorage()); + res[table_node] = FiltersForTableExpression{std::move(filter_actions), read_from_dummy->getPrewhereInfo()}; + } } return res; @@ -332,34 +333,34 @@ public: }; void addExpressionStep(QueryPlan & query_plan, - const ActionsAndProjectInputsFlagPtr & expression_actions, + ActionsAndProjectInputsFlagPtr & expression_actions, const std::string & step_description, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { - auto actions = expression_actions->dag.clone(); + auto actions = std::move(expression_actions->dag); if (expression_actions->project_input) - 
actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + actions.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions); - auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), actions); + auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(actions)); + appendSetsFromActionsDAG(expression_step->getExpression(), useful_sets); expression_step->setStepDescription(step_description); query_plan.addStep(std::move(expression_step)); } void addFilterStep(QueryPlan & query_plan, - const FilterAnalysisResult & filter_analysis_result, + FilterAnalysisResult & filter_analysis_result, const std::string & step_description, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { - auto actions = filter_analysis_result.filter_actions->dag.clone(); + auto actions = std::move(filter_analysis_result.filter_actions->dag); if (filter_analysis_result.filter_actions->project_input) - actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + actions.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions); auto where_step = std::make_unique(query_plan.getCurrentDataStream(), - actions, + std::move(actions), filter_analysis_result.filter_column_name, filter_analysis_result.remove_filter_column); + appendSetsFromActionsDAG(where_step->getExpression(), useful_sets); where_step->setStepDescription(step_description); query_plan.addStep(std::move(where_step)); } @@ -543,39 +544,41 @@ void addMergingAggregatedStep(QueryPlan & query_plan, } void addTotalsHavingStep(QueryPlan & query_plan, - const PlannerExpressionsAnalysisResult & expression_analysis_result, + PlannerExpressionsAnalysisResult & expression_analysis_result, const QueryAnalysisResult & query_analysis_result, const PlannerContextPtr & planner_context, const QueryNode & query_node, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - const auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); - const auto & having_analysis_result = expression_analysis_result.getHaving(); + auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); + auto & having_analysis_result = expression_analysis_result.getHaving(); bool need_finalize = !query_node.isGroupByWithRollup() && !query_node.isGroupByWithCube(); - ActionsDAGPtr actions; + std::optional actions; if (having_analysis_result.filter_actions) { - actions = having_analysis_result.filter_actions->dag.clone(); + actions = std::move(having_analysis_result.filter_actions->dag); if (having_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - - result_actions_to_execute.push_back(actions); } auto totals_having_step = std::make_unique( query_plan.getCurrentDataStream(), aggregation_analysis_result.aggregate_descriptions, query_analysis_result.aggregate_overflow_row, - actions, + std::move(actions), having_analysis_result.filter_column_name, having_analysis_result.remove_filter_column, settings.totals_mode, settings.totals_auto_threshold, need_finalize); + + if (having_analysis_result.filter_actions) + appendSetsFromActionsDAG(*totals_having_step->getActions(), useful_sets); + 
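addTotalsHavingStep now carries the HAVING actions as `std::optional<ActionsDAG>`, fills it only when a HAVING filter exists, and moves it into the step rather than cloning a shared DAG. A small standard-library sketch of that optionally-present, move-only shape (`Dag`, `TotalsHavingStep` and `makeStep` are hypothetical stand-ins):

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Stand-in for ActionsDAG: expensive to copy, fine to move.
struct Dag
{
    std::vector<std::string> nodes;
};

// Stand-in for TotalsHavingStep: HAVING actions are optional, because
// WITH TOTALS can be used without a HAVING clause at all.
struct TotalsHavingStep
{
    std::optional<Dag> actions;
    explicit TotalsHavingStep(std::optional<Dag> actions_) : actions(std::move(actions_)) {}
    const std::optional<Dag> & getActions() const { return actions; }
};

TotalsHavingStep makeStep(bool has_having)
{
    std::optional<Dag> actions;
    if (has_having)
        actions = Dag{{"greater(count(), 10)"}};   // filled only when HAVING exists

    return TotalsHavingStep(std::move(actions));   // moved into the step, never cloned
}

int main()
{
    auto with_having = makeStep(true);
    auto without_having = makeStep(false);

    std::cout << "with HAVING: " << (with_having.getActions() ? with_having.getActions()->nodes.size() : 0) << " node(s)\n";
    std::cout << "without HAVING: " << (without_having.getActions() ? 1 : 0) << " dag(s)\n";
}
```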
query_plan.addStep(std::move(totals_having_step)); } @@ -717,13 +720,13 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, if (query_node.hasInterpolate()) { - auto interpolate_actions_dag = std::make_shared(); + ActionsDAG interpolate_actions_dag; auto query_plan_columns = query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); for (auto & query_plan_column : query_plan_columns) { /// INTERPOLATE actions dag input columns must be non constant query_plan_column.column = nullptr; - interpolate_actions_dag->addInput(query_plan_column); + interpolate_actions_dag.addInput(query_plan_column); } auto & interpolate_list_node = query_node.getInterpolate()->as(); @@ -731,12 +734,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, if (interpolate_list_nodes.empty()) { - for (const auto * input_node : interpolate_actions_dag->getInputs()) + for (const auto * input_node : interpolate_actions_dag.getInputs()) { if (column_names_with_fill.contains(input_node->result_name)) continue; - interpolate_actions_dag->getOutputs().push_back(input_node); + interpolate_actions_dag.getOutputs().push_back(input_node); } } else @@ -746,12 +749,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, auto & interpolate_node_typed = interpolate_node->as(); PlannerActionsVisitor planner_actions_visitor(planner_context); - auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, + auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getExpression()); if (expression_to_interpolate_expression_nodes.size() != 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expression to interpolate expected to have single action node"); - auto interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, + auto interpolate_expression_nodes = planner_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getInterpolateExpression()); if (interpolate_expression_nodes.size() != 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Interpolate expression expected to have single action node"); @@ -762,16 +765,16 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, const auto * interpolate_expression = interpolate_expression_nodes[0]; if (!interpolate_expression->result_type->equals(*expression_to_interpolate->result_type)) { - interpolate_expression = &interpolate_actions_dag->addCast(*interpolate_expression, + interpolate_expression = &interpolate_actions_dag.addCast(*interpolate_expression, expression_to_interpolate->result_type, interpolate_expression->result_name); } - const auto * alias_node = &interpolate_actions_dag->addAlias(*interpolate_expression, expression_to_interpolate_name); - interpolate_actions_dag->getOutputs().push_back(alias_node); + const auto * alias_node = &interpolate_actions_dag.addAlias(*interpolate_expression, expression_to_interpolate_name); + interpolate_actions_dag.getOutputs().push_back(alias_node); } - interpolate_actions_dag->removeUnusedActions(); + interpolate_actions_dag.removeUnusedActions(); } Aliases empty_aliases; @@ -883,12 +886,12 @@ bool addPreliminaryLimitOptimizationStepIfNeeded(QueryPlan & query_plan, * WINDOW functions. 
*/ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, - const PlannerExpressionsAnalysisResult & expressions_analysis_result, + PlannerExpressionsAnalysisResult & expressions_analysis_result, const QueryAnalysisResult & query_analysis_result, const PlannerContextPtr & planner_context, const PlannerQueryProcessingInfo & query_processing_info, const QueryTreeNodePtr & query_tree, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { const auto & query_node = query_tree->as(); @@ -919,8 +922,8 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, if (expressions_analysis_result.hasLimitBy()) { - const auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy(); - addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", result_actions_to_execute); + auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy(); + addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets); addLimitByStep(query_plan, limit_by_analysis_result, query_node); } @@ -930,12 +933,12 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, void addWindowSteps(QueryPlan & query_plan, const PlannerContextPtr & planner_context, - const WindowAnalysisResult & window_analysis_result) + WindowAnalysisResult & window_analysis_result) { const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - auto window_descriptions = window_analysis_result.window_descriptions; + auto & window_descriptions = window_analysis_result.window_descriptions; sortWindowDescriptions(window_descriptions); size_t window_descriptions_size = window_descriptions.size(); @@ -1057,47 +1060,15 @@ void addOffsetStep(QueryPlan & query_plan, const QueryAnalysisResult & query_ana } } -void collectSetsFromActionsDAG(const ActionsDAGPtr & dag, std::unordered_set & useful_sets) -{ - for (const auto & node : dag->getNodes()) - { - if (node.column) - { - const IColumn * column = node.column.get(); - if (const auto * column_const = typeid_cast(column)) - column = &column_const->getDataColumn(); - - if (const auto * column_set = typeid_cast(column)) - useful_sets.insert(column_set->getData().get()); - } - - if (node.type == ActionsDAG::ActionType::FUNCTION && node.function_base->getName() == "indexHint") - { - ActionsDAG::NodeRawConstPtrs children; - if (const auto * adaptor = typeid_cast(node.function_base.get())) - { - if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) - { - collectSetsFromActionsDAG(index_hint->getActions(), useful_sets); - } - } - } - } -} - void addBuildSubqueriesForSetsStepIfNeeded( QueryPlan & query_plan, const SelectQueryOptions & select_query_options, const PlannerContextPtr & planner_context, - const std::vector & result_actions_to_execute) + const UsefulSets & useful_sets) { auto subqueries = planner_context->getPreparedSets().getSubqueries(); - std::unordered_set useful_sets; - for (const auto & actions_to_execute : result_actions_to_execute) - collectSetsFromActionsDAG(actions_to_execute, useful_sets); - - auto predicate = [&useful_sets](const auto & set) { return !useful_sets.contains(set.get()); }; + auto predicate = [&useful_sets](const auto & set) { return !useful_sets.contains(set); }; auto it = std::remove_if(subqueries.begin(), subqueries.end(), std::move(predicate)); subqueries.erase(it, subqueries.end()); @@ -1159,11 +1130,11 @@ 
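The removed collectSetsFromActionsDAG pass rescanned whole DAGs kept alive in result_actions_to_execute; its replacement collects set references into `useful_sets` as each step is built and then prunes the set-building subqueries with the same `remove_if` shape shown above. A compact sketch of that collect-then-prune flow using standard containers (`FutureSet`, `appendSets` and `SetSubquery` are illustrative names, not the real ClickHouse types):

```cpp
#include <algorithm>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

// Stand-in for FutureSet: the thing a set-building subquery produces.
struct FutureSet { std::string source; };
using FutureSetPtr = std::shared_ptr<FutureSet>;

// Stand-in for an ActionsDAG node: some nodes reference a prepared set.
struct DagNode { std::string name; FutureSetPtr set; };
struct Dag { std::vector<DagNode> nodes; };

// The appendSetsFromActionsDAG idea: harvest set references from a DAG the
// moment the owning step is created, instead of keeping whole DAGs around
// only to rescan them at the end of planning.
void appendSets(const Dag & dag, std::unordered_set<FutureSetPtr> & useful_sets)
{
    for (const auto & node : dag.nodes)
        if (node.set)
            useful_sets.insert(node.set);
}

// Subquery that would build one set; only run it if its set is referenced.
struct SetSubquery { std::string description; FutureSetPtr set; };

int main()
{
    auto used = std::make_shared<FutureSet>(FutureSet{"SELECT id FROM allowed"});
    auto unused = std::make_shared<FutureSet>(FutureSet{"SELECT id FROM pruned"});

    Dag where_dag{{{"id", nullptr}, {"__set_1", used}}};

    std::unordered_set<FutureSetPtr> useful_sets;
    appendSets(where_dag, useful_sets);

    std::vector<SetSubquery> subqueries{{"build set #1", used}, {"build set #2", unused}};

    // Same erase/remove_if shape as addBuildSubqueriesForSetsStepIfNeeded.
    auto predicate = [&](const SetSubquery & s) { return !useful_sets.contains(s.set); };
    subqueries.erase(std::remove_if(subqueries.begin(), subqueries.end(), predicate), subqueries.end());

    for (const auto & s : subqueries)
        std::cout << "will run: " << s.description << '\n';   // only "build set #1"
}
```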
void addAdditionalFilterStepIfNeeded(QueryPlan & query_plan, auto fake_table_expression = std::make_shared(std::move(storage), query_context); auto filter_info = buildFilterInfo(additional_result_filter_ast, fake_table_expression, planner_context, std::move(fake_name_set)); - if (!filter_info.actions || !query_plan.isInitialized()) + if (!query_plan.isInitialized()) return; auto filter_step = std::make_unique(query_plan.getCurrentDataStream(), - filter_info.actions, + std::move(filter_info.actions), filter_info.column_name, filter_info.do_remove_column); filter_step->setStepDescription("additional result filter"); @@ -1443,7 +1414,7 @@ void Planner::buildPlanForQueryNode() checkStoragesSupportTransactions(planner_context); const auto & table_filters = planner_context->getGlobalPlannerContext()->filters_for_table_expressions; - if (!select_query_options.only_analyze && !table_filters.empty()) // && top_level) + if (!select_query_options.only_analyze && !table_filters.empty()) { for (auto & [table_node, table_expression_data] : planner_context->getTableExpressionNodeToData()) { @@ -1451,7 +1422,7 @@ void Planner::buildPlanForQueryNode() if (it != table_filters.end()) { const auto & filters = it->second; - table_expression_data.setFilterActions(filters.filter_actions); + table_expression_data.setFilterActions(filters.filter_actions->clone()); table_expression_data.setPrewhereInfo(filters.prewhere_info); } } @@ -1542,15 +1513,15 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info); - std::vector result_actions_to_execute = std::move(join_tree_query_plan.actions_dags); + auto useful_sets = std::move(join_tree_query_plan.useful_sets); for (auto & [_, table_expression_data] : planner_context->getTableExpressionNodeToData()) { if (table_expression_data.getPrewhereFilterActions()) - result_actions_to_execute.push_back(table_expression_data.getPrewhereFilterActions()); + appendSetsFromActionsDAG(*table_expression_data.getPrewhereFilterActions(), useful_sets); if (table_expression_data.getRowLevelFilterActions()) - result_actions_to_execute.push_back(table_expression_data.getRowLevelFilterActions()); + appendSetsFromActionsDAG(*table_expression_data.getRowLevelFilterActions(), useful_sets); } if (query_processing_info.isIntermediateStage()) @@ -1561,7 +1532,7 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info, query_tree, - result_actions_to_execute); + useful_sets); if (expression_analysis_result.hasAggregation()) { @@ -1573,13 +1544,13 @@ void Planner::buildPlanForQueryNode() if (query_processing_info.isFirstStage()) { if (expression_analysis_result.hasWhere()) - addFilterStep(query_plan, expression_analysis_result.getWhere(), "WHERE", result_actions_to_execute); + addFilterStep(query_plan, expression_analysis_result.getWhere(), "WHERE", useful_sets); if (expression_analysis_result.hasAggregation()) { - const auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); + auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); if (aggregation_analysis_result.before_aggregation_actions) - addExpressionStep(query_plan, aggregation_analysis_result.before_aggregation_actions, "Before GROUP BY", result_actions_to_execute); + addExpressionStep(query_plan, aggregation_analysis_result.before_aggregation_actions, "Before GROUP BY", useful_sets); addAggregationStep(query_plan, aggregation_analysis_result, query_analysis_result, planner_context, select_query_info); } @@ -1596,9 +1567,9 @@ void 
Planner::buildPlanForQueryNode() * window functions, we can't execute ORDER BY and DISTINCT * now, on shard (first_stage). */ - const auto & window_analysis_result = expression_analysis_result.getWindow(); + auto & window_analysis_result = expression_analysis_result.getWindow(); if (window_analysis_result.before_window_actions) - addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before WINDOW", result_actions_to_execute); + addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before WINDOW", useful_sets); } else { @@ -1606,8 +1577,8 @@ void Planner::buildPlanForQueryNode() * Projection expressions, preliminary DISTINCT and before ORDER BY expressions * now, on shards (first_stage). */ - const auto & projection_analysis_result = expression_analysis_result.getProjection(); - addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", result_actions_to_execute); + auto & projection_analysis_result = expression_analysis_result.getProjection(); + addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", useful_sets); if (query_node.isDistinct()) { @@ -1622,8 +1593,8 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasSort()) { - const auto & sort_analysis_result = expression_analysis_result.getSort(); - addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", result_actions_to_execute); + auto & sort_analysis_result = expression_analysis_result.getSort(); + addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", useful_sets); } } } @@ -1634,7 +1605,7 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info, query_tree, - result_actions_to_execute); + useful_sets); } if (query_processing_info.isSecondStage() || query_processing_info.isFromAggregationState()) @@ -1656,14 +1627,14 @@ void Planner::buildPlanForQueryNode() if (query_node.isGroupByWithTotals()) { - addTotalsHavingStep(query_plan, expression_analysis_result, query_analysis_result, planner_context, query_node, result_actions_to_execute); + addTotalsHavingStep(query_plan, expression_analysis_result, query_analysis_result, planner_context, query_node, useful_sets); having_executed = true; } addCubeOrRollupStepIfNeeded(query_plan, aggregation_analysis_result, query_analysis_result, planner_context, select_query_info, query_node); if (!having_executed && expression_analysis_result.hasHaving()) - addFilterStep(query_plan, expression_analysis_result.getHaving(), "HAVING", result_actions_to_execute); + addFilterStep(query_plan, expression_analysis_result.getHaving(), "HAVING", useful_sets); } if (query_processing_info.isFromAggregationState()) @@ -1676,18 +1647,18 @@ void Planner::buildPlanForQueryNode() { if (expression_analysis_result.hasWindow()) { - const auto & window_analysis_result = expression_analysis_result.getWindow(); + auto & window_analysis_result = expression_analysis_result.getWindow(); if (expression_analysis_result.hasAggregation()) - addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before window functions", result_actions_to_execute); + addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before window functions", useful_sets); addWindowSteps(query_plan, planner_context, window_analysis_result); } if (expression_analysis_result.hasQualify()) - addFilterStep(query_plan, expression_analysis_result.getQualify(), "QUALIFY", 
result_actions_to_execute); + addFilterStep(query_plan, expression_analysis_result.getQualify(), "QUALIFY", useful_sets); - const auto & projection_analysis_result = expression_analysis_result.getProjection(); - addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", result_actions_to_execute); + auto & projection_analysis_result = expression_analysis_result.getProjection(); + addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", useful_sets); if (query_node.isDistinct()) { @@ -1702,8 +1673,8 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasSort()) { - const auto & sort_analysis_result = expression_analysis_result.getSort(); - addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", result_actions_to_execute); + auto & sort_analysis_result = expression_analysis_result.getSort(); + addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", useful_sets); } } else @@ -1755,8 +1726,8 @@ void Planner::buildPlanForQueryNode() if (!query_processing_info.isFromAggregationState() && expression_analysis_result.hasLimitBy()) { - const auto & limit_by_analysis_result = expression_analysis_result.getLimitBy(); - addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", result_actions_to_execute); + auto & limit_by_analysis_result = expression_analysis_result.getLimitBy(); + addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets); addLimitByStep(query_plan, limit_by_analysis_result, query_node); } @@ -1787,8 +1758,8 @@ void Planner::buildPlanForQueryNode() /// Project names is not done on shards, because initiator will not find columns in blocks if (!query_processing_info.isToAggregationState()) { - const auto & projection_analysis_result = expression_analysis_result.getProjection(); - addExpressionStep(query_plan, projection_analysis_result.project_names_actions, "Project names", result_actions_to_execute); + auto & projection_analysis_result = expression_analysis_result.getProjection(); + addExpressionStep(query_plan, projection_analysis_result.project_names_actions, "Project names", useful_sets); } // For additional_result_filter setting @@ -1796,7 +1767,7 @@ void Planner::buildPlanForQueryNode() } if (!select_query_options.only_analyze) - addBuildSubqueriesForSetsStepIfNeeded(query_plan, select_query_options, planner_context, result_actions_to_execute); + addBuildSubqueriesForSetsStepIfNeeded(query_plan, select_query_options, planner_context, useful_sets); query_node_to_plan_step_mapping[&query_node] = query_plan.getRootNode(); } diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index a0c7f9b2516..1960855792c 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -759,15 +759,15 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi lambda_arguments_names_and_types.emplace_back(lambda_argument_name, std::move(lambda_argument_type)); } - auto lambda_actions_dag = std::make_shared(); - actions_stack.emplace_back(*lambda_actions_dag, node); + ActionsDAG lambda_actions_dag; + actions_stack.emplace_back(lambda_actions_dag, node); auto [lambda_expression_node_name, levels] = visitImpl(lambda_node.getExpression()); - lambda_actions_dag->getOutputs().push_back(actions_stack.back().getNodeOrThrow(lambda_expression_node_name)); - 
lambda_actions_dag->removeUnusedActions(Names(1, lambda_expression_node_name)); + lambda_actions_dag.getOutputs().push_back(actions_stack.back().getNodeOrThrow(lambda_expression_node_name)); + lambda_actions_dag.removeUnusedActions(Names(1, lambda_expression_node_name)); auto expression_actions_settings = ExpressionActionsSettings::fromContext(planner_context->getQueryContext(), CompileExpressions::yes); - auto lambda_actions = std::make_shared(lambda_actions_dag, expression_actions_settings); + auto lambda_actions = std::make_shared(std::move(lambda_actions_dag), expression_actions_settings); Names captured_column_names; ActionsDAG::NodeRawConstPtrs lambda_children; @@ -881,14 +881,14 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi const auto & function_node = node->as(); auto function_node_name = action_node_name_helper.calculateActionNodeName(node); - auto index_hint_actions_dag = std::make_shared(); - auto & index_hint_actions_dag_outputs = index_hint_actions_dag->getOutputs(); + ActionsDAG index_hint_actions_dag; + auto & index_hint_actions_dag_outputs = index_hint_actions_dag.getOutputs(); std::unordered_set index_hint_actions_dag_output_node_names; PlannerActionsVisitor actions_visitor(planner_context); for (const auto & argument : function_node.getArguments()) { - auto index_hint_argument_expression_dag_nodes = actions_visitor.visit(*index_hint_actions_dag, argument); + auto index_hint_argument_expression_dag_nodes = actions_visitor.visit(index_hint_actions_dag, argument); for (auto & expression_dag_node : index_hint_argument_expression_dag_nodes) { diff --git a/src/Planner/PlannerContext.h b/src/Planner/PlannerContext.h index 418240fa34e..f35772ef7c0 100644 --- a/src/Planner/PlannerContext.h +++ b/src/Planner/PlannerContext.h @@ -25,7 +25,7 @@ class TableNode; struct FiltersForTableExpression { - ActionsDAGPtr filter_actions; + std::optional filter_actions; PrewhereInfoPtr prewhere_info; }; diff --git a/src/Planner/PlannerExpressionAnalysis.h b/src/Planner/PlannerExpressionAnalysis.h index 820df7131a7..283fcac7aba 100644 --- a/src/Planner/PlannerExpressionAnalysis.h +++ b/src/Planner/PlannerExpressionAnalysis.h @@ -64,7 +64,7 @@ public: : projection_analysis_result(std::move(projection_analysis_result_)) {} - const ProjectionAnalysisResult & getProjection() const + ProjectionAnalysisResult & getProjection() { return projection_analysis_result; } @@ -74,7 +74,7 @@ public: return where_analysis_result.filter_actions != nullptr; } - const FilterAnalysisResult & getWhere() const + FilterAnalysisResult & getWhere() { return where_analysis_result; } @@ -89,7 +89,7 @@ public: return !aggregation_analysis_result.aggregation_keys.empty() || !aggregation_analysis_result.aggregate_descriptions.empty(); } - const AggregationAnalysisResult & getAggregation() const + AggregationAnalysisResult & getAggregation() { return aggregation_analysis_result; } @@ -104,7 +104,7 @@ public: return having_analysis_result.filter_actions != nullptr; } - const FilterAnalysisResult & getHaving() const + FilterAnalysisResult & getHaving() { return having_analysis_result; } @@ -119,7 +119,7 @@ public: return !window_analysis_result.window_descriptions.empty(); } - const WindowAnalysisResult & getWindow() const + WindowAnalysisResult & getWindow() { return window_analysis_result; } @@ -134,7 +134,7 @@ public: return qualify_analysis_result.filter_actions != nullptr; } - const FilterAnalysisResult & getQualify() const + FilterAnalysisResult & getQualify() { return 
qualify_analysis_result; } @@ -149,7 +149,7 @@ public: return sort_analysis_result.before_order_by_actions != nullptr; } - const SortAnalysisResult & getSort() const + SortAnalysisResult & getSort() { return sort_analysis_result; } @@ -164,7 +164,7 @@ public: return limit_by_analysis_result.before_limit_by_actions != nullptr; } - const LimitByAnalysisResult & getLimitBy() const + LimitByAnalysisResult & getLimitBy() { return limit_by_analysis_result; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 4fedacbb48a..a3db0395ccc 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -437,7 +437,7 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info std::unordered_set required_output_nodes; - for (const auto * input : prewhere_actions->getInputs()) + for (const auto * input : prewhere_actions.getInputs()) { if (required_columns.contains(input->result_name)) required_output_nodes.insert(input); @@ -446,7 +446,7 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info if (required_output_nodes.empty()) return; - auto & prewhere_outputs = prewhere_actions->getOutputs(); + auto & prewhere_outputs = prewhere_actions.getOutputs(); for (const auto & output : prewhere_outputs) { auto required_output_node_it = required_output_nodes.find(output); @@ -459,7 +459,7 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info prewhere_outputs.insert(prewhere_outputs.end(), required_output_nodes.begin(), required_output_nodes.end()); } -FilterDAGInfo buildRowPolicyFilterIfNeeded(const StoragePtr & storage, +std::optional buildRowPolicyFilterIfNeeded(const StoragePtr & storage, SelectQueryInfo & table_expression_query_info, PlannerContextPtr & planner_context, std::set & used_row_policies) @@ -480,7 +480,7 @@ FilterDAGInfo buildRowPolicyFilterIfNeeded(const StoragePtr & storage, return buildFilterInfo(row_policy_filter->expression, table_expression_query_info.table_expression, planner_context); } -FilterDAGInfo buildCustomKeyFilterIfNeeded(const StoragePtr & storage, +std::optional buildCustomKeyFilterIfNeeded(const StoragePtr & storage, SelectQueryInfo & table_expression_query_info, PlannerContextPtr & planner_context) { @@ -514,7 +514,7 @@ FilterDAGInfo buildCustomKeyFilterIfNeeded(const StoragePtr & storage, } /// Apply filters from additional_table_filters setting -FilterDAGInfo buildAdditionalFiltersIfNeeded(const StoragePtr & storage, +std::optional buildAdditionalFiltersIfNeeded(const StoragePtr & storage, const String & table_expression_alias, SelectQueryInfo & table_expression_query_info, PlannerContextPtr & planner_context) @@ -590,21 +590,21 @@ UInt64 mainQueryNodeBlockSizeByLimit(const SelectQueryInfo & select_query_info) } std::unique_ptr createComputeAliasColumnsStep( - const std::unordered_map & alias_column_expressions, const DataStream & current_data_stream) + std::unordered_map & alias_column_expressions, const DataStream & current_data_stream) { - ActionsDAGPtr merged_alias_columns_actions_dag = std::make_shared(current_data_stream.header.getColumnsWithTypeAndName()); - ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag->getInputs(); + ActionsDAG merged_alias_columns_actions_dag(current_data_stream.header.getColumnsWithTypeAndName()); + ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag.getInputs(); - for (const auto & [column_name, alias_column_actions_dag] : 
alias_column_expressions) + for (auto & [column_name, alias_column_actions_dag] : alias_column_expressions) { - const auto & current_outputs = alias_column_actions_dag->getOutputs(); + const auto & current_outputs = alias_column_actions_dag.getOutputs(); action_dag_outputs.insert(action_dag_outputs.end(), current_outputs.begin(), current_outputs.end()); - merged_alias_columns_actions_dag->mergeNodes(std::move(*alias_column_actions_dag)); + merged_alias_columns_actions_dag.mergeNodes(std::move(alias_column_actions_dag)); } for (const auto * output_node : action_dag_outputs) - merged_alias_columns_actions_dag->addOrReplaceInOutputs(*output_node); - merged_alias_columns_actions_dag->removeUnusedActions(false); + merged_alias_columns_actions_dag.addOrReplaceInOutputs(*output_node); + merged_alias_columns_actions_dag.removeUnusedActions(false); auto alias_column_step = std::make_unique(current_data_stream, std::move(merged_alias_columns_actions_dag)); alias_column_step->setStepDescription("Compute alias columns"); @@ -647,7 +647,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto table_expression_query_info = select_query_info; table_expression_query_info.table_expression = table_expression; - table_expression_query_info.filter_actions_dag = table_expression_data.getFilterActions(); + if (const auto & filter_actions = table_expression_data.getFilterActions()) + table_expression_query_info.filter_actions_dag = std::make_shared(filter_actions->clone()); table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; @@ -778,7 +779,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (prewhere_actions) { prewhere_info = std::make_shared(); - prewhere_info->prewhere_actions = prewhere_actions; + prewhere_info->prewhere_actions = prewhere_actions->clone(); prewhere_info->prewhere_column_name = prewhere_actions->getOutputs().at(0)->result_name; prewhere_info->remove_prewhere_column = true; prewhere_info->need_filter = true; @@ -789,11 +790,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres const auto & columns_names = table_expression_data.getColumnNames(); std::vector> where_filters; - const auto add_filter = [&](const FilterDAGInfo & filter_info, std::string description) + const auto add_filter = [&](FilterDAGInfo & filter_info, std::string description) { - if (!filter_info.actions) - return; - bool is_final = table_expression_query_info.table_expression_modifiers && table_expression_query_info.table_expression_modifiers->hasFinal(); bool optimize_move_to_prewhere @@ -803,46 +801,45 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (storage->canMoveConditionsToPrewhere() && optimize_move_to_prewhere && (!supported_prewhere_columns || supported_prewhere_columns->contains(filter_info.column_name))) { if (!prewhere_info) - prewhere_info = std::make_shared(); - - if (!prewhere_info->prewhere_actions) { - prewhere_info->prewhere_actions = filter_info.actions; + prewhere_info = std::make_shared(); + prewhere_info->prewhere_actions = std::move(filter_info.actions); prewhere_info->prewhere_column_name = filter_info.column_name; prewhere_info->remove_prewhere_column = filter_info.do_remove_column; prewhere_info->need_filter = true; } else if (!prewhere_info->row_level_filter) { - prewhere_info->row_level_filter = filter_info.actions; + 
prewhere_info->row_level_filter = std::move(filter_info.actions); prewhere_info->row_level_column_name = filter_info.column_name; prewhere_info->need_filter = true; } else { - where_filters.emplace_back(filter_info, std::move(description)); + where_filters.emplace_back(std::move(filter_info), std::move(description)); } } else { - where_filters.emplace_back(filter_info, std::move(description)); + where_filters.emplace_back(std::move(filter_info), std::move(description)); } }; auto row_policy_filter_info = buildRowPolicyFilterIfNeeded(storage, table_expression_query_info, planner_context, used_row_policies); - add_filter(row_policy_filter_info, "Row-level security filter"); - if (row_policy_filter_info.actions) - table_expression_data.setRowLevelFilterActions(row_policy_filter_info.actions); + if (row_policy_filter_info) + { + table_expression_data.setRowLevelFilterActions(row_policy_filter_info->actions.clone()); + add_filter(*row_policy_filter_info, "Row-level security filter"); + } if (query_context->canUseParallelReplicasCustomKey()) { if (settings.parallel_replicas_count > 1) { - auto parallel_replicas_custom_key_filter_info - = buildCustomKeyFilterIfNeeded(storage, table_expression_query_info, planner_context); - add_filter(parallel_replicas_custom_key_filter_info, "Parallel replicas custom key filter"); + if (auto parallel_replicas_custom_key_filter_info= buildCustomKeyFilterIfNeeded(storage, table_expression_query_info, planner_context)) + add_filter(*parallel_replicas_custom_key_filter_info, "Parallel replicas custom key filter"); } else if (auto * distributed = typeid_cast(storage.get()); distributed && query_context->canUseParallelReplicasCustomKeyForCluster(*distributed->getCluster())) @@ -857,9 +854,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } const auto & table_expression_alias = table_expression->getOriginalAlias(); - auto additional_filters_info - = buildAdditionalFiltersIfNeeded(storage, table_expression_alias, table_expression_query_info, planner_context); - add_filter(additional_filters_info, "additional filter"); + if (auto additional_filters_info = buildAdditionalFiltersIfNeeded(storage, table_expression_alias, table_expression_query_info, planner_context)) + add_filter(*additional_filters_info, "additional filter"); from_stage = storage->getQueryProcessingStage( query_context, select_query_options.to_stage, storage_snapshot, table_expression_query_info); @@ -997,22 +993,20 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } } - const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); + auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); if (!alias_column_expressions.empty() && query_plan.isInitialized() && from_stage == QueryProcessingStage::FetchColumns) { auto alias_column_step = createComputeAliasColumnsStep(alias_column_expressions, query_plan.getCurrentDataStream()); query_plan.addStep(std::move(alias_column_step)); } - for (const auto & filter_info_and_description : where_filters) + for (auto && [filter_info, description] : where_filters) { - const auto & [filter_info, description] = filter_info_and_description; if (query_plan.isInitialized() && - from_stage == QueryProcessingStage::FetchColumns && - filter_info.actions) + from_stage == QueryProcessingStage::FetchColumns) { auto filter_step = std::make_unique(query_plan.getCurrentDataStream(), - filter_info.actions, + std::move(filter_info.actions), 
filter_info.column_name, filter_info.do_remove_column); filter_step->setStepDescription(description); @@ -1088,7 +1082,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(subquery_planner).extractQueryPlan(); } - const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); + auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); if (!alias_column_expressions.empty() && query_plan.isInitialized() && from_stage == QueryProcessingStage::FetchColumns) { auto alias_column_step = createComputeAliasColumnsStep(alias_column_expressions, query_plan.getCurrentDataStream()); @@ -1103,21 +1097,21 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (from_stage == QueryProcessingStage::FetchColumns) { - auto rename_actions_dag = std::make_shared(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG rename_actions_dag(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs updated_actions_dag_outputs; - for (auto & output_node : rename_actions_dag->getOutputs()) + for (auto & output_node : rename_actions_dag.getOutputs()) { const auto * column_identifier = table_expression_data.getColumnIdentifierOrNull(output_node->result_name); if (!column_identifier) continue; - updated_actions_dag_outputs.push_back(&rename_actions_dag->addAlias(*output_node, *column_identifier)); + updated_actions_dag_outputs.push_back(&rename_actions_dag.addAlias(*output_node, *column_identifier)); } - rename_actions_dag->getOutputs() = std::move(updated_actions_dag_outputs); + rename_actions_dag.getOutputs() = std::move(updated_actions_dag_outputs); - auto rename_step = std::make_unique(query_plan.getCurrentDataStream(), rename_actions_dag); + auto rename_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(rename_actions_dag)); rename_step->setStepDescription("Change column names to column identifiers"); query_plan.addStep(std::move(rename_step)); } @@ -1157,9 +1151,9 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextPtr & planner_context, const FunctionOverloadResolverPtr & to_nullable_function) { - auto cast_actions_dag = std::make_shared(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG cast_actions_dag(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); - for (auto & output_node : cast_actions_dag->getOutputs()) + for (auto & output_node : cast_actions_dag.getOutputs()) { if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(output_node->result_name)) { @@ -1168,11 +1162,11 @@ void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextP type_to_check = type_to_check_low_cardinality->getDictionaryType(); if (type_to_check->canBeInsideNullable()) - output_node = &cast_actions_dag->addFunction(to_nullable_function, {output_node}, output_node->result_name); + output_node = &cast_actions_dag.addFunction(to_nullable_function, {output_node}, output_node->result_name); } } - cast_actions_dag->appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); + cast_actions_dag.appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); auto cast_join_columns_step = std::make_unique(plan_to_add_cast.getCurrentDataStream(), std::move(cast_actions_dag)); 
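The "Change column names to column identifiers" step above re-exposes every plan output under its planner-wide identifier via addAlias and drops outputs that have no identifier. The same selection logic on plain strings (the `column_to_identifier` mapping here is a hypothetical example):

```cpp
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Reduced form of the rename step: each output column that has a planner
// identifier is re-exposed under that identifier; the rest are dropped.
int main()
{
    std::vector<std::string> outputs{"id", "value", "_part"};
    std::unordered_map<std::string, std::string> column_to_identifier{
        {"id", "__table1.id"}, {"value", "__table1.value"}};

    std::vector<std::string> updated_outputs;
    for (const auto & name : outputs)
    {
        auto it = column_to_identifier.find(name);
        if (it == column_to_identifier.end())
            continue;                             // no identifier -> not part of the plan header
        updated_outputs.push_back(it->second);    // addAlias(output, identifier)
    }

    for (const auto & name : updated_outputs)
        std::cout << name << '\n';                // __table1.id, __table1.value
}
```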
cast_join_columns_step->setStepDescription("Cast JOIN columns to Nullable"); plan_to_add_cast.addStep(std::move(cast_join_columns_step)); @@ -1218,14 +1212,16 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ join_table_expression, planner_context); - join_clauses_and_actions.left_join_expressions_actions->appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); - auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), join_clauses_and_actions.left_join_expressions_actions); + join_clauses_and_actions.left_join_expressions_actions.appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); + auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), std::move(join_clauses_and_actions.left_join_expressions_actions)); left_join_expressions_actions_step->setStepDescription("JOIN actions"); + appendSetsFromActionsDAG(left_join_expressions_actions_step->getExpression(), left_join_tree_query_plan.useful_sets); left_plan.addStep(std::move(left_join_expressions_actions_step)); - join_clauses_and_actions.right_join_expressions_actions->appendInputsForUnusedColumns(right_plan.getCurrentDataStream().header); - auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), join_clauses_and_actions.right_join_expressions_actions); + join_clauses_and_actions.right_join_expressions_actions.appendInputsForUnusedColumns(right_plan.getCurrentDataStream().header); + auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), std::move(join_clauses_and_actions.right_join_expressions_actions)); right_join_expressions_actions_step->setStepDescription("JOIN actions"); + appendSetsFromActionsDAG(right_join_expressions_actions_step->getExpression(), right_join_tree_query_plan.useful_sets); right_plan.addStep(std::move(right_join_expressions_actions_step)); } @@ -1263,19 +1259,19 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map & plan_column_name_to_cast_type) { - auto cast_actions_dag = std::make_shared(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG cast_actions_dag(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); - for (auto & output_node : cast_actions_dag->getOutputs()) + for (auto & output_node : cast_actions_dag.getOutputs()) { auto it = plan_column_name_to_cast_type.find(output_node->result_name); if (it == plan_column_name_to_cast_type.end()) continue; const auto & cast_type = it->second; - output_node = &cast_actions_dag->addCast(*output_node, cast_type, output_node->result_name); + output_node = &cast_actions_dag.addCast(*output_node, cast_type, output_node->result_name); } - cast_actions_dag->appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); + cast_actions_dag.appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); auto cast_join_columns_step = std::make_unique(plan_to_add_cast.getCurrentDataStream(), std::move(cast_actions_dag)); cast_join_columns_step->setStepDescription("Cast JOIN USING columns"); @@ -1418,8 +1414,10 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ { ExpressionActionsPtr & mixed_join_expression = table_join->getMixedJoinExpression(); mixed_join_expression = std::make_shared( - 
join_clauses_and_actions.mixed_join_expressions_actions, + std::move(*join_clauses_and_actions.mixed_join_expressions_actions), ExpressionActionsSettings::fromContext(planner_context->getQueryContext())); + + appendSetsFromActionsDAG(mixed_join_expression->getActionsDAG(), left_join_tree_query_plan.useful_sets); } } else if (join_node.isUsingJoinExpression()) @@ -1465,7 +1463,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ auto result_plan = QueryPlan(); - if (join_algorithm->isFilled()) + bool is_filled_join = join_algorithm->isFilled(); + if (is_filled_join) { auto filled_join_step = std::make_unique( left_plan.getCurrentDataStream(), @@ -1574,12 +1573,12 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ result_plan.unitePlans(std::move(join_step), {std::move(plans)}); } - auto drop_unused_columns_after_join_actions_dag = std::make_shared(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG drop_unused_columns_after_join_actions_dag(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs drop_unused_columns_after_join_actions_dag_updated_outputs; std::unordered_set drop_unused_columns_after_join_actions_dag_updated_outputs_names; std::optional first_skipped_column_node_index; - auto & drop_unused_columns_after_join_actions_dag_outputs = drop_unused_columns_after_join_actions_dag->getOutputs(); + auto & drop_unused_columns_after_join_actions_dag_outputs = drop_unused_columns_after_join_actions_dag.getOutputs(); size_t drop_unused_columns_after_join_actions_dag_outputs_size = drop_unused_columns_after_join_actions_dag_outputs.size(); for (size_t i = 0; i < drop_unused_columns_after_join_actions_dag_outputs_size; ++i) @@ -1618,15 +1617,10 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ for (const auto & right_join_tree_query_plan_row_policy : right_join_tree_query_plan.used_row_policies) left_join_tree_query_plan.used_row_policies.insert(right_join_tree_query_plan_row_policy); - /// Collect all required actions dags in `left_join_tree_query_plan.actions_dags` - for (auto && action_dag : right_join_tree_query_plan.actions_dags) - left_join_tree_query_plan.actions_dags.emplace_back(action_dag); - if (join_clauses_and_actions.left_join_expressions_actions) - left_join_tree_query_plan.actions_dags.emplace_back(std::move(join_clauses_and_actions.left_join_expressions_actions)); - if (join_clauses_and_actions.right_join_expressions_actions) - left_join_tree_query_plan.actions_dags.emplace_back(std::move(join_clauses_and_actions.right_join_expressions_actions)); - if (join_clauses_and_actions.mixed_join_expressions_actions) - left_join_tree_query_plan.actions_dags.push_back(join_clauses_and_actions.mixed_join_expressions_actions); + /// Collect all required actions sets in `left_join_tree_query_plan.useful_sets` + if (!is_filled_join) + for (const auto & useful_set : right_join_tree_query_plan.useful_sets) + left_join_tree_query_plan.useful_sets.insert(useful_set); auto mapping = std::move(left_join_tree_query_plan.query_node_to_plan_step_mapping); auto & r_mapping = right_join_tree_query_plan.query_node_to_plan_step_mapping; @@ -1636,7 +1630,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ .query_plan = std::move(result_plan), .from_stage = QueryProcessingStage::FetchColumns, .used_row_policies = std::move(left_join_tree_query_plan.used_row_policies), - .actions_dags = 
std::move(left_join_tree_query_plan.actions_dags), + .useful_sets = std::move(left_join_tree_query_plan.useful_sets), .query_node_to_plan_step_mapping = std::move(mapping), }; } @@ -1656,7 +1650,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ auto plan = std::move(join_tree_query_plan.query_plan); auto plan_output_columns = plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); - ActionsDAGPtr array_join_action_dag = std::make_shared(plan_output_columns); + ActionsDAG array_join_action_dag(plan_output_columns); PlannerActionsVisitor actions_visitor(planner_context); std::unordered_set array_join_expressions_output_nodes; @@ -1667,29 +1661,28 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ array_join_column_names.insert(array_join_column_identifier); auto & array_join_expression_column = array_join_expression->as(); - auto expression_dag_index_nodes = actions_visitor.visit(*array_join_action_dag, array_join_expression_column.getExpressionOrThrow()); + auto expression_dag_index_nodes = actions_visitor.visit(array_join_action_dag, array_join_expression_column.getExpressionOrThrow()); for (auto & expression_dag_index_node : expression_dag_index_nodes) { - const auto * array_join_column_node = &array_join_action_dag->addAlias(*expression_dag_index_node, array_join_column_identifier); - array_join_action_dag->getOutputs().push_back(array_join_column_node); + const auto * array_join_column_node = &array_join_action_dag.addAlias(*expression_dag_index_node, array_join_column_identifier); + array_join_action_dag.getOutputs().push_back(array_join_column_node); array_join_expressions_output_nodes.insert(array_join_column_node->result_name); } } - array_join_action_dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); + array_join_action_dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); - join_tree_query_plan.actions_dags.push_back(array_join_action_dag); - - auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), array_join_action_dag); + auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), std::move(array_join_action_dag)); array_join_actions->setStepDescription("ARRAY JOIN actions"); + appendSetsFromActionsDAG(array_join_actions->getExpression(), join_tree_query_plan.useful_sets); plan.addStep(std::move(array_join_actions)); - auto drop_unused_columns_before_array_join_actions_dag = std::make_shared(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG drop_unused_columns_before_array_join_actions_dag(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs drop_unused_columns_before_array_join_actions_dag_updated_outputs; std::unordered_set drop_unused_columns_before_array_join_actions_dag_updated_outputs_names; - auto & drop_unused_columns_before_array_join_actions_dag_outputs = drop_unused_columns_before_array_join_actions_dag->getOutputs(); + auto & drop_unused_columns_before_array_join_actions_dag_outputs = drop_unused_columns_before_array_join_actions_dag.getOutputs(); size_t drop_unused_columns_before_array_join_actions_dag_outputs_size = drop_unused_columns_before_array_join_actions_dag_outputs.size(); for (size_t i = 0; i < drop_unused_columns_before_array_join_actions_dag_outputs_size; ++i) @@ -1723,7 +1716,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ .query_plan = std::move(plan), .from_stage = QueryProcessingStage::FetchColumns, 
.used_row_policies = std::move(join_tree_query_plan.used_row_policies), - .actions_dags = std::move(join_tree_query_plan.actions_dags), + .useful_sets = std::move(join_tree_query_plan.useful_sets), .query_node_to_plan_step_mapping = std::move(join_tree_query_plan.query_node_to_plan_step_mapping), }; } diff --git a/src/Planner/PlannerJoinTree.h b/src/Planner/PlannerJoinTree.h index 9110b2bfef9..259622b1d50 100644 --- a/src/Planner/PlannerJoinTree.h +++ b/src/Planner/PlannerJoinTree.h @@ -11,12 +11,14 @@ namespace DB { +using UsefulSets = std::unordered_set; + struct JoinTreeQueryPlan { QueryPlan query_plan; QueryProcessingStage::Enum from_stage; std::set used_row_policies{}; - std::vector actions_dags{}; + UsefulSets useful_sets{}; std::unordered_map query_node_to_plan_step_mapping{}; }; diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index d9360a58240..5acff9dac82 100644 --- a/src/Planner/PlannerJoins.cpp +++ b/src/Planner/PlannerJoins.cpp @@ -180,13 +180,13 @@ std::set extractJoinTableSidesFromExpression(//const ActionsDAG:: } const ActionsDAG::Node * appendExpression( - ActionsDAGPtr & dag, + ActionsDAG & dag, const QueryTreeNodePtr & expression, const PlannerContextPtr & planner_context, const JoinNode & join_node) { PlannerActionsVisitor join_expression_visitor(planner_context); - auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(*dag, expression); + auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(dag, expression); if (join_expression_dag_node_raw_pointers.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "JOIN {} ON clause contains multiple expressions", @@ -196,9 +196,9 @@ const ActionsDAG::Node * appendExpression( } void buildJoinClause( - ActionsDAGPtr & left_dag, - ActionsDAGPtr & right_dag, - ActionsDAGPtr & mixed_dag, + ActionsDAG & left_dag, + ActionsDAG & right_dag, + ActionsDAG & mixed_dag, const PlannerContextPtr & planner_context, const QueryTreeNodePtr & join_expression, const TableExpressionSet & left_table_expressions, @@ -379,8 +379,8 @@ JoinClausesAndActions buildJoinClausesAndActions( const JoinNode & join_node, const PlannerContextPtr & planner_context) { - ActionsDAGPtr left_join_actions = std::make_shared(left_table_expression_columns); - ActionsDAGPtr right_join_actions = std::make_shared(right_table_expression_columns); + ActionsDAG left_join_actions(left_table_expression_columns); + ActionsDAG right_join_actions(right_table_expression_columns); ColumnsWithTypeAndName mixed_table_expression_columns; for (const auto & left_column : left_table_expression_columns) { @@ -390,7 +390,7 @@ JoinClausesAndActions buildJoinClausesAndActions( { mixed_table_expression_columns.push_back(right_column); } - ActionsDAGPtr mixed_join_actions = std::make_shared(mixed_table_expression_columns); + ActionsDAG mixed_join_actions(mixed_table_expression_columns); /** It is possible to have constant value in JOIN ON section, that we need to ignore during DAG construction. * If we do not ignore it, this function will be replaced by underlying constant. 
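The PlannerJoinTree.h hunk above swaps the per-join-tree list of whole actions DAGs for a UsefulSets container, so only the prepared sets referenced by those DAGs are kept alive; the traversal that fills it, appendSetsFromActionsDAG, appears later in this patch in src/Planner/Utils.cpp. A simplified stand-in for that collection step, using placeholder types rather than the real ClickHouse classes:

// Illustrative only: the *Sketch types below model collecting the sets referenced by DAG nodes.
#include <memory>
#include <unordered_set>
#include <vector>

struct FutureSetSketch {};
using FutureSetSketchPtr = std::shared_ptr<FutureSetSketch>;

struct DagNodeSketch
{
    FutureSetSketchPtr set;   // non-null when the node's constant column wraps a prepared set
};

using UsefulSetsSketch = std::unordered_set<FutureSetSketchPtr>;

void appendSetsSketch(const std::vector<DagNodeSketch> & nodes, UsefulSetsSketch & useful_sets)
{
    for (const auto & node : nodes)
        if (node.set)
            useful_sets.insert(node.set);   // the unordered_set deduplicates repeated references
}

int main()
{
    auto shared_set = std::make_shared<FutureSetSketch>();
    std::vector<DagNodeSketch> nodes{{shared_set}, {nullptr}, {shared_set}};
    UsefulSetsSketch useful_sets;
    appendSetsSketch(nodes, useful_sets);
    return useful_sets.size() == 1 ? 0 : 1;   // the set stays alive, the DAG that produced it does not
}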
@@ -501,12 +501,12 @@ JoinClausesAndActions buildJoinClausesAndActions( { const ActionsDAG::Node * dag_filter_condition_node = nullptr; if (left_filter_condition_nodes.size() > 1) - dag_filter_condition_node = &left_join_actions->addFunction(and_function, left_filter_condition_nodes, {}); + dag_filter_condition_node = &left_join_actions.addFunction(and_function, left_filter_condition_nodes, {}); else dag_filter_condition_node = left_filter_condition_nodes[0]; join_clause.getLeftFilterConditionNodes() = {dag_filter_condition_node}; - left_join_actions->addOrReplaceInOutputs(*dag_filter_condition_node); + left_join_actions.addOrReplaceInOutputs(*dag_filter_condition_node); add_necessary_name_if_needed(JoinTableSide::Left, dag_filter_condition_node->result_name); } @@ -517,12 +517,12 @@ JoinClausesAndActions buildJoinClausesAndActions( const ActionsDAG::Node * dag_filter_condition_node = nullptr; if (right_filter_condition_nodes.size() > 1) - dag_filter_condition_node = &right_join_actions->addFunction(and_function, right_filter_condition_nodes, {}); + dag_filter_condition_node = &right_join_actions.addFunction(and_function, right_filter_condition_nodes, {}); else dag_filter_condition_node = right_filter_condition_nodes[0]; join_clause.getRightFilterConditionNodes() = {dag_filter_condition_node}; - right_join_actions->addOrReplaceInOutputs(*dag_filter_condition_node); + right_join_actions.addOrReplaceInOutputs(*dag_filter_condition_node); add_necessary_name_if_needed(JoinTableSide::Right, dag_filter_condition_node->result_name); } @@ -559,10 +559,10 @@ JoinClausesAndActions buildJoinClausesAndActions( } if (!left_key_node->result_type->equals(*common_type)) - left_key_node = &left_join_actions->addCast(*left_key_node, common_type, {}); + left_key_node = &left_join_actions.addCast(*left_key_node, common_type, {}); if (!right_key_node->result_type->equals(*common_type)) - right_key_node = &right_join_actions->addCast(*right_key_node, common_type, {}); + right_key_node = &right_join_actions.addCast(*right_key_node, common_type, {}); } if (join_clause.isNullsafeCompareKey(i) && left_key_node->result_type->isNullable() && right_key_node->result_type->isNullable()) @@ -579,24 +579,24 @@ JoinClausesAndActions buildJoinClausesAndActions( * SELECT * FROM t1 JOIN t2 ON tuple(t1.a) == tuple(t2.b) */ auto wrap_nullsafe_function = FunctionFactory::instance().get("tuple", planner_context->getQueryContext()); - left_key_node = &left_join_actions->addFunction(wrap_nullsafe_function, {left_key_node}, {}); - right_key_node = &right_join_actions->addFunction(wrap_nullsafe_function, {right_key_node}, {}); + left_key_node = &left_join_actions.addFunction(wrap_nullsafe_function, {left_key_node}, {}); + right_key_node = &right_join_actions.addFunction(wrap_nullsafe_function, {right_key_node}, {}); } - left_join_actions->addOrReplaceInOutputs(*left_key_node); - right_join_actions->addOrReplaceInOutputs(*right_key_node); + left_join_actions.addOrReplaceInOutputs(*left_key_node); + right_join_actions.addOrReplaceInOutputs(*right_key_node); add_necessary_name_if_needed(JoinTableSide::Left, left_key_node->result_name); add_necessary_name_if_needed(JoinTableSide::Right, right_key_node->result_name); } } - result.left_join_expressions_actions = left_join_actions->clone(); + result.left_join_expressions_actions = left_join_actions.clone(); result.left_join_tmp_expression_actions = std::move(left_join_actions); - result.left_join_expressions_actions->removeUnusedActions(join_left_actions_names); - 
result.right_join_expressions_actions = right_join_actions->clone(); + result.left_join_expressions_actions.removeUnusedActions(join_left_actions_names); + result.right_join_expressions_actions = right_join_actions.clone(); result.right_join_tmp_expression_actions = std::move(right_join_actions); - result.right_join_expressions_actions->removeUnusedActions(join_right_actions_names); + result.right_join_expressions_actions.removeUnusedActions(join_right_actions_names); if (is_inequal_join) { @@ -604,24 +604,24 @@ JoinClausesAndActions buildJoinClausesAndActions( /// So, for each column, we recalculate the value of the whole expression from JOIN ON to check if rows should be joined. if (result.join_clauses.size() > 1) { - auto mixed_join_expressions_actions = std::make_shared(mixed_table_expression_columns); + ActionsDAG mixed_join_expressions_actions(mixed_table_expression_columns); PlannerActionsVisitor join_expression_visitor(planner_context); - auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(*mixed_join_expressions_actions, join_expression); + auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(mixed_join_expressions_actions, join_expression); if (join_expression_dag_node_raw_pointers.size() != 1) throw Exception( ErrorCodes::LOGICAL_ERROR, "JOIN {} ON clause contains multiple expressions", join_node.formatASTForErrorMessage()); - mixed_join_expressions_actions->addOrReplaceInOutputs(*join_expression_dag_node_raw_pointers[0]); + mixed_join_expressions_actions.addOrReplaceInOutputs(*join_expression_dag_node_raw_pointers[0]); Names required_names{join_expression_dag_node_raw_pointers[0]->result_name}; - mixed_join_expressions_actions->removeUnusedActions(required_names); - result.mixed_join_expressions_actions = mixed_join_expressions_actions; + mixed_join_expressions_actions.removeUnusedActions(required_names); + result.mixed_join_expressions_actions = std::move(mixed_join_expressions_actions); } else { const auto & join_clause = result.join_clauses.front(); const auto & mixed_filter_condition_nodes = join_clause.getMixedFilterConditionNodes(); auto mixed_join_expressions_actions = ActionsDAG::buildFilterActionsDAG(mixed_filter_condition_nodes, {}, true); - result.mixed_join_expressions_actions = mixed_join_expressions_actions; + result.mixed_join_expressions_actions = std::move(mixed_join_expressions_actions); } auto outputs = result.mixed_join_expressions_actions->getOutputs(); if (outputs.size() != 1) diff --git a/src/Planner/PlannerJoins.h b/src/Planner/PlannerJoins.h index 8adf6edd7ea..d8665ab7739 100644 --- a/src/Planner/PlannerJoins.h +++ b/src/Planner/PlannerJoins.h @@ -182,15 +182,15 @@ struct JoinClausesAndActions /// Join clauses. Actions dag nodes point into join_expression_actions. JoinClauses join_clauses; /// Whole JOIN ON section expressions - ActionsDAGPtr left_join_tmp_expression_actions; - ActionsDAGPtr right_join_tmp_expression_actions; + ActionsDAG left_join_tmp_expression_actions; + ActionsDAG right_join_tmp_expression_actions; /// Left join expressions actions - ActionsDAGPtr left_join_expressions_actions; + ActionsDAG left_join_expressions_actions; /// Right join expressions actions - ActionsDAGPtr right_join_expressions_actions; + ActionsDAG right_join_expressions_actions; /// Originally used for inequal join. it's the total join expression. /// If there is no inequal join conditions, it's null. 
- ActionsDAGPtr mixed_join_expressions_actions; + std::optional mixed_join_expressions_actions; }; /** Calculate join clauses and actions for JOIN ON section. diff --git a/src/Planner/TableExpressionData.h b/src/Planner/TableExpressionData.h index 9723a00a356..72412a869e4 100644 --- a/src/Planner/TableExpressionData.h +++ b/src/Planner/TableExpressionData.h @@ -73,7 +73,7 @@ public: } /// Add alias column - void addAliasColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, ActionsDAGPtr actions_dag, bool is_selected_column = true) + void addAliasColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, ActionsDAG actions_dag, bool is_selected_column = true) { alias_column_expressions.emplace(column.name, std::move(actions_dag)); addColumnImpl(column, column_identifier, is_selected_column); @@ -94,7 +94,7 @@ public: } /// Get ALIAS columns names mapped to expressions - const std::unordered_map & getAliasColumnExpressions() const + std::unordered_map & getAliasColumnExpressions() { return alias_column_expressions; } @@ -211,32 +211,32 @@ public: is_merge_tree = is_merge_tree_value; } - const ActionsDAGPtr & getPrewhereFilterActions() const + const std::optional & getPrewhereFilterActions() const { return prewhere_filter_actions; } - void setRowLevelFilterActions(ActionsDAGPtr row_level_filter_actions_value) + void setRowLevelFilterActions(ActionsDAG row_level_filter_actions_value) { row_level_filter_actions = std::move(row_level_filter_actions_value); } - const ActionsDAGPtr & getRowLevelFilterActions() const + const std::optional & getRowLevelFilterActions() const { return row_level_filter_actions; } - void setPrewhereFilterActions(ActionsDAGPtr prewhere_filter_actions_value) + void setPrewhereFilterActions(ActionsDAG prewhere_filter_actions_value) { prewhere_filter_actions = std::move(prewhere_filter_actions_value); } - const ActionsDAGPtr & getFilterActions() const + const std::optional & getFilterActions() const { return filter_actions; } - void setFilterActions(ActionsDAGPtr filter_actions_value) + void setFilterActions(ActionsDAG filter_actions_value) { filter_actions = std::move(filter_actions_value); } @@ -277,7 +277,7 @@ private: NameSet selected_column_names_set; /// Expression to calculate ALIAS columns - std::unordered_map alias_column_expressions; + std::unordered_map alias_column_expressions; /// Valid for table, table function, array join, query, union nodes ColumnNameToColumn column_name_to_column; @@ -289,16 +289,16 @@ private: ColumnIdentifierToColumnName column_identifier_to_column_name; /// Valid for table, table function - ActionsDAGPtr filter_actions; + std::optional filter_actions; /// Valid for table, table function PrewhereInfoPtr prewhere_info; /// Valid for table, table function - ActionsDAGPtr prewhere_filter_actions; + std::optional prewhere_filter_actions; /// Valid for table, table function - ActionsDAGPtr row_level_filter_actions; + std::optional row_level_filter_actions; /// Is storage remote bool is_remote = false; diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp index 3c54c57a28c..a6e94a124e6 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp @@ -11,10 +11,12 @@ #include #include +#include #include #include +#include #include @@ -442,22 +444,22 @@ FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree, collectSourceColumns(filter_query_tree, planner_context, false /*keep_alias_columns*/); collectSets(filter_query_tree, *planner_context); - auto filter_actions_dag = 
std::make_shared<ActionsDAG>();
+    ActionsDAG filter_actions_dag;
     PlannerActionsVisitor actions_visitor(planner_context, false /*use_column_identifier_as_action_node_name*/);
-    auto expression_nodes = actions_visitor.visit(*filter_actions_dag, filter_query_tree);
+    auto expression_nodes = actions_visitor.visit(filter_actions_dag, filter_query_tree);
     if (expression_nodes.size() != 1)
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
             "Filter actions must return single output node. Actual {}",
             expression_nodes.size());
 
-    auto & filter_actions_outputs = filter_actions_dag->getOutputs();
+    auto & filter_actions_outputs = filter_actions_dag.getOutputs();
     filter_actions_outputs = std::move(expression_nodes);
 
     std::string filter_node_name = filter_actions_outputs[0]->result_name;
     bool remove_filter_column = true;
 
-    for (const auto & filter_input_node : filter_actions_dag->getInputs())
+    for (const auto & filter_input_node : filter_actions_dag.getInputs())
         if (table_expression_required_names_without_filter.contains(filter_input_node->result_name))
             filter_actions_outputs.push_back(filter_input_node);
 
@@ -477,4 +479,32 @@ ASTPtr parseAdditionalResultFilter(const Settings & settings)
     return additional_result_filter_ast;
 }
 
+void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets)
+{
+    for (const auto & node : dag.getNodes())
+    {
+        if (node.column)
+        {
+            const IColumn * column = node.column.get();
+            if (const auto * column_const = typeid_cast<const ColumnConst *>(column))
+                column = &column_const->getDataColumn();
+
+            if (const auto * column_set = typeid_cast<const ColumnSet *>(column))
+                useful_sets.insert(column_set->getData());
+        }
+
+        if (node.type == ActionsDAG::ActionType::FUNCTION && node.function_base->getName() == "indexHint")
+        {
+            ActionsDAG::NodeRawConstPtrs children;
+            if (const auto * adaptor = typeid_cast<const FunctionToFunctionBaseAdaptor *>(node.function_base.get()))
+            {
+                if (const auto * index_hint = typeid_cast<const FunctionIndexHint *>(adaptor->getFunction().get()))
+                {
+                    appendSetsFromActionsDAG(index_hint->getActions(), useful_sets);
+                }
+            }
+        }
+    }
+}
+
 }
diff --git a/src/Planner/Utils.h b/src/Planner/Utils.h
index 3172847f053..ae60976a8d6 100644
--- a/src/Planner/Utils.h
+++ b/src/Planner/Utils.h
@@ -88,4 +88,7 @@ FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree,
 
 ASTPtr parseAdditionalResultFilter(const Settings & settings);
 
+using UsefulSets = std::unordered_set<FutureSetPtr>;
+void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets);
+
 }
diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
index c23d717d52f..157b61aa2af 100644
--- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
+++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
@@ -625,8 +625,6 @@ void ValuesBlockInputFormat::readSuffix()
         skipWhitespaceIfAny(*buf);
         if (buf->hasUnreadData())
             throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read data after semicolon");
-        if (!format_settings.values.allow_data_after_semicolon && !buf->eof())
-            throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read data after semicolon (and input_format_values_allow_data_after_semicolon=0)");
         return;
     }
 
diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp
index 0d7e05af1de..f31de80b22d 100644
--- a/src/Processors/QueryPlan/AggregatingStep.cpp
+++ b/src/Processors/QueryPlan/AggregatingStep.cpp
@@ -303,15 +303,15 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
             const auto & header = ports[set_counter]->getHeader();
 
             /// Here we create
a DAG which fills missing keys and adds `__grouping_set` column - auto dag = std::make_shared(header.getColumnsWithTypeAndName()); + ActionsDAG dag(header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs outputs; outputs.reserve(output_header.columns() + 1); auto grouping_col = ColumnConst::create(ColumnUInt64::create(1, set_counter), 0); - const auto * grouping_node = &dag->addColumn( + const auto * grouping_node = &dag.addColumn( {ColumnPtr(std::move(grouping_col)), std::make_shared(), "__grouping_set"}); - grouping_node = &dag->materializeNode(*grouping_node); + grouping_node = &dag.materializeNode(*grouping_node); outputs.push_back(grouping_node); const auto & missing_columns = grouping_sets_params[set_counter].missing_keys; @@ -332,22 +332,22 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B column_with_default->finalize(); auto column = ColumnConst::create(std::move(column_with_default), 0); - const auto * node = &dag->addColumn({ColumnPtr(std::move(column)), col.type, col.name}); - node = &dag->materializeNode(*node); + const auto * node = &dag.addColumn({ColumnPtr(std::move(column)), col.type, col.name}); + node = &dag.materializeNode(*node); outputs.push_back(node); } else { - const auto * column_node = dag->getOutputs()[header.getPositionByName(col.name)]; + const auto * column_node = dag.getOutputs()[header.getPositionByName(col.name)]; if (used_it != used_keys.end() && group_by_use_nulls && column_node->result_type->canBeInsideNullable()) - outputs.push_back(&dag->addFunction(to_nullable_function, { column_node }, col.name)); + outputs.push_back(&dag.addFunction(to_nullable_function, { column_node }, col.name)); else outputs.push_back(column_node); } } - dag->getOutputs().swap(outputs); - auto expression = std::make_shared(dag, settings.getActionsSettings()); + dag.getOutputs().swap(outputs); + auto expression = std::make_shared(std::move(dag), settings.getActionsSettings()); auto transform = std::make_shared(header, expression); connect(*ports[set_counter], transform->getInputPort()); diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index d010a3327a6..3a98f8e4612 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ -36,30 +36,30 @@ CubeStep::CubeStep(const DataStream & input_stream_, Aggregator::Params params_, ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number) { - auto dag = std::make_shared(header.getColumnsWithTypeAndName()); - auto & outputs = dag->getOutputs(); + ActionsDAG dag(header.getColumnsWithTypeAndName()); + auto & outputs = dag.getOutputs(); if (use_nulls) { auto to_nullable = FunctionFactory::instance().get("toNullable", nullptr); for (const auto & key : keys) { - const auto * node = dag->getOutputs()[header.getPositionByName(key)]; + const auto * node = dag.getOutputs()[header.getPositionByName(key)]; if (node->result_type->canBeInsideNullable()) { - dag->addOrReplaceInOutputs(dag->addFunction(to_nullable, { node }, node->result_name)); + dag.addOrReplaceInOutputs(dag.addFunction(to_nullable, { node }, node->result_name)); } } } auto grouping_col = ColumnUInt64::create(1, grouping_set_number); - const auto * grouping_node = &dag->addColumn( + const auto * grouping_node = &dag.addColumn( {ColumnPtr(std::move(grouping_col)), std::make_shared(), "__grouping_set"}); - grouping_node = &dag->materializeNode(*grouping_node); 
+ grouping_node = &dag.materializeNode(*grouping_node); outputs.insert(outputs.begin(), grouping_node); - auto expression = std::make_shared(dag, settings.getActionsSettings()); + auto expression = std::make_shared(std::move(dag), settings.getActionsSettings()); return std::make_shared(header, expression); } diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp index 14f58585d3d..d8624a1c99b 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp @@ -33,7 +33,7 @@ void addConvertingActions(QueryPlan & plan, const Block & header, bool has_missi }; auto convert_actions_dag = get_converting_dag(plan.getCurrentDataStream().header, header); - auto converting = std::make_unique(plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(plan.getCurrentDataStream(), std::move(convert_actions_dag)); plan.addStep(std::move(converting)); } diff --git a/src/Processors/QueryPlan/ExpressionStep.cpp b/src/Processors/QueryPlan/ExpressionStep.cpp index 0ccb0c4492a..6f88c4527a4 100644 --- a/src/Processors/QueryPlan/ExpressionStep.cpp +++ b/src/Processors/QueryPlan/ExpressionStep.cpp @@ -10,33 +10,33 @@ namespace DB { -static ITransformingStep::Traits getTraits(const ActionsDAGPtr & actions, const Block & header, const SortDescription & sort_description) +static ITransformingStep::Traits getTraits(const ActionsDAG & actions, const Block & header, const SortDescription & sort_description) { return ITransformingStep::Traits { { .returns_single_stream = false, .preserves_number_of_streams = true, - .preserves_sorting = actions->isSortingPreserved(header, sort_description), + .preserves_sorting = actions.isSortingPreserved(header, sort_description), }, { - .preserves_number_of_rows = !actions->hasArrayJoin(), + .preserves_number_of_rows = !actions.hasArrayJoin(), } }; } -ExpressionStep::ExpressionStep(const DataStream & input_stream_, const ActionsDAGPtr & actions_dag_) +ExpressionStep::ExpressionStep(const DataStream & input_stream_, ActionsDAG actions_dag_) : ITransformingStep( input_stream_, - ExpressionTransform::transformHeader(input_stream_.header, *actions_dag_), + ExpressionTransform::transformHeader(input_stream_.header, actions_dag_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description)) - , actions_dag(actions_dag_) + , actions_dag(std::move(actions_dag_)) { } void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression = std::make_shared(actions_dag, settings.getActionsSettings()); + auto expression = std::make_shared(std::move(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -49,7 +49,7 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu pipeline.getHeader().getColumnsWithTypeAndName(), output_stream->header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto convert_actions = std::make_shared(convert_actions_dag, settings.getActionsSettings()); + auto convert_actions = std::make_shared(std::move(convert_actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -61,20 +61,20 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu void ExpressionStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, 
settings.indent_char); - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag.clone()); expression->describeActions(settings.out, prefix); } void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const { - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag.clone()); map.add("Expression", expression->toTree()); } void ExpressionStep::updateOutputStream() { output_stream = createOutputStream( - input_streams.front(), ExpressionTransform::transformHeader(input_streams.front().header, *actions_dag), getDataStreamTraits()); + input_streams.front(), ExpressionTransform::transformHeader(input_streams.front().header, actions_dag), getDataStreamTraits()); if (!getDataStreamTraits().preserves_sorting) return; diff --git a/src/Processors/QueryPlan/ExpressionStep.h b/src/Processors/QueryPlan/ExpressionStep.h index 3eef14ac129..f2926318cbc 100644 --- a/src/Processors/QueryPlan/ExpressionStep.h +++ b/src/Processors/QueryPlan/ExpressionStep.h @@ -1,12 +1,10 @@ #pragma once #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - class ExpressionTransform; class JoiningTransform; @@ -15,21 +13,22 @@ class ExpressionStep : public ITransformingStep { public: - explicit ExpressionStep(const DataStream & input_stream_, const ActionsDAGPtr & actions_dag_); + explicit ExpressionStep(const DataStream & input_stream_, ActionsDAG actions_dag_); String getName() const override { return "Expression"; } void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) override; void describeActions(FormatSettings & settings) const override; - const ActionsDAGPtr & getExpression() const { return actions_dag; } + ActionsDAG & getExpression() { return actions_dag; } + const ActionsDAG & getExpression() const { return actions_dag; } void describeActions(JSONBuilder::JSONMap & map) const override; private: void updateOutputStream() override; - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; }; } diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 72934665b5c..0c6b71387b7 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -9,9 +9,9 @@ namespace DB { -static ITransformingStep::Traits getTraits(const ActionsDAGPtr & expression, const Block & header, const SortDescription & sort_description, bool remove_filter_column, const String & filter_column_name) +static ITransformingStep::Traits getTraits(const ActionsDAG & expression, const Block & header, const SortDescription & sort_description, bool remove_filter_column, const String & filter_column_name) { - bool preserves_sorting = expression->isSortingPreserved(header, sort_description, remove_filter_column ? filter_column_name : ""); + bool preserves_sorting = expression.isSortingPreserved(header, sort_description, remove_filter_column ? 
filter_column_name : ""); if (remove_filter_column) { preserves_sorting &= std::find_if( @@ -35,28 +35,27 @@ static ITransformingStep::Traits getTraits(const ActionsDAGPtr & expression, con FilterStep::FilterStep( const DataStream & input_stream_, - const ActionsDAGPtr & actions_dag_, + ActionsDAG actions_dag_, String filter_column_name_, bool remove_filter_column_) : ITransformingStep( input_stream_, FilterTransform::transformHeader( input_stream_.header, - actions_dag_.get(), + &actions_dag_, filter_column_name_, remove_filter_column_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description, remove_filter_column_, filter_column_name_)) - , actions_dag(actions_dag_) + , actions_dag(std::move(actions_dag_)) , filter_column_name(std::move(filter_column_name_)) , remove_filter_column(remove_filter_column_) { - actions_dag = actions_dag->clone(); - actions_dag->removeAliasesForFilter(filter_column_name); + actions_dag.removeAliasesForFilter(filter_column_name); } void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression = std::make_shared(actions_dag, settings.getActionsSettings()); + auto expression = std::make_shared(std::move(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) { @@ -70,7 +69,7 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ pipeline.getHeader().getColumnsWithTypeAndName(), output_stream->header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto convert_actions = std::make_shared(convert_actions_dag, settings.getActionsSettings()); + auto convert_actions = std::make_shared(std::move(convert_actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -88,7 +87,7 @@ void FilterStep::describeActions(FormatSettings & settings) const settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag.clone()); expression->describeActions(settings.out, prefix); } @@ -97,7 +96,7 @@ void FilterStep::describeActions(JSONBuilder::JSONMap & map) const map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag.clone()); map.add("Expression", expression->toTree()); } @@ -105,7 +104,7 @@ void FilterStep::updateOutputStream() { output_stream = createOutputStream( input_streams.front(), - FilterTransform::transformHeader(input_streams.front().header, actions_dag.get(), filter_column_name, remove_filter_column), + FilterTransform::transformHeader(input_streams.front().header, &actions_dag, filter_column_name, remove_filter_column), getDataStreamTraits()); if (!getDataStreamTraits().preserves_sorting) diff --git a/src/Processors/QueryPlan/FilterStep.h b/src/Processors/QueryPlan/FilterStep.h index 939d0900c86..b5a31bef5fc 100644 --- a/src/Processors/QueryPlan/FilterStep.h +++ b/src/Processors/QueryPlan/FilterStep.h @@ -1,19 +1,17 @@ #pragma once #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - /// Implements WHERE, HAVING operations. See FilterTransform. 
class FilterStep : public ITransformingStep { public: FilterStep( const DataStream & input_stream_, - const ActionsDAGPtr & actions_dag_, + ActionsDAG actions_dag_, String filter_column_name_, bool remove_filter_column_); @@ -23,15 +21,15 @@ public: void describeActions(JSONBuilder::JSONMap & map) const override; void describeActions(FormatSettings & settings) const override; - const ActionsDAGPtr & getExpression() const { return actions_dag; } - ActionsDAGPtr & getExpression() { return actions_dag; } + const ActionsDAG & getExpression() const { return actions_dag; } + ActionsDAG & getExpression() { return actions_dag; } const String & getFilterColumnName() const { return filter_column_name; } bool removesFilterColumn() const { return remove_filter_column; } private: void updateOutputStream() override; - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; String filter_column_name; bool remove_filter_column; }; diff --git a/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp b/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp index d9296f10a98..0c708599398 100644 --- a/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp @@ -48,10 +48,10 @@ size_t tryConvertOuterJoinToInnerJoin(QueryPlan::Node * parent_node, QueryPlan:: bool right_stream_safe = true; if (check_left_stream) - left_stream_safe = filter_dag->isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, left_stream_input_header); + left_stream_safe = filter_dag.isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, left_stream_input_header); if (check_right_stream) - right_stream_safe = filter_dag->isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, right_stream_input_header); + right_stream_safe = filter_dag.isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, right_stream_input_header); if (!left_stream_safe || !right_stream_safe) return 0; diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp index 0a3a4094a66..37e61a6c388 100644 --- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp @@ -10,27 +10,27 @@ namespace DB::QueryPlanOptimizations { /// build actions DAG from stack of steps -static ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) +static std::optional buildActionsForPlanPath(std::vector & dag_stack) { if (dag_stack.empty()) - return nullptr; + return {}; - ActionsDAGPtr path_actions = dag_stack.back()->clone(); + ActionsDAG path_actions = dag_stack.back()->clone(); dag_stack.pop_back(); while (!dag_stack.empty()) { - ActionsDAGPtr clone = dag_stack.back()->clone(); + ActionsDAG clone = dag_stack.back()->clone(); dag_stack.pop_back(); - path_actions->mergeInplace(std::move(*clone)); + path_actions.mergeInplace(std::move(clone)); } return path_actions; } static std::set -getOriginalDistinctColumns(const ColumnsWithTypeAndName & distinct_columns, std::vector & dag_stack) +getOriginalDistinctColumns(const ColumnsWithTypeAndName & distinct_columns, std::vector & dag_stack) { auto actions = buildActionsForPlanPath(dag_stack); - FindOriginalNodeForOutputName original_node_finder(actions); + FindOriginalNodeForOutputName original_node_finder(*actions); std::set original_distinct_columns; for (const auto & column : distinct_columns) { @@ -65,7 +65,7 @@ size_t 
tryDistinctReadInOrder(QueryPlan::Node * parent_node) /// (3) gather actions DAG to find original names for columns in distinct step later std::vector steps_to_update; QueryPlan::Node * node = parent_node; - std::vector dag_stack; + std::vector dag_stack; while (!node->children.empty()) { auto * step = dynamic_cast(node->step.get()); @@ -79,9 +79,9 @@ size_t tryDistinctReadInOrder(QueryPlan::Node * parent_node) steps_to_update.push_back(step); if (const auto * const expr = typeid_cast(step); expr) - dag_stack.push_back(expr->getExpression()); + dag_stack.push_back(&expr->getExpression()); else if (const auto * const filter = typeid_cast(step); filter) - dag_stack.push_back(filter->getExpression()); + dag_stack.push_back(&filter->getExpression()); node = node->children.front(); } diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 263598bdca7..73314f005b6 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -101,7 +101,7 @@ static NameSet findIdentifiersOfNode(const ActionsDAG::Node * node) return res; } -static ActionsDAGPtr splitFilter(QueryPlan::Node * parent_node, const Names & available_inputs, size_t child_idx = 0) +static std::optional splitFilter(QueryPlan::Node * parent_node, const Names & available_inputs, size_t child_idx = 0) { QueryPlan::Node * child_node = parent_node->children.front(); checkChildrenSize(child_node, child_idx + 1); @@ -110,16 +110,16 @@ static ActionsDAGPtr splitFilter(QueryPlan::Node * parent_node, const Names & av auto & child = child_node->step; auto * filter = assert_cast(parent.get()); - const auto & expression = filter->getExpression(); + auto & expression = filter->getExpression(); const auto & filter_column_name = filter->getFilterColumnName(); bool removes_filter = filter->removesFilterColumn(); const auto & all_inputs = child->getInputStreams()[child_idx].header.getColumnsWithTypeAndName(); - return expression->splitActionsForFilterPushDown(filter_column_name, removes_filter, available_inputs, all_inputs); + return expression.splitActionsForFilterPushDown(filter_column_name, removes_filter, available_inputs, all_inputs); } static size_t -addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, const ActionsDAGPtr & split_filter, +addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, ActionsDAG split_filter, bool can_remove_filter = true, size_t child_idx = 0, bool update_parent_filter = true) { QueryPlan::Node * child_node = parent_node->children.front(); @@ -129,14 +129,14 @@ addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, auto & child = child_node->step; auto * filter = assert_cast(parent.get()); - const auto & expression = filter->getExpression(); + auto & expression = filter->getExpression(); const auto & filter_column_name = filter->getFilterColumnName(); - const auto * filter_node = expression->tryFindInOutputs(filter_column_name); + const auto * filter_node = expression.tryFindInOutputs(filter_column_name); if (update_parent_filter && !filter_node && !filter->removesFilterColumn()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}", - filter_column_name, expression->dumpDAG()); + filter_column_name, expression.dumpDAG()); /// Add new Filter step before Child. 
/// Expression/Filter -> Child -> Something @@ -147,10 +147,10 @@ addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, /// Expression/Filter -> Child -> Filter -> Something /// New filter column is the first one. - String split_filter_column_name = split_filter->getOutputs().front()->result_name; + String split_filter_column_name = split_filter.getOutputs().front()->result_name; node.step = std::make_unique( - node.children.at(0)->step->getOutputStream(), split_filter, std::move(split_filter_column_name), can_remove_filter); + node.children.at(0)->step->getOutputStream(), std::move(split_filter), std::move(split_filter_column_name), can_remove_filter); if (auto * transforming_step = dynamic_cast(child.get())) { @@ -176,7 +176,7 @@ addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, { /// This means that all predicates of filter were pushed down. /// Replace current actions to expression, as we don't need to filter anything. - parent = std::make_unique(child->getOutputStream(), expression); + parent = std::make_unique(child->getOutputStream(), std::move(expression)); } else { @@ -192,7 +192,7 @@ tryAddNewFilterStep(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, con bool can_remove_filter = true, size_t child_idx = 0) { if (auto split_filter = splitFilter(parent_node, allowed_inputs, child_idx)) - return addNewFilterStepOrThrow(parent_node, nodes, split_filter, can_remove_filter, child_idx); + return addNewFilterStepOrThrow(parent_node, nodes, std::move(*split_filter), can_remove_filter, child_idx); return 0; } @@ -332,7 +332,7 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan:: Names left_stream_available_columns_to_push_down = get_available_columns_for_filter(true /*push_to_left_stream*/, left_stream_filter_push_down_input_columns_available); Names right_stream_available_columns_to_push_down = get_available_columns_for_filter(false /*push_to_left_stream*/, right_stream_filter_push_down_input_columns_available); - auto join_filter_push_down_actions = filter->getExpression()->splitActionsForJOINFilterPushDown(filter->getFilterColumnName(), + auto join_filter_push_down_actions = filter->getExpression().splitActionsForJOINFilterPushDown(filter->getFilterColumnName(), filter->removesFilterColumn(), left_stream_available_columns_to_push_down, left_stream_input_header, @@ -346,42 +346,44 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan:: if (join_filter_push_down_actions.left_stream_filter_to_push_down) { + const auto & result_name = join_filter_push_down_actions.left_stream_filter_to_push_down->getOutputs()[0]->result_name; updated_steps += addNewFilterStepOrThrow(parent_node, nodes, - join_filter_push_down_actions.left_stream_filter_to_push_down, + std::move(*join_filter_push_down_actions.left_stream_filter_to_push_down), join_filter_push_down_actions.left_stream_filter_removes_filter, 0 /*child_idx*/, false /*update_parent_filter*/); LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", - join_filter_push_down_actions.left_stream_filter_to_push_down->getOutputs()[0]->result_name, + result_name, JoinKind::Left); } if (join_filter_push_down_actions.right_stream_filter_to_push_down && allow_push_down_to_right) { + const auto & result_name = join_filter_push_down_actions.right_stream_filter_to_push_down->getOutputs()[0]->result_name; updated_steps += addNewFilterStepOrThrow(parent_node, nodes, - 
join_filter_push_down_actions.right_stream_filter_to_push_down, + std::move(*join_filter_push_down_actions.right_stream_filter_to_push_down), join_filter_push_down_actions.right_stream_filter_removes_filter, 1 /*child_idx*/, false /*update_parent_filter*/); LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", - join_filter_push_down_actions.right_stream_filter_to_push_down->getOutputs()[0]->result_name, + result_name, JoinKind::Right); } if (updated_steps > 0) { const auto & filter_column_name = filter->getFilterColumnName(); - const auto & filter_expression = filter->getExpression(); + auto & filter_expression = filter->getExpression(); - const auto * filter_node = filter_expression->tryFindInOutputs(filter_column_name); + const auto * filter_node = filter_expression.tryFindInOutputs(filter_column_name); if (!filter_node && !filter->removesFilterColumn()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}", - filter_column_name, filter_expression->dumpDAG()); + filter_column_name, filter_expression.dumpDAG()); /// Filter column was replaced to constant. @@ -391,7 +393,7 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan:: { /// This means that all predicates of filter were pushed down. /// Replace current actions to expression, as we don't need to filter anything. - parent = std::make_unique(child->getOutputStream(), filter_expression); + parent = std::make_unique(child->getOutputStream(), std::move(filter_expression)); } else { @@ -416,7 +418,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (!filter) return 0; - if (filter->getExpression()->hasStatefulFunctions()) + if (filter->getExpression().hasStatefulFunctions()) return 0; if (auto * aggregating = typeid_cast(child.get())) @@ -430,7 +432,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes return 0; const auto & actions = filter->getExpression(); - const auto & filter_node = actions->findInOutputs(filter->getFilterColumnName()); + const auto & filter_node = actions.findInOutputs(filter->getFilterColumnName()); auto identifiers_in_predicate = findIdentifiersOfNode(&filter_node); @@ -597,7 +599,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes filter_node.step = std::make_unique( filter_node.children.front()->step->getOutputStream(), - filter->getExpression()->clone(), + filter->getExpression().clone(), filter->getFilterColumnName(), filter->removesFilterColumn()); } @@ -611,7 +613,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (auto * read_from_merge = typeid_cast(child.get())) { - FilterDAGInfo info{filter->getExpression(), filter->getFilterColumnName(), filter->removesFilterColumn()}; + FilterDAGInfo info{filter->getExpression().clone(), filter->getFilterColumnName(), filter->removesFilterColumn()}; read_from_merge->addFilter(std::move(info)); std::swap(*parent_node, *child_node); return 1; diff --git a/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp b/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp index 36aab41df49..0d4f2330119 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp @@ -28,10 +28,10 @@ size_t tryLiftUpArrayJoin(QueryPlan::Node * parent_node, QueryPlan::Nodes & node const auto & expression = expression_step ? 
expression_step->getExpression() : filter_step->getExpression(); - auto split_actions = expression->splitActionsBeforeArrayJoin(array_join->columns); + auto split_actions = expression.splitActionsBeforeArrayJoin(array_join->columns); /// No actions can be moved before ARRAY JOIN. - if (split_actions.first->trivial()) + if (split_actions.first.trivial()) return 0; auto description = parent->getStepDescription(); @@ -49,9 +49,9 @@ size_t tryLiftUpArrayJoin(QueryPlan::Node * parent_node, QueryPlan::Nodes & node array_join_step->updateInputStream(node.step->getOutputStream()); if (expression_step) - parent = std::make_unique(array_join_step->getOutputStream(), split_actions.second); + parent = std::make_unique(array_join_step->getOutputStream(), std::move(split_actions.second)); else - parent = std::make_unique(array_join_step->getOutputStream(), split_actions.second, + parent = std::make_unique(array_join_step->getOutputStream(), std::move(split_actions.second), filter_step->getFilterColumnName(), filter_step->removesFilterColumn()); parent->setStepDescription(description + " [split]"); diff --git a/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp b/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp index b280e2d3cc6..7794ddae8fa 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp @@ -66,13 +66,13 @@ size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan: NameSet sort_columns; for (const auto & col : sorting_step->getSortDescription()) sort_columns.insert(col.column_name); - auto [needed_for_sorting, unneeded_for_sorting, _] = expression_step->getExpression()->splitActionsBySortingDescription(sort_columns); + auto [needed_for_sorting, unneeded_for_sorting, _] = expression_step->getExpression().splitActionsBySortingDescription(sort_columns); // No calculations can be postponed. 
- if (unneeded_for_sorting->trivial()) + if (unneeded_for_sorting.trivial()) return 0; - if (!areNodesConvertableToBlock(needed_for_sorting->getOutputs()) || !areNodesConvertableToBlock(unneeded_for_sorting->getInputs())) + if (!areNodesConvertableToBlock(needed_for_sorting.getOutputs()) || !areNodesConvertableToBlock(unneeded_for_sorting.getInputs())) return 0; // Sorting (parent_node) -> Expression (child_node) diff --git a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp index 35d8b1a35e4..c48551732c9 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp @@ -49,7 +49,7 @@ size_t tryLiftUpUnion(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) expr_node.step = std::make_unique( expr_node.children.front()->step->getOutputStream(), - expression->getExpression()->clone()); + expression->getExpression().clone()); } /// - Expression - Something diff --git a/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp b/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp index 118abdd701f..fb4cb90c4ca 100644 --- a/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp +++ b/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp @@ -37,18 +37,18 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &) if (parent_expr && child_expr) { - const auto & child_actions = child_expr->getExpression(); - const auto & parent_actions = parent_expr->getExpression(); + auto & child_actions = child_expr->getExpression(); + auto & parent_actions = parent_expr->getExpression(); /// We cannot combine actions with arrayJoin and stateful function because we not always can reorder them. /// Example: select rowNumberInBlock() from (select arrayJoin([1, 2])) /// Such a query will return two zeroes if we combine actions together. 
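The comment above is the invariant this check protects; the toy program below, plain C++ rather than ClickHouse code, imitates the two evaluation orders for `select rowNumberInBlock() from (select arrayJoin([1, 2]))` and shows why merging the actions would change the result:

// Toy model only: mimics row numbering before vs. after the arrayJoin expansion.
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<int> expanded = {1, 2};   // arrayJoin([1, 2]) turns one row into two

    // Kept as separate steps: the subquery produces two rows first,
    // then rowNumberInBlock() numbers them 0 and 1.
    for (std::size_t row = 0; row < expanded.size(); ++row)
        std::printf("separate steps: %zu\n", row);

    // Merged into a single expression: rowNumberInBlock() is computed on the single
    // source row (number 0) and only afterwards replicated by arrayJoin, giving 0 and 0.
    const std::size_t row_number_of_source_row = 0;
    for (std::size_t i = 0; i < expanded.size(); ++i)
        std::printf("merged actions:  %zu\n", row_number_of_source_row);

    return 0;
}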
- if (child_actions->hasArrayJoin() && parent_actions->hasStatefulFunctions()) + if (child_actions.hasArrayJoin() && parent_actions.hasStatefulFunctions()) return 0; - auto merged = ActionsDAG::merge(std::move(*child_actions), std::move(*parent_actions)); + auto merged = ActionsDAG::merge(std::move(child_actions), std::move(parent_actions)); - auto expr = std::make_unique(child_expr->getInputStreams().front(), merged); + auto expr = std::make_unique(child_expr->getInputStreams().front(), std::move(merged)); expr->setStepDescription("(" + parent_expr->getStepDescription() + " + " + child_expr->getStepDescription() + ")"); parent_node->step = std::move(expr); @@ -57,16 +57,16 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &) } else if (parent_filter && child_expr) { - const auto & child_actions = child_expr->getExpression(); - const auto & parent_actions = parent_filter->getExpression(); + auto & child_actions = child_expr->getExpression(); + auto & parent_actions = parent_filter->getExpression(); - if (child_actions->hasArrayJoin() && parent_actions->hasStatefulFunctions()) + if (child_actions.hasArrayJoin() && parent_actions.hasStatefulFunctions()) return 0; - auto merged = ActionsDAG::merge(std::move(*child_actions), std::move(*parent_actions)); + auto merged = ActionsDAG::merge(std::move(child_actions), std::move(parent_actions)); auto filter = std::make_unique(child_expr->getInputStreams().front(), - merged, + std::move(merged), parent_filter->getFilterColumnName(), parent_filter->removesFilterColumn()); filter->setStepDescription("(" + parent_filter->getStepDescription() + " + " + child_expr->getStepDescription() + ")"); @@ -93,32 +93,31 @@ size_t tryMergeFilters(QueryPlan::Node * parent_node, QueryPlan::Nodes &) if (parent_filter && child_filter) { - const auto & child_actions = child_filter->getExpression(); - const auto & parent_actions = parent_filter->getExpression(); + auto & child_actions = child_filter->getExpression(); + auto & parent_actions = parent_filter->getExpression(); - if (child_actions->hasArrayJoin()) + if (child_actions.hasArrayJoin()) return 0; - auto actions = child_actions->clone(); - const auto & child_filter_node = actions->findInOutputs(child_filter->getFilterColumnName()); + const auto & child_filter_node = child_actions.findInOutputs(child_filter->getFilterColumnName()); if (child_filter->removesFilterColumn()) - removeFromOutputs(*actions, child_filter_node); + removeFromOutputs(child_actions, child_filter_node); - actions->mergeInplace(std::move(*parent_actions->clone())); + child_actions.mergeInplace(std::move(parent_actions)); - const auto & parent_filter_node = actions->findInOutputs(parent_filter->getFilterColumnName()); + const auto & parent_filter_node = child_actions.findInOutputs(parent_filter->getFilterColumnName()); if (parent_filter->removesFilterColumn()) - removeFromOutputs(*actions, parent_filter_node); + removeFromOutputs(child_actions, parent_filter_node); FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto & condition = actions->addFunction(func_builder_and, {&child_filter_node, &parent_filter_node}, {}); - auto & outputs = actions->getOutputs(); + const auto & condition = child_actions.addFunction(func_builder_and, {&child_filter_node, &parent_filter_node}, {}); + auto & outputs = child_actions.getOutputs(); outputs.insert(outputs.begin(), &condition); - actions->removeUnusedActions(false); + child_actions.removeUnusedActions(false); auto filter = 
std::make_unique(child_filter->getInputStreams().front(), - actions, + std::move(child_actions), condition.result_name, true); filter->setStepDescription("(" + parent_filter->getStepDescription() + " + " + child_filter->getStepDescription() + ")"); diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index 3cb7f8a5d83..dc73521210a 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -56,12 +56,12 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) return; const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); - if (storage_prewhere_info && storage_prewhere_info->prewhere_actions) + if (storage_prewhere_info) return; /// TODO: We can also check for UnionStep, such as StorageBuffer and local distributed plans. QueryPlan::Node * filter_node = (stack.rbegin() + 1)->node; - const auto * filter_step = typeid_cast(filter_node->step.get()); + auto * filter_step = typeid_cast(filter_node->step.get()); if (!filter_step) return; @@ -85,10 +85,11 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) Names queried_columns = source_step_with_filter->requiredSourceColumns(); + const auto & source_filter_actions_dag = source_step_with_filter->getFilterActionsDAG(); MergeTreeWhereOptimizer where_optimizer{ std::move(column_compressed_sizes), storage_metadata, - storage.getConditionSelectivityEstimatorByPredicate(storage_snapshot, source_step_with_filter->getFilterActionsDAG(), context), + storage.getConditionSelectivityEstimatorByPredicate(storage_snapshot, source_filter_actions_dag ? &*source_filter_actions_dag : nullptr, context), queried_columns, storage.supportedPrewhereColumns(), getLogger("QueryPlanOptimizePrewhere")}; @@ -110,20 +111,20 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) prewhere_info->need_filter = true; prewhere_info->remove_prewhere_column = optimize_result.fully_moved_to_prewhere && filter_step->removesFilterColumn(); - auto filter_expression = filter_step->getExpression(); + auto filter_expression = std::move(filter_step->getExpression()); const auto & filter_column_name = filter_step->getFilterColumnName(); if (prewhere_info->remove_prewhere_column) { - removeFromOutput(*filter_expression, filter_column_name); - auto & outputs = filter_expression->getOutputs(); + removeFromOutput(filter_expression, filter_column_name); + auto & outputs = filter_expression.getOutputs(); size_t size = outputs.size(); outputs.insert(outputs.end(), optimize_result.prewhere_nodes.begin(), optimize_result.prewhere_nodes.end()); - filter_expression->removeUnusedActions(false); + filter_expression.removeUnusedActions(false); outputs.resize(size); } - auto split_result = filter_step->getExpression()->split(optimize_result.prewhere_nodes, true, true); + auto split_result = filter_expression.split(optimize_result.prewhere_nodes, true, true); /// This is the leak of abstraction. /// Splited actions may have inputs which are needed only for PREWHERE. 
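optimizePrewhere above passes the new optional filter DAG into an interface that still expects a nullable raw pointer, using the dag ? &*dag : nullptr bridge; the same idiom recurs later for KeyCondition and the projection analysis. A tiny self-contained example of the idiom (toy Filter type and a hypothetical estimate() function, not the ClickHouse interfaces):

    #include <cstdio>
    #include <optional>

    struct Filter { int selectivity = 0; };

    // Legacy-style interface: "maybe a filter" is expressed as a nullable raw pointer.
    static int estimate(const Filter * filter)
    {
        return filter ? filter->selectivity : -1;
    }

    int main()
    {
        std::optional<Filter> maybe_filter;                                      // nothing pushed down yet
        std::printf("%d\n", estimate(maybe_filter ? &*maybe_filter : nullptr));  // prints -1

        maybe_filter = Filter{42};
        std::printf("%d\n", estimate(maybe_filter ? &*maybe_filter : nullptr));  // prints 42
        return 0;
    }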
@@ -139,15 +140,15 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) /// So, here we restore removed inputs for PREWHERE actions { std::unordered_set first_outputs( - split_result.first->getOutputs().begin(), split_result.first->getOutputs().end()); - for (const auto * input : split_result.first->getInputs()) + split_result.first.getOutputs().begin(), split_result.first.getOutputs().end()); + for (const auto * input : split_result.first.getInputs()) { if (!first_outputs.contains(input)) { - split_result.first->getOutputs().push_back(input); + split_result.first.getOutputs().push_back(input); /// Add column to second actions as input. /// Do not add it to result, so it would be removed. - split_result.second->addInput(input->result_name, input->result_type); + split_result.second.addInput(input->result_name, input->result_type); } } } @@ -164,16 +165,16 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) { prewhere_info->prewhere_column_name = conditions.front()->result_name; if (prewhere_info->remove_prewhere_column) - prewhere_info->prewhere_actions->getOutputs().push_back(conditions.front()); + prewhere_info->prewhere_actions.getOutputs().push_back(conditions.front()); } else { prewhere_info->remove_prewhere_column = true; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto * node = &prewhere_info->prewhere_actions->addFunction(func_builder_and, std::move(conditions), {}); + const auto * node = &prewhere_info->prewhere_actions.addFunction(func_builder_and, std::move(conditions), {}); prewhere_info->prewhere_column_name = node->result_name; - prewhere_info->prewhere_actions->getOutputs().push_back(node); + prewhere_info->prewhere_actions.getOutputs().push_back(node); } source_step_with_filter->updatePrewhereInfo(prewhere_info); diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp index da4e104d18b..490b79fbf8d 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp @@ -18,16 +18,16 @@ void optimizePrimaryKeyConditionAndLimit(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions, storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions.clone(), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) - source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter, storage_prewhere_info->row_level_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter->clone(), storage_prewhere_info->row_level_column_name); } for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) { - source_step_with_filter->addFilter(filter_step->getExpression(), filter_step->getFilterColumnName()); + source_step_with_filter->addFilter(filter_step->getExpression().clone(), filter_step->getFilterColumnName()); } else if (auto * limit_step = typeid_cast(iter->node->step.get())) { diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 28eb4da2e17..99df6da263f 
100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -171,17 +171,17 @@ static void appendFixedColumnsFromFilterExpression(const ActionsDAG::Node & filt } } -static void appendExpression(ActionsDAGPtr & dag, const ActionsDAGPtr & expression) +static void appendExpression(std::optional & dag, const ActionsDAG & expression) { if (dag) - dag->mergeInplace(std::move(*expression->clone())); + dag->mergeInplace(expression.clone()); else - dag = expression->clone(); + dag = expression.clone(); } /// This function builds a common DAG which is a merge of DAGs from Filter and Expression steps chain. /// Additionally, build a set of fixed columns. -void buildSortingDAG(QueryPlan::Node & node, ActionsDAGPtr & dag, FixedColumns & fixed_columns, size_t & limit) +void buildSortingDAG(QueryPlan::Node & node, std::optional & dag, FixedColumns & fixed_columns, size_t & limit) { IQueryPlanStep * step = node.step.get(); if (auto * reading = typeid_cast(step)) @@ -191,13 +191,11 @@ void buildSortingDAG(QueryPlan::Node & node, ActionsDAGPtr & dag, FixedColumns & /// Should ignore limit if there is filtering. limit = 0; - if (prewhere_info->prewhere_actions) - { - //std::cerr << "====== Adding prewhere " << std::endl; - appendExpression(dag, prewhere_info->prewhere_actions); - if (const auto * filter_expression = dag->tryFindInOutputs(prewhere_info->prewhere_column_name)) - appendFixedColumnsFromFilterExpression(*filter_expression, fixed_columns); - } + //std::cerr << "====== Adding prewhere " << std::endl; + appendExpression(dag, prewhere_info->prewhere_actions); + if (const auto * filter_expression = dag->tryFindInOutputs(prewhere_info->prewhere_column_name)) + appendFixedColumnsFromFilterExpression(*filter_expression, fixed_columns); + } return; } @@ -212,7 +210,7 @@ void buildSortingDAG(QueryPlan::Node & node, ActionsDAGPtr & dag, FixedColumns & const auto & actions = expression->getExpression(); /// Should ignore limit because arrayJoin() can reduce the number of rows in case of empty array. 
- if (actions->hasArrayJoin()) + if (actions.hasArrayJoin()) limit = 0; appendExpression(dag, actions); @@ -330,7 +328,7 @@ void enreachFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns) InputOrderInfoPtr buildInputOrderInfo( const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const SortDescription & description, const KeyDescription & sorting_key, size_t limit) @@ -507,7 +505,7 @@ struct AggregationInputOrder AggregationInputOrder buildInputOrderInfo( const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const Names & group_by_keys, const ActionsDAG & sorting_key_dag, const Names & sorting_key_columns) @@ -693,7 +691,7 @@ AggregationInputOrder buildInputOrderInfo( InputOrderInfoPtr buildInputOrderInfo( const ReadFromMergeTree * reading, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const SortDescription & description, size_t limit) { @@ -709,7 +707,7 @@ InputOrderInfoPtr buildInputOrderInfo( InputOrderInfoPtr buildInputOrderInfo( ReadFromMerge * merge, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const SortDescription & description, size_t limit) { @@ -745,7 +743,7 @@ InputOrderInfoPtr buildInputOrderInfo( AggregationInputOrder buildInputOrderInfo( ReadFromMergeTree * reading, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const Names & group_by_keys) { const auto & sorting_key = reading->getStorageMetadata()->getSortingKey(); @@ -760,7 +758,7 @@ AggregationInputOrder buildInputOrderInfo( AggregationInputOrder buildInputOrderInfo( ReadFromMerge * merge, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const Names & group_by_keys) { const auto & tables = merge->getSelectedTables(); @@ -801,7 +799,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n const auto & description = sorting.getSortDescription(); size_t limit = sorting.getLimit(); - ActionsDAGPtr dag; + std::optional dag; FixedColumns fixed_columns; buildSortingDAG(node, dag, fixed_columns, limit); @@ -855,7 +853,7 @@ AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPl const auto & keys = aggregating.getParams().keys; size_t limit = 0; - ActionsDAGPtr dag; + std::optional dag; FixedColumns fixed_columns; buildSortingDAG(node, dag, fixed_columns, limit); @@ -1076,13 +1074,13 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, for (const auto & actions_dag : window_desc.partition_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } for (const auto & actions_dag : window_desc.order_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } auto order_optimizer = std::make_shared( diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 9e2c2dcc96d..ad89cec5f79 100644 --- 
a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -43,7 +43,7 @@ static DAGIndex buildDAGIndex(const ActionsDAG & dag) /// Required analysis info from aggregate projection. struct AggregateProjectionInfo { - ActionsDAGPtr before_aggregation; + std::optional before_aggregation; Names keys; AggregateDescriptions aggregates; @@ -274,7 +274,7 @@ static void appendAggregateFunctions( } } -ActionsDAGPtr analyzeAggregateProjection( +std::optional analyzeAggregateProjection( const AggregateProjectionInfo & info, const QueryDAG & query, const DAGIndex & query_index, @@ -394,7 +394,7 @@ ActionsDAGPtr analyzeAggregateProjection( // LOG_TRACE(getLogger("optimizeUseProjections"), "Folding actions by projection"); auto proj_dag = query.dag->foldActionsByProjection(new_inputs, query_key_nodes); - appendAggregateFunctions(*proj_dag, aggregates, *matched_aggregates); + appendAggregateFunctions(proj_dag, aggregates, *matched_aggregates); return proj_dag; } @@ -406,7 +406,7 @@ struct AggregateProjectionCandidate : public ProjectionCandidate /// Actions which need to be applied to columns from projection /// in order to get all the columns required for aggregation. - ActionsDAGPtr dag; + ActionsDAG dag; }; struct MinMaxProjectionCandidate @@ -481,13 +481,13 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (auto proj_dag = analyzeAggregateProjection(info, dag, query_index, keys, aggregates)) { // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); - AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; + AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(*proj_dag)}; // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection sample block {}", sample_block.dumpStructure()); auto block = reading.getMergeTreeData().getMinMaxCountProjectionBlock( metadata, - candidate.dag->getRequiredColumnsNames(), - (dag.filter_node ? dag.dag : nullptr), + candidate.dag.getRequiredColumnsNames(), + (dag.filter_node ? &*dag.dag : nullptr), parts, max_added_blocks.get(), context); @@ -538,7 +538,7 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (auto proj_dag = analyzeAggregateProjection(info, dag, query_index, keys, aggregates)) { // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); - AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; + AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(*proj_dag)}; candidate.projection = projection; candidates.real.emplace_back(std::move(candidate)); } @@ -666,7 +666,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu /// Selecting best candidate. 
for (auto & candidate : candidates.real) { - auto required_column_names = candidate.dag->getRequiredColumnsNames(); + auto required_column_names = candidate.dag.getRequiredColumnsNames(); bool analyzed = analyzeProjectionCandidate( candidate, @@ -677,7 +677,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu query_info, context, max_added_blocks, - candidate.dag); + &candidate.dag); if (!analyzed) continue; @@ -767,7 +767,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu projection_reading = reader.readFromParts( /* parts = */ {}, /* alter_conversions = */ {}, - best_candidate->dag->getRequiredColumnsNames(), + best_candidate->dag.getRequiredColumnsNames(), proj_snapshot, projection_query_info, context, @@ -779,7 +779,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu if (!projection_reading) { - auto header = proj_snapshot->getSampleBlockForColumns(best_candidate->dag->getRequiredColumnsNames()); + auto header = proj_snapshot->getSampleBlockForColumns(best_candidate->dag.getRequiredColumnsNames()); Pipe pipe(std::make_shared(std::move(header))); projection_reading = std::make_unique(std::move(pipe)); } @@ -810,17 +810,19 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu if (best_candidate) { aggregate_projection_node = &nodes.emplace_back(); + if (candidates.has_filter) { + const auto & result_name = best_candidate->dag.getOutputs().front()->result_name; aggregate_projection_node->step = std::make_unique( projection_reading_node.step->getOutputStream(), - best_candidate->dag, - best_candidate->dag->getOutputs().front()->result_name, + std::move(best_candidate->dag), + result_name, true); } else aggregate_projection_node->step - = std::make_unique(projection_reading_node.step->getOutputStream(), best_candidate->dag); + = std::make_unique(projection_reading_node.step->getOutputStream(), std::move(best_candidate->dag)); aggregate_projection_node->children.push_back(&projection_reading_node); } diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index 291800c2c86..e9aee65675f 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -25,7 +25,7 @@ struct NormalProjectionCandidate : public ProjectionCandidate { }; -static ActionsDAGPtr makeMaterializingDAG(const Block & proj_header, const Block main_header) +static std::optional makeMaterializingDAG(const Block & proj_header, const Block main_header) { /// Materialize constants in case we don't have it in output header. /// This may happen e.g. if we have PREWHERE. @@ -33,7 +33,7 @@ static ActionsDAGPtr makeMaterializingDAG(const Block & proj_header, const Block size_t num_columns = main_header.columns(); /// This is a error; will have block structure mismatch later. 
if (proj_header.columns() != num_columns) - return nullptr; + return {}; std::vector const_positions; for (size_t i = 0; i < num_columns; ++i) @@ -47,17 +47,17 @@ static ActionsDAGPtr makeMaterializingDAG(const Block & proj_header, const Block } if (const_positions.empty()) - return nullptr; + return {}; - ActionsDAGPtr dag = std::make_unique(); - auto & outputs = dag->getOutputs(); + ActionsDAG dag; + auto & outputs = dag.getOutputs(); for (const auto & col : proj_header.getColumnsWithTypeAndName()) - outputs.push_back(&dag->addInput(col)); + outputs.push_back(&dag.addInput(col)); for (auto pos : const_positions) { auto & output = outputs[pos]; - output = &dag->materializeNode(*output); + output = &dag.materializeNode(*output); } return dag; @@ -174,7 +174,7 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod query_info, context, max_added_blocks, - query.filter_node ? query.dag : nullptr); + query.filter_node ? &*query.dag : nullptr); if (!analyzed) continue; @@ -244,14 +244,14 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod { expr_or_filter_node.step = std::make_unique( projection_reading_node.step->getOutputStream(), - query.dag, + std::move(*query.dag), query.filter_node->result_name, true); } else expr_or_filter_node.step = std::make_unique( projection_reading_node.step->getOutputStream(), - query.dag); + std::move(*query.dag)); expr_or_filter_node.children.push_back(&projection_reading_node); next_node = &expr_or_filter_node; @@ -269,7 +269,7 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod if (auto materializing = makeMaterializingDAG(proj_stream->header, main_stream.header)) { - auto converting = std::make_unique(*proj_stream, materializing); + auto converting = std::make_unique(*proj_stream, std::move(*materializing)); proj_stream = &converting->getOutputStream(); auto & expr_node = nodes.emplace_back(); expr_node.step = std::move(converting); diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index 4d60a1135a6..7414d479cc9 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -65,12 +65,12 @@ std::shared_ptr getMaxAddedBlocks(ReadFromMergeTree * rea return {}; } -void QueryDAG::appendExpression(const ActionsDAGPtr & expression) +void QueryDAG::appendExpression(const ActionsDAG & expression) { if (dag) - dag->mergeInplace(std::move(*expression->clone())); + dag->mergeInplace(expression.clone()); else - dag = expression->clone(); + dag = expression.clone(); } const ActionsDAG::Node * findInOutputs(ActionsDAG & dag, const std::string & name, bool remove) @@ -121,22 +121,19 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & { if (prewhere_info->row_level_filter) { - appendExpression(prewhere_info->row_level_filter); + appendExpression(*prewhere_info->row_level_filter); if (const auto * filter_expression = findInOutputs(*dag, prewhere_info->row_level_column_name, false)) filter_nodes.push_back(filter_expression); else return false; } - if (prewhere_info->prewhere_actions) - { - appendExpression(prewhere_info->prewhere_actions); - if (const auto * filter_expression - = findInOutputs(*dag, prewhere_info->prewhere_column_name, prewhere_info->remove_prewhere_column)) - filter_nodes.push_back(filter_expression); - else - return false; - } + appendExpression(prewhere_info->prewhere_actions); + if (const auto 
* filter_expression + = findInOutputs(*dag, prewhere_info->prewhere_column_name, prewhere_info->remove_prewhere_column)) + filter_nodes.push_back(filter_expression); + else + return false; } return true; } @@ -150,7 +147,7 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & if (auto * expression = typeid_cast(step)) { const auto & actions = expression->getExpression(); - if (actions->hasArrayJoin()) + if (actions.hasArrayJoin()) return false; appendExpression(actions); @@ -160,7 +157,7 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & if (auto * filter = typeid_cast(step)) { const auto & actions = filter->getExpression(); - if (actions->hasArrayJoin()) + if (actions.hasArrayJoin()) return false; appendExpression(actions); @@ -214,7 +211,7 @@ bool analyzeProjectionCandidate( const SelectQueryInfo & query_info, const ContextPtr & context, const std::shared_ptr & max_added_blocks, - const ActionsDAGPtr & dag) + const ActionsDAG * dag) { MergeTreeData::DataPartsVector projection_parts; MergeTreeData::DataPartsVector normal_parts; @@ -239,7 +236,8 @@ bool analyzeProjectionCandidate( auto projection_query_info = query_info; projection_query_info.prewhere_info = nullptr; - projection_query_info.filter_actions_dag = dag; + if (dag) + projection_query_info.filter_actions_dag = std::make_unique(dag->clone()); auto projection_result_ptr = reader.estimateNumMarksToRead( std::move(projection_parts), diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h index e1e106b988e..ee0dfddc326 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h @@ -25,14 +25,14 @@ std::shared_ptr getMaxAddedBlocks(ReadFromMergeTree * rea /// Additionally, for all the Filter steps, we collect filter conditions into filter_nodes. 
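QueryDAG::appendExpression above (like buildSortingDAG earlier in optimizeReadInOrder.cpp) accumulates step expressions into a lazily created DAG: the first expression initialises the std::optional, later ones are merged into it. A toy version of that accumulator, with a stand-in Dag whose mergeInplace just concatenates strings:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>

    struct Dag
    {
        std::string nodes;
        Dag clone() const { return *this; }
        void mergeInplace(Dag && other) { nodes += other.nodes; }
    };

    // First call creates the DAG, subsequent calls merge into it -- the shape of QueryDAG::appendExpression.
    static void appendExpression(std::optional<Dag> & dag, const Dag & expression)
    {
        if (dag)
            dag->mergeInplace(expression.clone());
        else
            dag = expression.clone();
    }

    int main()
    {
        std::optional<Dag> dag;
        appendExpression(dag, Dag{"filter;"});
        appendExpression(dag, Dag{"project;"});
        std::cout << dag->nodes << '\n';   // prints "filter;project;"
        return 0;
    }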
struct QueryDAG { - ActionsDAGPtr dag; + std::optional dag; const ActionsDAG::Node * filter_node = nullptr; bool build(QueryPlan::Node & node); private: bool buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & filter_nodes); - void appendExpression(const ActionsDAGPtr & expression); + void appendExpression(const ActionsDAG & expression); }; struct ProjectionCandidate @@ -60,6 +60,6 @@ bool analyzeProjectionCandidate( const SelectQueryInfo & query_info, const ContextPtr & context, const std::shared_ptr & max_added_blocks, - const ActionsDAGPtr & dag); + const ActionsDAG * dag); } diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index 51df25b35f4..7664822cc7e 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -43,10 +43,10 @@ namespace } } - void logActionsDAG(const String & prefix, const ActionsDAGPtr & actions) + void logActionsDAG(const String & prefix, const ActionsDAG & actions) { if constexpr (debug_logging_enabled) - LOG_DEBUG(getLogger("redundantDistinct"), "{} :\n{}", prefix, actions->dumpDAG()); + LOG_DEBUG(getLogger("redundantDistinct"), "{} :\n{}", prefix, actions.dumpDAG()); } using DistinctColumns = std::set; @@ -65,25 +65,25 @@ namespace } /// build actions DAG from stack of steps - ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) + std::optional buildActionsForPlanPath(std::vector & dag_stack) { if (dag_stack.empty()) - return nullptr; + return {}; - ActionsDAGPtr path_actions = dag_stack.back()->clone(); + ActionsDAG path_actions = dag_stack.back()->clone(); dag_stack.pop_back(); while (!dag_stack.empty()) { - ActionsDAGPtr clone = dag_stack.back()->clone(); + ActionsDAG clone = dag_stack.back()->clone(); logActionsDAG("DAG to merge", clone); dag_stack.pop_back(); - path_actions->mergeInplace(std::move(*clone)); + path_actions.mergeInplace(std::move(clone)); } return path_actions; } bool compareAggregationKeysWithDistinctColumns( - const Names & aggregation_keys, const DistinctColumns & distinct_columns, std::vector> actions_chain) + const Names & aggregation_keys, const DistinctColumns & distinct_columns, std::vector> actions_chain) { logDebug("aggregation_keys", aggregation_keys); logDebug("aggregation_keys size", aggregation_keys.size()); @@ -93,7 +93,8 @@ namespace std::set source_columns; for (auto & actions : actions_chain) { - FindOriginalNodeForOutputName original_node_finder(buildActionsForPlanPath(actions)); + auto tmp_actions = buildActionsForPlanPath(actions); + FindOriginalNodeForOutputName original_node_finder(*tmp_actions); for (const auto & column : current_columns) { logDebug("distinct column name", column); @@ -131,10 +132,10 @@ namespace return true; if (const auto * const expr = typeid_cast(step); expr) - return !expr->getExpression()->hasArrayJoin(); + return !expr->getExpression().hasArrayJoin(); if (const auto * const filter = typeid_cast(step); filter) - return !filter->getExpression()->hasArrayJoin(); + return !filter->getExpression().hasArrayJoin(); if (typeid_cast(step) || typeid_cast(step) || typeid_cast(step) || typeid_cast(step)) @@ -152,8 +153,8 @@ namespace const DistinctStep * distinct_step = typeid_cast(distinct_node->step.get()); chassert(distinct_step); - std::vector dag_stack; - std::vector> actions_chain; + std::vector dag_stack; + std::vector> actions_chain; const DistinctStep * inner_distinct_step = nullptr; 
const IQueryPlanStep * aggregation_before_distinct = nullptr; const QueryPlan::Node * node = distinct_node; @@ -182,9 +183,9 @@ namespace } if (const auto * const expr = typeid_cast(current_step); expr) - dag_stack.push_back(expr->getExpression()); + dag_stack.push_back(&expr->getExpression()); else if (const auto * const filter = typeid_cast(current_step); filter) - dag_stack.push_back(filter->getExpression()); + dag_stack.push_back(&filter->getExpression()); node = node->children.front(); if (inner_distinct_step = typeid_cast(node->step.get()); inner_distinct_step) @@ -222,7 +223,7 @@ namespace chassert(distinct_step); const auto distinct_columns = getDistinctColumns(distinct_step); - std::vector dag_stack; + std::vector dag_stack; const DistinctStep * inner_distinct_step = nullptr; const QueryPlan::Node * node = distinct_node; while (!node->children.empty()) @@ -235,9 +236,9 @@ namespace } if (const auto * const expr = typeid_cast(current_step); expr) - dag_stack.push_back(expr->getExpression()); + dag_stack.push_back(&expr->getExpression()); else if (const auto * const filter = typeid_cast(current_step); filter) - dag_stack.push_back(filter->getExpression()); + dag_stack.push_back(&filter->getExpression()); node = node->children.front(); inner_distinct_step = typeid_cast(node->step.get()); @@ -259,11 +260,11 @@ namespace if (distinct_columns.size() != inner_distinct_columns.size()) return false; - ActionsDAGPtr path_actions; + ActionsDAG path_actions; if (!dag_stack.empty()) { /// build actions DAG to find original column names - path_actions = buildActionsForPlanPath(dag_stack); + path_actions = std::move(*buildActionsForPlanPath(dag_stack)); logActionsDAG("distinct pass: merged DAG", path_actions); /// compare columns of two DISTINCTs diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp index 632eba6ab5f..7cac7bee6ec 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp @@ -213,12 +213,12 @@ private: logStep("checking for stateful function", node); if (const auto * expr = typeid_cast(step); expr) { - if (expr->getExpression()->hasStatefulFunctions()) + if (expr->getExpression().hasStatefulFunctions()) return false; } else if (const auto * filter = typeid_cast(step); filter) { - if (filter->getExpression()->hasStatefulFunctions()) + if (filter->getExpression().hasStatefulFunctions()) return false; } else diff --git a/src/Processors/QueryPlan/Optimizations/splitFilter.cpp b/src/Processors/QueryPlan/Optimizations/splitFilter.cpp index 561ad7302c6..6aed57634b0 100644 --- a/src/Processors/QueryPlan/Optimizations/splitFilter.cpp +++ b/src/Processors/QueryPlan/Optimizations/splitFilter.cpp @@ -17,13 +17,13 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes) const std::string & filter_column_name = filter_step->getFilterColumnName(); /// Do not split if there are function like runningDifference. 
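The removeRedundantDistinct changes above keep a stack of non-owning const ActionsDAG * pointers into the plan steps and clone only when the merged path DAG is actually built. A compact sketch of that shape, again with a toy Dag stand-in:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    struct Dag
    {
        std::string nodes;
        Dag clone() const { return *this; }
        void mergeInplace(Dag && other) { nodes += other.nodes; }
    };

    // Plan steps own their DAGs; the walk only records pointers and clones lazily,
    // which is the shape of the new buildActionsForPlanPath().
    static std::optional<Dag> buildActionsForPlanPath(std::vector<const Dag *> & dag_stack)
    {
        if (dag_stack.empty())
            return {};

        Dag path_actions = dag_stack.back()->clone();
        dag_stack.pop_back();
        while (!dag_stack.empty())
        {
            Dag clone = dag_stack.back()->clone();
            dag_stack.pop_back();
            path_actions.mergeInplace(std::move(clone));
        }
        return path_actions;
    }

    int main()
    {
        Dag filter{"filter;"}, project{"project;"};
        std::vector<const Dag *> dag_stack{&filter, &project};            // pushed in plan order
        std::cout << buildActionsForPlanPath(dag_stack)->nodes << '\n';   // prints "project;filter;"
        return 0;
    }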
- if (expr->hasStatefulFunctions()) + if (expr.hasStatefulFunctions()) return 0; bool filter_name_clashs_with_input = false; if (filter_step->removesFilterColumn()) { - for (const auto * input : expr->getInputs()) + for (const auto * input : expr.getInputs()) { if (input->result_name == filter_column_name) { @@ -33,14 +33,14 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes) } } - auto split = expr->splitActionsForFilter(filter_column_name); + auto split = expr.splitActionsForFilter(filter_column_name); - if (split.second->trivial()) + if (split.second.trivial()) return 0; bool remove_filter = false; if (filter_step->removesFilterColumn()) - remove_filter = split.second->removeUnusedResult(filter_column_name); + remove_filter = split.second.removeUnusedResult(filter_column_name); auto description = filter_step->getStepDescription(); @@ -53,11 +53,11 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes) { split_filter_name = "__split_filter"; - for (auto & filter_output : split.first->getOutputs()) + for (auto & filter_output : split.first.getOutputs()) { if (filter_output->result_name == filter_column_name) { - filter_output = &split.first->addAlias(*filter_output, split_filter_name); + filter_output = &split.first.addAlias(*filter_output, split_filter_name); break; } } diff --git a/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp b/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp index 124cb735d5a..0eeaec9bde7 100644 --- a/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp +++ b/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp @@ -74,11 +74,11 @@ void removeInjectiveFunctionsFromResultsRecursively(const ActionsDAG::Node * nod /// Our objective is to replace injective function nodes in `actions` results with its children /// until only the irreducible subset of nodes remains. Against these set of nodes we will match partition key expression /// to determine if it maps all rows with the same value of group by key to the same partition. -NodeSet removeInjectiveFunctionsFromResultsRecursively(const ActionsDAGPtr & actions) +NodeSet removeInjectiveFunctionsFromResultsRecursively(const ActionsDAG & actions) { NodeSet irreducible; NodeSet visited; - for (const auto & node : actions->getOutputs()) + for (const auto & node : actions.getOutputs()) removeInjectiveFunctionsFromResultsRecursively(node, irreducible, visited); return irreducible; } @@ -146,19 +146,19 @@ bool allOutputsDependsOnlyOnAllowedNodes( /// 3. We match partition key actions with group by key actions to find col1', ..., coln' in partition key actions. /// 4. We check that partition key is indeed a deterministic function of col1', ..., coln'. bool isPartitionKeySuitsGroupByKey( - const ReadFromMergeTree & reading, const ActionsDAGPtr & group_by_actions, const AggregatingStep & aggregating) + const ReadFromMergeTree & reading, const ActionsDAG & group_by_actions, const AggregatingStep & aggregating) { if (aggregating.isGroupingSets()) return false; - if (group_by_actions->hasArrayJoin() || group_by_actions->hasStatefulFunctions() || group_by_actions->hasNonDeterministic()) + if (group_by_actions.hasArrayJoin() || group_by_actions.hasStatefulFunctions() || group_by_actions.hasNonDeterministic()) return false; /// We are interested only in calculations required to obtain group by keys (and not aggregate function arguments for example). 
- auto key_nodes = group_by_actions->findInOutpus(aggregating.getParams().keys); + auto key_nodes = group_by_actions.findInOutpus(aggregating.getParams().keys); auto group_by_key_actions = ActionsDAG::cloneSubDAG(key_nodes, /*remove_aliases=*/ true); - const auto & gb_key_required_columns = group_by_key_actions->getRequiredColumnsNames(); + const auto & gb_key_required_columns = group_by_key_actions.getRequiredColumnsNames(); const auto & partition_actions = reading.getStorageMetadata()->getPartitionKey().expression->getActionsDAG(); @@ -169,7 +169,7 @@ bool isPartitionKeySuitsGroupByKey( const auto irreducibe_nodes = removeInjectiveFunctionsFromResultsRecursively(group_by_key_actions); - const auto matches = matchTrees(group_by_key_actions->getOutputs(), partition_actions); + const auto matches = matchTrees(group_by_key_actions.getOutputs(), partition_actions); return allOutputsDependsOnlyOnAllowedNodes(partition_actions, irreducibe_nodes, matches); } diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index ed4b1906635..a12fce95b10 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -943,7 +943,7 @@ SplitPartsWithRangesByPrimaryKeyResult splitPartsWithRangesByPrimaryKey( auto syntax_result = TreeRewriter(context).analyze(filter_function, primary_key.expression->getRequiredColumnsWithTypes()); auto actions = ExpressionAnalyzer(filter_function, syntax_result, context).getActionsDAG(false); - reorderColumns(*actions, result.merging_pipes[i].getHeader(), filter_function->getColumnName()); + reorderColumns(actions, result.merging_pipes[i].getHeader(), filter_function->getColumnName()); ExpressionActionsPtr expression_actions = std::make_shared(std::move(actions)); auto description = fmt::format( "filter values in ({}, {}]", i ? ::toString(borders[i - 1]) : "-inf", i < borders.size() ? 
::toString(borders[i]) : "+inf"); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index a7ce92fe8e0..483876dd293 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -109,8 +109,7 @@ bool restorePrewhereInputs(PrewhereInfo & info, const NameSet & inputs) if (info.row_level_filter) added = added || restoreDAGInputs(*info.row_level_filter, inputs); - if (info.prewhere_actions) - added = added || restoreDAGInputs(*info.prewhere_actions, inputs); + added = added || restoreDAGInputs(info.prewhere_actions, inputs); return added; } @@ -175,7 +174,6 @@ static void updateSortDescriptionForOutputStream( Block original_header = output_stream.header.cloneEmpty(); if (prewhere_info) { - if (prewhere_info->prewhere_actions) { FindOriginalNodeForOutputName original_column_finder(prewhere_info->prewhere_actions); for (auto & column : original_header) @@ -188,7 +186,7 @@ static void updateSortDescriptionForOutputStream( if (prewhere_info->row_level_filter) { - FindOriginalNodeForOutputName original_column_finder(prewhere_info->row_level_filter); + FindOriginalNodeForOutputName original_column_finder(*prewhere_info->row_level_filter); for (auto & column : original_header) { const auto * original_node = original_column_finder.find(column.name); @@ -802,7 +800,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ info.use_uncompressed_cache); }; - auto sorting_expr = std::make_shared(metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); + auto sorting_expr = metadata_for_reading->getSortingKey().expression; SplitPartsWithRangesByPrimaryKeyResult split_ranges_result = splitPartsWithRangesByPrimaryKey( metadata_for_reading->getPrimaryKey(), @@ -834,10 +832,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ pipes[0].getHeader().getColumnsWithTypeAndName(), pipes[1].getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); + auto converting_expr = std::make_shared(std::move(conversion_action)); pipes[0].addSimpleTransform( - [conversion_action](const Block & header) + [converting_expr](const Block & header) { - auto converting_expr = std::make_shared(conversion_action); return std::make_shared(header, converting_expr); }); return Pipe::unitePipes(std::move(pipes)); @@ -851,16 +849,16 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ info.use_uncompressed_cache); } -static ActionsDAGPtr createProjection(const Block & header) +static ActionsDAG createProjection(const Block & header) { - return std::make_shared(header.getNamesAndTypesList()); + return ActionsDAG(header.getNamesAndTypesList()); } Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & column_names, - ActionsDAGPtr & out_projection, + std::optional & out_projection, const InputOrderInfoPtr & input_order_info) { const auto & settings = context->getSettingsRef(); @@ -1052,7 +1050,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( for (size_t j = 0; j < prefix_size; ++j) sort_description.emplace_back(sorting_columns[j], input_order_info->direction); - auto sorting_key_expr = std::make_shared(sorting_key_prefix_expr); + auto sorting_key_expr = std::make_shared(std::move(sorting_key_prefix_expr)); auto merge_streams = [&](Pipe & pipe) { @@ -1176,7 +1174,7 @@ bool 
ReadFromMergeTree::doNotMergePartsAcrossPartitionsFinal() const } Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( - RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & origin_column_names, const Names & column_names, ActionsDAGPtr & out_projection) + RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & origin_column_names, const Names & column_names, std::optional & out_projection) { const auto & settings = context->getSettingsRef(); const auto & data_settings = data.getSettings(); @@ -1217,7 +1215,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( /// we will store lonely parts with level > 0 to use parallel select on them. RangesInDataParts non_intersecting_parts_by_primary_key; - auto sorting_expr = std::make_shared(metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); + auto sorting_expr = metadata_for_reading->getSortingKey().expression; if (prewhere_info) { @@ -1338,7 +1336,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( if (!merging_pipes.empty() && !no_merging_pipes.empty()) { - out_projection = nullptr; /// We do projection here + out_projection = {}; /// We do projection here Pipes pipes; pipes.resize(2); pipes[0] = Pipe::unitePipes(std::move(merging_pipes)); @@ -1347,10 +1345,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( pipes[0].getHeader().getColumnsWithTypeAndName(), pipes[1].getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); + auto converting_expr = std::make_shared(std::move(conversion_action)); pipes[0].addSimpleTransform( - [conversion_action](const Block & header) + [converting_expr](const Block & header) { - auto converting_expr = std::make_shared(conversion_action); return std::make_shared(header, converting_expr); }); return Pipe::unitePipes(std::move(pipes)); @@ -1384,7 +1382,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead( static void buildIndexes( std::optional & indexes, - ActionsDAGPtr filter_actions_dag, + const ActionsDAG * filter_actions_dag, const MergeTreeData & data, const MergeTreeData::DataPartsVector & parts, const ContextPtr & context, @@ -1524,11 +1522,12 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes) /// (1) SourceStepWithFilter::filter_nodes, (2) query_info.filter_actions_dag. Make sure there are consistent. /// TODO: Get rid of filter_actions_dag in query_info after we move analysis of /// parallel replicas and unused shards into optimization, similar to projection analysis. 
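In the two spreadMarkRangesAmongStreams* hunks above, the converting expression object is now built once and the per-header lambda merely captures the shared_ptr, rather than rebuilding it on every invocation. A self-contained sketch of that hoisting, with a toy Actions type standing in for ExpressionActions:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    struct Actions
    {
        explicit Actions(std::string dag) : compiled(std::move(dag)) { std::cout << "compiled once\n"; }
        std::string compiled;
    };

    int main()
    {
        std::string conversion_dag = "convert-columns";

        // Build the potentially expensive object once and let every callback share it.
        auto converting_expr = std::make_shared<const Actions>(std::move(conversion_dag));
        auto add_transform = [converting_expr](const std::string & header)
        {
            return header + " -> " + converting_expr->compiled;
        };

        std::vector<std::string> headers{"pipe0-header", "pipe1-header"};
        for (const auto & header : headers)
            std::cout << add_transform(header) << '\n';   // "compiled once" appears only once above
        return 0;
    }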
- query_info.filter_actions_dag = filter_actions_dag; + if (filter_actions_dag) + query_info.filter_actions_dag = std::make_shared(filter_actions_dag->clone()); buildIndexes( indexes, - filter_actions_dag, + query_info.filter_actions_dag.get(), data, prepared_parts, context, @@ -1570,7 +1569,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead( const Names & primary_key_column_names = primary_key.column_names; if (!indexes) - buildIndexes(indexes, query_info_.filter_actions_dag, data, parts, context_, query_info_, metadata_snapshot); + buildIndexes(indexes, query_info_.filter_actions_dag.get(), data, parts, context_, query_info_, metadata_snapshot); if (indexes->part_values && indexes->part_values->empty()) return std::make_shared(std::move(result)); @@ -1838,7 +1837,7 @@ bool ReadFromMergeTree::isQueryWithSampling() const } Pipe ReadFromMergeTree::spreadMarkRanges( - RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, ActionsDAGPtr & result_projection) + RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection) { const bool final = isQueryWithFinal(); Names column_names_to_read = result.column_names_to_read; @@ -1899,7 +1898,7 @@ Pipe ReadFromMergeTree::spreadMarkRanges( } } -Pipe ReadFromMergeTree::groupStreamsByPartition(AnalysisResult & result, ActionsDAGPtr & result_projection) +Pipe ReadFromMergeTree::groupStreamsByPartition(AnalysisResult & result, std::optional & result_projection) { auto && parts_with_ranges = std::move(result.parts_with_ranges); @@ -1988,7 +1987,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons /// Projection, that needed to drop columns, which have appeared by execution /// of some extra expressions, and to allow execute the same expressions later. /// NOTE: It may lead to double computation of expressions. - ActionsDAGPtr result_projection; + std::optional result_projection; Pipe pipe = output_each_partition_through_separate_port ? 
groupStreamsByPartition(result, result_projection) @@ -2005,7 +2004,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result.sampling.use_sampling) { - auto sampling_actions = std::make_shared(result.sampling.filter_expression); + auto sampling_actions = std::make_shared(result.sampling.filter_expression->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( @@ -2018,12 +2017,12 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons Block cur_header = pipe.getHeader(); - auto append_actions = [&result_projection](ActionsDAGPtr actions) + auto append_actions = [&result_projection](ActionsDAG actions) { if (!result_projection) result_projection = std::move(actions); else - result_projection = ActionsDAG::merge(std::move(*result_projection), std::move(*actions)); + result_projection = ActionsDAG::merge(std::move(*result_projection), std::move(actions)); }; if (result_projection) @@ -2043,7 +2042,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result_projection) { - auto projection_actions = std::make_shared(result_projection); + auto projection_actions = std::make_shared(std::move(*result_projection)); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, projection_actions); @@ -2060,7 +2059,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons ActionsDAG::MatchColumnsMode::Name, true); - auto converting_dag_expr = std::make_shared(convert_actions_dag); + auto converting_dag_expr = std::make_shared(std::move(convert_actions_dag)); pipe.addSimpleTransform([&](const Block & header) { @@ -2130,7 +2129,6 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const prefix.push_back(format_settings.indent_char); prefix.push_back(format_settings.indent_char); - if (prewhere_info->prewhere_actions) { format_settings.out << prefix << "Prewhere filter" << '\n'; format_settings.out << prefix << "Prewhere filter column: " << prewhere_info->prewhere_column_name; @@ -2138,7 +2136,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); expression->describeActions(format_settings.out, prefix); } @@ -2147,7 +2145,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); expression->describeActions(format_settings.out, prefix); } } @@ -2168,12 +2166,11 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_info_map = std::make_unique(); prewhere_info_map->add("Need filter", prewhere_info->need_filter); - if (prewhere_info->prewhere_actions) { std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = 
std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -2183,7 +2180,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index caa8aa2e1bd..a12f53924c3 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -23,7 +23,7 @@ struct MergeTreeDataSelectSamplingData bool read_nothing = false; Float64 used_sample_factor = 1.0; std::shared_ptr filter_function; - ActionsDAGPtr filter_expression; + std::shared_ptr filter_expression; }; struct UsefulSkipIndexes @@ -243,9 +243,9 @@ private: Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); - Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, ActionsDAGPtr & result_projection); + Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); - Pipe groupStreamsByPartition(AnalysisResult & result, ActionsDAGPtr & result_projection); + Pipe groupStreamsByPartition(AnalysisResult & result, std::optional & result_projection); Pipe spreadMarkRangesAmongStreams(RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & column_names); @@ -253,13 +253,13 @@ private: RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & column_names, - ActionsDAGPtr & out_projection, + std::optional & out_projection, const InputOrderInfoPtr & input_order_info); bool doNotMergePartsAcrossPartitionsFinal() const; Pipe spreadMarkRangesAmongStreamsFinal( - RangesInDataParts && parts, size_t num_streams, const Names & origin_column_names, const Names & column_names, ActionsDAGPtr & out_projection); + RangesInDataParts && parts, size_t num_streams, const Names & origin_column_names, const Names & column_names, std::optional & out_projection); ReadFromMergeTree::AnalysisResult getAnalysisResult() const; diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 4136e2d58b4..2080e29ceba 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -459,7 +459,7 @@ Pipe ReadFromSystemNumbersStep::makePipe() chassert(numbers_storage.step != UInt64{0}); /// Build rpn of query filters - KeyCondition condition(filter_actions_dag, context, column_names, key_expression); + KeyCondition condition(filter_actions_dag ? 
&*filter_actions_dag : nullptr, context, column_names, key_expression); if (condition.extractPlainRanges(ranges)) { diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.cpp b/src/Processors/QueryPlan/SourceStepWithFilter.cpp index ad0940b90b9..3de9ae37db0 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.cpp +++ b/src/Processors/QueryPlan/SourceStepWithFilter.cpp @@ -34,9 +34,8 @@ Block SourceStepWithFilter::applyPrewhereActions(Block block, const PrewhereInfo block.erase(prewhere_info->row_level_column_name); } - if (prewhere_info->prewhere_actions) { - block = prewhere_info->prewhere_actions->updateHeader(block); + block = prewhere_info->prewhere_actions.updateHeader(block); auto & prewhere_column = block.getByName(prewhere_info->prewhere_column_name); if (!prewhere_column.type->canBeUsedInBooleanContext()) @@ -102,7 +101,6 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con prefix.push_back(format_settings.indent_char); prefix.push_back(format_settings.indent_char); - if (prewhere_info->prewhere_actions) { format_settings.out << prefix << "Prewhere filter" << '\n'; format_settings.out << prefix << "Prewhere filter column: " << prewhere_info->prewhere_column_name; @@ -110,7 +108,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); expression->describeActions(format_settings.out, prefix); } @@ -119,7 +117,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); expression->describeActions(format_settings.out, prefix); } } @@ -132,12 +130,11 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_info_map = std::make_unique(); prewhere_info_map->add("Need filter", prewhere_info->need_filter); - if (prewhere_info->prewhere_actions) { std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -147,7 +144,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.h 
b/src/Processors/QueryPlan/SourceStepWithFilter.h index ca4ea4f3704..6cea5fd7245 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.h +++ b/src/Processors/QueryPlan/SourceStepWithFilter.h @@ -33,7 +33,8 @@ public: { } - const ActionsDAGPtr & getFilterActionsDAG() const { return filter_actions_dag; } + const std::optional & getFilterActionsDAG() const { return filter_actions_dag; } + std::optional detachFilterActionsDAG() { return std::move(filter_actions_dag); } const SelectQueryInfo & getQueryInfo() const { return query_info; } const PrewhereInfoPtr & getPrewhereInfo() const { return prewhere_info; } @@ -44,9 +45,9 @@ public: const Names & requiredSourceColumns() const { return required_source_columns; } - void addFilter(ActionsDAGPtr filter_dag, std::string column_name) + void addFilter(ActionsDAG filter_dag, std::string column_name) { - filter_nodes.nodes.push_back(&filter_dag->findInOutputs(column_name)); + filter_nodes.nodes.push_back(&filter_dag.findInOutputs(column_name)); filter_dags.push_back(std::move(filter_dag)); } @@ -59,7 +60,7 @@ public: void applyFilters() { applyFilters(std::move(filter_nodes)); - filter_dags = {}; + filter_dags.clear(); } virtual void applyFilters(ActionDAGNodes added_filter_nodes); @@ -80,12 +81,12 @@ protected: ContextPtr context; std::optional limit; - ActionsDAGPtr filter_actions_dag; + std::optional filter_actions_dag; private: /// Will be cleared after applyFilters() is called. ActionDAGNodes filter_nodes; - std::vector filter_dags; + std::vector filter_dags; }; } diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index 10384f6296a..2554053064f 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -29,7 +29,7 @@ TotalsHavingStep::TotalsHavingStep( const DataStream & input_stream_, const AggregateDescriptions & aggregates_, bool overflow_row_, - const ActionsDAGPtr & actions_dag_, + std::optional actions_dag_, const std::string & filter_column_, bool remove_filter_, TotalsMode totals_mode_, @@ -39,7 +39,7 @@ TotalsHavingStep::TotalsHavingStep( input_stream_, TotalsHavingTransform::transformHeader( input_stream_.header, - actions_dag_.get(), + actions_dag_ ? &*actions_dag_ : nullptr, filter_column_, remove_filter_, final_, @@ -47,7 +47,7 @@ TotalsHavingStep::TotalsHavingStep( getTraits(!filter_column_.empty())) , aggregates(aggregates_) , overflow_row(overflow_row_) - , actions_dag(actions_dag_) + , actions_dag(std::move(actions_dag_)) , filter_column_name(filter_column_) , remove_filter(remove_filter_) , totals_mode(totals_mode_) @@ -58,7 +58,7 @@ TotalsHavingStep::TotalsHavingStep( void TotalsHavingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression_actions = actions_dag ? std::make_shared(actions_dag, settings.getActionsSettings()) : nullptr; + auto expression_actions = actions_dag ? std::make_shared(std::move(*actions_dag), settings.getActionsSettings()) : nullptr; auto totals_having = std::make_shared( pipeline.getHeader(), @@ -101,13 +101,16 @@ void TotalsHavingStep::describeActions(FormatSettings & settings) const if (actions_dag) { bool first = true; - auto expression = std::make_shared(actions_dag); - for (const auto & action : expression->getActions()) + if (actions_dag) { - settings.out << prefix << (first ? 
"Actions: " - : " "); - first = false; - settings.out << action.toString() << '\n'; + auto expression = std::make_shared(actions_dag->clone()); + for (const auto & action : expression->getActions()) + { + settings.out << prefix << (first ? "Actions: " + : " "); + first = false; + settings.out << action.toString() << '\n'; + } } } } @@ -118,8 +121,11 @@ void TotalsHavingStep::describeActions(JSONBuilder::JSONMap & map) const if (actions_dag) { map.add("Filter column", filter_column_name); - auto expression = std::make_shared(actions_dag); - map.add("Expression", expression->toTree()); + if (actions_dag) + { + auto expression = std::make_shared(actions_dag->clone()); + map.add("Expression", expression->toTree()); + } } } @@ -129,7 +135,7 @@ void TotalsHavingStep::updateOutputStream() input_streams.front(), TotalsHavingTransform::transformHeader( input_streams.front().header, - actions_dag.get(), + getActions(), filter_column_name, remove_filter, final, diff --git a/src/Processors/QueryPlan/TotalsHavingStep.h b/src/Processors/QueryPlan/TotalsHavingStep.h index a81bc7bb1a9..4b414d41c57 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.h +++ b/src/Processors/QueryPlan/TotalsHavingStep.h @@ -1,13 +1,11 @@ #pragma once #include #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - enum class TotalsMode : uint8_t; /// Execute HAVING and calculate totals. See TotalsHavingTransform. @@ -18,7 +16,7 @@ public: const DataStream & input_stream_, const AggregateDescriptions & aggregates_, bool overflow_row_, - const ActionsDAGPtr & actions_dag_, + std::optional actions_dag_, const std::string & filter_column_, bool remove_filter_, TotalsMode totals_mode_, @@ -32,7 +30,7 @@ public: void describeActions(JSONBuilder::JSONMap & map) const override; void describeActions(FormatSettings & settings) const override; - const ActionsDAGPtr & getActions() const { return actions_dag; } + const ActionsDAG * getActions() const { return actions_dag ? &*actions_dag : nullptr; } private: void updateOutputStream() override; @@ -40,7 +38,7 @@ private: const AggregateDescriptions aggregates; bool overflow_row; - ActionsDAGPtr actions_dag; + std::optional actions_dag; String filter_column_name; bool remove_filter; TotalsMode totals_mode; diff --git a/src/Processors/QueryPlan/WindowStep.h b/src/Processors/QueryPlan/WindowStep.h index 74a0e5930c7..d79cd7fd45e 100644 --- a/src/Processors/QueryPlan/WindowStep.h +++ b/src/Processors/QueryPlan/WindowStep.h @@ -6,9 +6,6 @@ namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - class WindowTransform; class WindowStep : public ITransformingStep diff --git a/src/Processors/SourceWithKeyCondition.h b/src/Processors/SourceWithKeyCondition.h index ee155d6f78c..cfd3eb236b7 100644 --- a/src/Processors/SourceWithKeyCondition.h +++ b/src/Processors/SourceWithKeyCondition.h @@ -16,13 +16,13 @@ protected: /// Represents pushed down filters in source std::shared_ptr key_condition; - void setKeyConditionImpl(const ActionsDAGPtr & filter_actions_dag, ContextPtr context, const Block & keys) + void setKeyConditionImpl(const std::optional & filter_actions_dag, ContextPtr context, const Block & keys) { key_condition = std::make_shared( - filter_actions_dag, + filter_actions_dag ? 
&*filter_actions_dag : nullptr, context, keys.getNames(), - std::make_shared(std::make_shared(keys.getColumnsWithTypeAndName()))); + std::make_shared(ActionsDAG(keys.getColumnsWithTypeAndName()))); } public: @@ -33,6 +33,6 @@ public: virtual void setKeyCondition(const std::shared_ptr & key_condition_) { key_condition = key_condition_; } /// Set key_condition created by filter_actions_dag and context. - virtual void setKeyCondition(const ActionsDAGPtr & /*filter_actions_dag*/, ContextPtr /*context*/) { } + virtual void setKeyCondition(const std::optional & /*filter_actions_dag*/, ContextPtr /*context*/) { } }; } diff --git a/src/Processors/Transforms/AddingDefaultsTransform.cpp b/src/Processors/Transforms/AddingDefaultsTransform.cpp index 7945b3999c1..da4d3a0041b 100644 --- a/src/Processors/Transforms/AddingDefaultsTransform.cpp +++ b/src/Processors/Transforms/AddingDefaultsTransform.cpp @@ -178,7 +178,7 @@ void AddingDefaultsTransform::transform(Chunk & chunk) auto dag = evaluateMissingDefaults(evaluate_block, header.getNamesAndTypesList(), columns, context, false); if (dag) { - auto actions = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes), true); + auto actions = std::make_shared(std::move(*dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes), true); actions->execute(evaluate_block); } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index bb38c3e1dc5..9601f821cc8 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -203,7 +203,7 @@ FillingTransform::FillingTransform( , use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_) { if (interpolate_description) - interpolate_actions = std::make_shared(interpolate_description->actions); + interpolate_actions = std::make_shared(interpolate_description->actions.clone()); std::vector is_fill_column(header_.columns()); for (size_t i = 0, size = fill_description.size(); i < size; ++i) diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 04a59cc3b7e..78e0b02c38b 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -8,7 +8,6 @@ #include #include #include -#include #include diff --git a/src/QueryPipeline/RemoteQueryExecutorReadContext.h b/src/QueryPipeline/RemoteQueryExecutorReadContext.h index b8aa8bb9111..fbd55278f86 100644 --- a/src/QueryPipeline/RemoteQueryExecutorReadContext.h +++ b/src/QueryPipeline/RemoteQueryExecutorReadContext.h @@ -69,7 +69,7 @@ private: /// * timer is a timerfd descriptor to manually check socket timeout /// * pipe_fd is a pipe we use to cancel query and socket polling by executor. /// We put those descriptors into our own epoll which is used by external executor. 
- TimerDescriptor timer{CLOCK_MONOTONIC, 0}; + TimerDescriptor timer; Poco::Timespan timeout; AsyncEventTimeoutType timeout_type; std::atomic_bool is_timer_alarmed = false; diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 9063437c065..089fb5c585c 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -516,7 +516,7 @@ void StorageHive::initMinMaxIndexExpression() partition_names = partition_name_types.getNames(); partition_types = partition_name_types.getTypes(); partition_minmax_idx_expr = std::make_shared( - std::make_shared(partition_name_types), ExpressionActionsSettings::fromContext(getContext())); + ActionsDAG(partition_name_types), ExpressionActionsSettings::fromContext(getContext())); } NamesAndTypesList all_name_types = metadata_snapshot->getColumns().getAllPhysical(); @@ -526,7 +526,7 @@ void StorageHive::initMinMaxIndexExpression() hivefile_name_types.push_back(column); } hivefile_minmax_idx_expr = std::make_shared( - std::make_shared(hivefile_name_types), ExpressionActionsSettings::fromContext(getContext())); + ActionsDAG(hivefile_name_types), ExpressionActionsSettings::fromContext(getContext())); } ASTPtr StorageHive::extractKeyExpressionList(const ASTPtr & node) @@ -583,7 +583,7 @@ static HiveFilePtr createHiveFile( HiveFiles StorageHive::collectHiveFilesFromPartition( const Apache::Hadoop::Hive::Partition & partition, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -681,7 +681,7 @@ StorageHive::listDirectory(const String & path, const HiveTableMetadataPtr & hiv HiveFilePtr StorageHive::getHiveFileIfNeeded( const FileInfo & file_info, const FieldVector & fields, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const ContextPtr & context_, PruneLevel prune_level) const @@ -828,7 +828,7 @@ void ReadFromHive::createFiles() if (hive_files) return; - hive_files = storage->collectHiveFiles(num_streams, filter_actions_dag, hive_table_metadata, fs, context); + hive_files = storage->collectHiveFiles(num_streams, filter_actions_dag ? 
&*filter_actions_dag : nullptr, hive_table_metadata, fs, context); LOG_INFO(log, "Collect {} hive files to read", hive_files->size()); } @@ -950,7 +950,7 @@ void ReadFromHive::initializePipeline(QueryPipelineBuilder & pipeline, const Bui HiveFiles StorageHive::collectHiveFiles( size_t max_threads, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -1023,12 +1023,12 @@ SinkToStoragePtr StorageHive::write(const ASTPtr & /*query*/, const StorageMetad std::optional StorageHive::totalRows(const Settings & settings) const { /// query_info is not used when prune_level == PruneLevel::None - return totalRowsImpl(settings, nullptr, getContext(), PruneLevel::None); + return totalRowsImpl(settings, {}, getContext(), PruneLevel::None); } -std::optional StorageHive::totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) const +std::optional StorageHive::totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr context_) const { - return totalRowsImpl(context_->getSettingsRef(), filter_actions_dag, context_, PruneLevel::Partition); + return totalRowsImpl(context_->getSettingsRef(), &filter_actions_dag, context_, PruneLevel::Partition); } void StorageHive::checkAlterIsPossible(const AlterCommands & commands, ContextPtr /*local_context*/) const @@ -1043,7 +1043,7 @@ void StorageHive::checkAlterIsPossible(const AlterCommands & commands, ContextPt } std::optional -StorageHive::totalRowsImpl(const Settings & settings, const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const +StorageHive::totalRowsImpl(const Settings & settings, const ActionsDAG * filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const { /// Row-based format like Text doesn't support totalRowsByPartitionPredicate if (!supportsSubsetOfColumns()) diff --git a/src/Storages/Hive/StorageHive.h b/src/Storages/Hive/StorageHive.h index 8a457dd6e01..e16df22e138 100644 --- a/src/Storages/Hive/StorageHive.h +++ b/src/Storages/Hive/StorageHive.h @@ -57,7 +57,7 @@ public: bool supportsSubsetOfColumns() const; std::optional totalRows(const Settings & settings) const override; - std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) const override; + std::optional totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr context_) const override; void checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const override; protected: @@ -90,7 +90,7 @@ private: HiveFiles collectHiveFiles( size_t max_threads, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -98,7 +98,7 @@ private: HiveFiles collectHiveFilesFromPartition( const Apache::Hadoop::Hive::Partition & partition, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -107,7 +107,7 @@ private: HiveFilePtr getHiveFileIfNeeded( const FileInfo & file_info, const FieldVector & fields, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const ContextPtr & context_, PruneLevel prune_level = PruneLevel::Max) const; @@ 
-115,7 +115,7 @@ private: void lazyInitialize(); std::optional - totalRowsImpl(const Settings & settings, const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const; + totalRowsImpl(const Settings & settings, const ActionsDAG * filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const; String hive_metastore_url; diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 3b9c6ff28fa..755d71df531 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -237,7 +237,7 @@ StorageID IStorage::getStorageID() const return storage_id; } -ConditionSelectivityEstimator IStorage::getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const +ConditionSelectivityEstimator IStorage::getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAG *, ContextPtr) const { return {}; } @@ -325,9 +325,8 @@ std::string PrewhereInfo::dump() const ss << "row_level_filter " << row_level_filter->dumpDAG() << "\n"; } - if (prewhere_actions) { - ss << "prewhere_actions " << prewhere_actions->dumpDAG() << "\n"; + ss << "prewhere_actions " << prewhere_actions.dumpDAG() << "\n"; } ss << "remove_prewhere_column " << remove_prewhere_column @@ -341,10 +340,8 @@ std::string FilterDAGInfo::dump() const WriteBufferFromOwnString ss; ss << "FilterDAGInfo for column '" << column_name <<"', do_remove_column " << do_remove_column << "\n"; - if (actions) - { - ss << "actions " << actions->dumpDAG() << "\n"; - } + + ss << "actions " << actions.dumpDAG() << "\n"; return ss.str(); } diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 6217470780d..0477a08b0d2 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -135,7 +135,7 @@ public: /// Returns true if the storage supports queries with the PREWHERE section. virtual bool supportsPrewhere() const { return false; } - virtual ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const; + virtual ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAG *, ContextPtr) const; /// Returns which columns supports PREWHERE, or empty std::nullopt if all columns is supported. /// This is needed for engines whose aggregates data from multiple tables, like Merge. @@ -684,7 +684,7 @@ public: virtual std::optional totalRows(const Settings &) const { return {}; } /// Same as above but also take partition predicate into account. 
- virtual std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr &, ContextPtr) const { return {}; } + virtual std::optional totalRowsByPartitionPredicate(const ActionsDAG &, ContextPtr) const { return {}; } /// If it is possible to quickly determine exact number of bytes for the table on storage: /// - memory (approximated, resident) diff --git a/src/Storages/KVStorageUtils.cpp b/src/Storages/KVStorageUtils.cpp index 94319aef3b8..88783246e10 100644 --- a/src/Storages/KVStorageUtils.cpp +++ b/src/Storages/KVStorageUtils.cpp @@ -231,7 +231,7 @@ bool traverseDAGFilter( } std::pair getFilterKeys( - const String & primary_key, const DataTypePtr & primary_key_type, const ActionsDAGPtr & filter_actions_dag, const ContextPtr & context) + const String & primary_key, const DataTypePtr & primary_key_type, const std::optional & filter_actions_dag, const ContextPtr & context) { if (!filter_actions_dag) return {{}, true}; diff --git a/src/Storages/KVStorageUtils.h b/src/Storages/KVStorageUtils.h index e20a1ce4f37..64108290270 100644 --- a/src/Storages/KVStorageUtils.h +++ b/src/Storages/KVStorageUtils.h @@ -22,7 +22,7 @@ std::pair getFilterKeys( const std::string & primary_key, const DataTypePtr & primary_key_type, const SelectQueryInfo & query_info, const ContextPtr & context); std::pair getFilterKeys( - const String & primary_key, const DataTypePtr & primary_key_type, const ActionsDAGPtr & filter_actions_dag, const ContextPtr & context); + const String & primary_key, const DataTypePtr & primary_key_type, const std::optional & filter_actions_dag, const ContextPtr & context); template void fillColumns(const K & key, const V & value, size_t key_pos, const Block & header, MutableColumns & columns) diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index 2a697fa5654..7e43966556e 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -160,7 +160,7 @@ KeyDescription KeyDescription::buildEmptyKey() { KeyDescription result; result.expression_list_ast = std::make_shared(); - result.expression = std::make_shared(std::make_shared(), ExpressionActionsSettings{}); + result.expression = std::make_shared(ActionsDAG(), ExpressionActionsSettings{}); return result; } diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index 4ad7f6ef991..264b2b397f4 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -163,8 +163,8 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns if (dag) { dag->addMaterializingOutputActions(); - auto actions = std::make_shared< - ExpressionActions>(std::move(dag), + auto actions = std::make_shared( + std::move(*dag), ExpressionActionsSettings::fromSettings(data_part_info_for_read->getContext()->getSettingsRef())); actions->execute(additional_columns); } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 40eef8e7431..69bffac9160 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -629,7 +629,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { const auto & index_hint_dag = index_hint->getActions(); - children = index_hint_dag->getOutputs(); + children = index_hint_dag.getOutputs(); for (auto & arg : children) arg = &cloneASTWithInversionPushDown(*arg, inverted_dag, to_inverted, context, need_inversion); @@ 
-696,22 +696,22 @@ const std::unordered_map KeyConditi {"hilbertEncode", SpaceFillingCurveType::Hilbert} }; -ActionsDAGPtr KeyCondition::cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context) +ActionsDAG KeyCondition::cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context) { - auto res = std::make_shared(); + ActionsDAG res; std::unordered_map to_inverted; for (auto & node : nodes) - node = &DB::cloneASTWithInversionPushDown(*node, *res, to_inverted, context, false); + node = &DB::cloneASTWithInversionPushDown(*node, res, to_inverted, context, false); if (nodes.size() > 1) { auto function_builder = FunctionFactory::instance().get("and", context); - nodes = {&res->addFunction(function_builder, std::move(nodes), "")}; + nodes = {&res.addFunction(function_builder, std::move(nodes), "")}; } - res->getOutputs().swap(nodes); + res.getOutputs().swap(nodes); return res; } @@ -730,7 +730,7 @@ Block KeyCondition::getBlockWithConstants( if (syntax_analyzer_result) { auto actions = ExpressionAnalyzer(query, syntax_analyzer_result, context).getConstActionsDAG(); - for (const auto & action_node : actions->getOutputs()) + for (const auto & action_node : actions.getOutputs()) { if (action_node->column) result.insert(ColumnWithTypeAndName{action_node->column, action_node->result_type, action_node->result_name}); @@ -785,7 +785,7 @@ void KeyCondition::getAllSpaceFillingCurves() } KeyCondition::KeyCondition( - ActionsDAGPtr filter_dag, + const ActionsDAG * filter_dag, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr_, @@ -826,9 +826,9 @@ KeyCondition::KeyCondition( * are pushed down and applied (when possible) to leaf nodes. */ auto inverted_dag = cloneASTWithInversionPushDown({filter_dag->getOutputs().at(0)}, context); - assert(inverted_dag->getOutputs().size() == 1); + assert(inverted_dag.getOutputs().size() == 1); - const auto * inverted_dag_filter_node = inverted_dag->getOutputs()[0]; + const auto * inverted_dag_filter_node = inverted_dag.getOutputs()[0]; RPNBuilder builder(inverted_dag_filter_node, context, [&](const RPNBuilderTreeNode & node, RPNElement & out) { diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 9e2218d7a29..e9343ec08ea 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -42,7 +42,7 @@ class KeyCondition public: /// Construct key condition from ActionsDAG nodes KeyCondition( - ActionsDAGPtr filter_dag, + const ActionsDAG * filter_dag, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr, @@ -134,7 +134,7 @@ public: DataTypePtr current_type, bool single_point = false); - static ActionsDAGPtr cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context); + static ActionsDAG cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context); bool matchesExactContinuousRange() const; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 30a4a7caa0f..2e10f5a0227 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -474,7 +474,7 @@ StoragePolicyPtr MergeTreeData::getStoragePolicy() const } ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByPredicate( - const StorageSnapshotPtr & storage_snapshot, const ActionsDAGPtr & filter_dag, ContextPtr local_context) const + 
const StorageSnapshotPtr & storage_snapshot, const ActionsDAG * filter_dag, ContextPtr local_context) const { if (!local_context->getSettingsRef().allow_statistics_optimize) return {}; @@ -750,7 +750,7 @@ ExpressionActionsPtr MergeTreeData::getMinMaxExpr(const KeyDescription & partiti if (!partition_key.column_names.empty()) partition_key_columns = partition_key.expression->getRequiredColumnsWithTypes(); - return std::make_shared(std::make_shared(partition_key_columns), settings); + return std::make_shared(ActionsDAG(partition_key_columns), settings); } Names MergeTreeData::getMinMaxColumnsNames(const KeyDescription & partition_key) @@ -1138,7 +1138,7 @@ Block MergeTreeData::getBlockWithVirtualsForFilter( std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( - const ActionsDAGPtr & filter_actions_dag, ContextPtr local_context, const DataPartsVector & parts) const + const ActionsDAG & filter_actions_dag, ContextPtr local_context, const DataPartsVector & parts) const { if (parts.empty()) return 0; @@ -1146,7 +1146,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false); if (!filter_dag) return {}; @@ -1156,7 +1156,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( if (!virtual_columns_block.has(input->result_name)) valid = false; - PartitionPruner partition_pruner(metadata_snapshot, filter_dag, local_context, true /* strict */); + PartitionPruner partition_pruner(metadata_snapshot, &*filter_dag, local_context, true /* strict */); if (partition_pruner.isUseless() && !valid) return {}; @@ -1164,7 +1164,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( if (valid) { virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, parts); - VirtualColumnUtils::filterBlockWithDAG(filter_dag, virtual_columns_block, local_context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter_dag), local_context), virtual_columns_block); part_values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part"); if (part_values.empty()) return 0; @@ -4405,25 +4405,25 @@ bool MergeTreeData::tryRemovePartImmediately(DataPartPtr && part) size_t MergeTreeData::getTotalActiveSizeInBytes() const { - return total_active_size_bytes.load(std::memory_order_acquire); + return total_active_size_bytes.load(); } size_t MergeTreeData::getTotalActiveSizeInRows() const { - return total_active_size_rows.load(std::memory_order_acquire); + return total_active_size_rows.load(); } size_t MergeTreeData::getActivePartsCount() const { - return total_active_size_parts.load(std::memory_order_acquire); + return total_active_size_parts.load(); } size_t MergeTreeData::getOutdatedPartsCount() const { - return total_outdated_parts_count.load(std::memory_order_relaxed); + return total_outdated_parts_count.load(); } size_t MergeTreeData::getNumberOfOutdatedPartsWithExpiredRemovalTime() const @@ -6839,7 +6839,7 @@ using PartitionIdToMaxBlock = std::unordered_map; Block MergeTreeData::getMinMaxCountProjectionBlock( const StorageMetadataPtr 
& metadata_snapshot, const Names & required_columns, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, const DataPartsVector & parts, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context) const @@ -7078,7 +7078,7 @@ ActionDAGNodes MergeTreeData::getFiltersForPrimaryKeyAnalysis(const InterpreterS ActionDAGNodes filter_nodes; if (auto additional_filter_info = select.getAdditionalQueryInfo()) - filter_nodes.nodes.push_back(&additional_filter_info->actions->findInOutputs(additional_filter_info->column_name)); + filter_nodes.nodes.push_back(&additional_filter_info->actions.findInOutputs(additional_filter_info->column_name)); if (before_where) filter_nodes.nodes.push_back(&before_where->dag.findInOutputs(where_column_name)); @@ -8184,16 +8184,16 @@ void MergeTreeData::removePartContributionToDataVolume(const DataPartPtr & part) void MergeTreeData::increaseDataVolume(ssize_t bytes, ssize_t rows, ssize_t parts) { - total_active_size_bytes.fetch_add(bytes, std::memory_order_acq_rel); - total_active_size_rows.fetch_add(rows, std::memory_order_acq_rel); - total_active_size_parts.fetch_add(parts, std::memory_order_acq_rel); + total_active_size_bytes.fetch_add(bytes); + total_active_size_rows.fetch_add(rows); + total_active_size_parts.fetch_add(parts); } void MergeTreeData::setDataVolume(size_t bytes, size_t rows, size_t parts) { - total_active_size_bytes.store(bytes, std::memory_order_release); - total_active_size_rows.store(rows, std::memory_order_release); - total_active_size_parts.store(parts, std::memory_order_release); + total_active_size_bytes.store(bytes); + total_active_size_rows.store(rows); + total_active_size_parts.store(parts); } bool MergeTreeData::insertQueryIdOrThrow(const String & query_id, size_t max_queries) const diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index d185a5262fd..6662df3db84 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -403,7 +403,7 @@ public: Block getMinMaxCountProjectionBlock( const StorageMetadataPtr & metadata_snapshot, const Names & required_columns, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, const DataPartsVector & parts, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context) const; @@ -426,7 +426,7 @@ public: bool supportsPrewhere() const override { return true; } - ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const override; + ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAG *, ContextPtr) const override; bool supportsFinal() const override; @@ -1227,7 +1227,7 @@ protected: boost::iterator_range range, const ColumnsDescription & storage_columns); std::optional totalRowsByPartitionPredicateImpl( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context, const DataPartsVector & parts) const; + const ActionsDAG & filter_actions_dag, ContextPtr context, const DataPartsVector & parts) const; static decltype(auto) getStateModifier(DataPartState state) { diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index c5214f0f3d3..a6ef0063069 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -430,7 +430,7 @@ MergeTreeDataSelectSamplingData 
MergeTreeDataSelectExecutor::getSampling( ASTPtr query = sampling.filter_function; auto syntax_result = TreeRewriter(context).analyze(query, available_real_columns); - sampling.filter_expression = ExpressionAnalyzer(sampling.filter_function, syntax_result, context).getActionsDAG(false); + sampling.filter_expression = std::make_shared(ExpressionAnalyzer(sampling.filter_function, syntax_result, context).getActionsDAG(false)); } } @@ -444,7 +444,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( } void MergeTreeDataSelectExecutor::buildKeyConditionFromPartOffset( - std::optional & part_offset_condition, const ActionsDAGPtr & filter_dag, ContextPtr context) + std::optional & part_offset_condition, const ActionsDAG * filter_dag, ContextPtr context) { if (!filter_dag) return; @@ -465,10 +465,10 @@ void MergeTreeDataSelectExecutor::buildKeyConditionFromPartOffset( return; part_offset_condition.emplace(KeyCondition{ - dag, + &*dag, context, sample.getNames(), - std::make_shared(std::make_shared(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), + std::make_shared(ActionsDAG(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), {}}); } @@ -476,7 +476,7 @@ std::optional> MergeTreeDataSelectExecutor::filterPar const StorageMetadataPtr & metadata_snapshot, const MergeTreeData & data, const MergeTreeData::DataPartsVector & parts, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context) { if (!filter_dag) @@ -488,7 +488,7 @@ std::optional> MergeTreeDataSelectExecutor::filterPar return {}; auto virtual_columns_block = data.getBlockWithVirtualsForFilter(metadata_snapshot, parts); - VirtualColumnUtils::filterBlockWithDAG(dag, virtual_columns_block, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*dag), context), virtual_columns_block); return VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part"); } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 788355c1e59..39bff5eacd6 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -161,7 +161,7 @@ public: /// If possible, construct optional key condition from predicates containing _part_offset column. static void buildKeyConditionFromPartOffset( - std::optional & part_offset_condition, const ActionsDAGPtr & filter_dag, ContextPtr context); + std::optional & part_offset_condition, const ActionsDAG * filter_dag, ContextPtr context); /// If possible, filter using expression on virtual columns. /// Example: SELECT count() FROM table WHERE _part = 'part_name' @@ -170,7 +170,7 @@ public: const StorageMetadataPtr & metadata_snapshot, const MergeTreeData & data, const MergeTreeData::DataPartsVector & parts, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context); /// Filter parts using minmax index and partition key. 
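
Aside: the hunks above repeatedly swap `const ActionsDAGPtr &` parameters for a non-owning `const ActionsDAG *`, with callers that hold a `std::optional<ActionsDAG>` converting at the call site via `dag ? &*dag : nullptr`. The following is a minimal, self-contained sketch of that ownership split; `Dag` and `ReadStep` are hypothetical stand-ins, not the real ClickHouse classes.

    #include <iostream>
    #include <optional>
    #include <string>

    // Stand-in for ActionsDAG: the owner keeps it by value inside std::optional,
    // consumers only observe it through a raw pointer (nullptr == "no filter").
    struct Dag
    {
        std::string description;
    };

    // Consumer side: a null pointer simply means "no predicate was pushed down".
    void analyzeFilter(const Dag * filter)
    {
        if (!filter)
        {
            std::cout << "no filter, scan everything\n";
            return;
        }
        std::cout << "filter: " << filter->description << '\n';
    }

    // Owner side: optional ownership instead of shared_ptr, converted at the call site.
    struct ReadStep
    {
        std::optional<Dag> filter_dag;

        void apply() const
        {
            analyzeFilter(filter_dag ? &*filter_dag : nullptr);
        }
    };

    int main()
    {
        ReadStep no_filter;
        no_filter.apply();

        ReadStep with_filter{Dag{"x > 10"}};
        with_filter.apply();
    }

The point of the pattern is that a single owner keeps the DAG by value, and everything downstream only needs to know whether a filter exists and, if so, where it lives.
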
diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index c66aa2373e7..497e86334f3 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -333,7 +333,7 @@ MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const Selec return std::make_shared(index, query, distance_function, context); }; -MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const ActionsDAGPtr &, ContextPtr) const +MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); } diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h index d511ab84859..282920c608e 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h @@ -99,7 +99,7 @@ public: MergeTreeIndexGranulePtr createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr &, ContextPtr) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; bool isVectorSearch() const override { return true; } private: diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index fc5147bb56c..c6a00751f25 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -201,7 +201,7 @@ bool maybeTrueOnBloomFilter(const IColumn * hash_column, const BloomFilterPtr & } MergeTreeIndexConditionBloomFilter::MergeTreeIndexConditionBloomFilter( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_) + const ActionsDAG * filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_) : WithContext(context_), header(header_), hash_functions(hash_functions_) { if (!filter_actions_dag) @@ -897,7 +897,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexBloomFilter::createIndexAggregator(con return std::make_shared(bits_per_row, hash_functions, index.column_names); } -MergeTreeIndexConditionPtr MergeTreeIndexBloomFilter::createIndexCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const +MergeTreeIndexConditionPtr MergeTreeIndexBloomFilter::createIndexCondition(const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(filter_actions_dag, context, index.sample_block, hash_functions); } diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h index d66c4b8b6ca..bd1b137176a 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h @@ -69,7 +69,7 @@ public: std::vector> predicate; }; - MergeTreeIndexConditionBloomFilter(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_); + MergeTreeIndexConditionBloomFilter(const ActionsDAG * filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_); bool alwaysUnknownOrTrue() const override; @@ 
-142,7 +142,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG * filter_actions_dag, ContextPtr context) const override; private: size_t bits_per_row; diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp index 8cf58687125..5b6813d12e3 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp @@ -138,7 +138,7 @@ void MergeTreeIndexAggregatorBloomFilterText::update(const Block & block, size_t } MergeTreeConditionBloomFilterText::MergeTreeConditionBloomFilterText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & index_sample_block, const BloomFilterParameters & params_, @@ -733,7 +733,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexBloomFilterText::createIndexAggregator } MergeTreeIndexConditionPtr MergeTreeIndexBloomFilterText::createIndexCondition( - const ActionsDAGPtr & filter_dag, ContextPtr context) const + const ActionsDAG * filter_dag, ContextPtr context) const { return std::make_shared(filter_dag, context, index.sample_block, params, token_extractor.get()); } diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h index 6fd969030df..fe042884550 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h @@ -62,7 +62,7 @@ class MergeTreeConditionBloomFilterText final : public IMergeTreeIndexCondition { public: MergeTreeConditionBloomFilterText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & index_sample_block, const BloomFilterParameters & params_, @@ -163,7 +163,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_dag, ContextPtr context) const override; + const ActionsDAG * filter_dag, ContextPtr context) const override; BloomFilterParameters params; /// Function for selecting next token. 
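
Aside: the same signature change runs through every skipping-index class: `createIndexCondition` now receives `const ActionsDAG *` instead of a shared pointer, so each condition must treat a null DAG as "nothing is known". A rough sketch of that contract, again with hypothetical stand-in types rather than the real `IMergeTreeIndex` hierarchy:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct Dag { std::vector<std::string> predicates; };  // hypothetical stand-in for ActionsDAG

    // Stand-in for an index condition: "unknown or true" means the index prunes nothing.
    struct IndexCondition
    {
        virtual ~IndexCondition() = default;
        virtual bool alwaysUnknownOrTrue() const = 0;
    };

    struct BloomLikeCondition : IndexCondition
    {
        // A null DAG means no predicate was pushed down, so nothing can be pruned.
        explicit BloomLikeCondition(const Dag * filter)
            : has_predicates(filter && !filter->predicates.empty()) {}

        bool alwaysUnknownOrTrue() const override { return !has_predicates; }

        bool has_predicates;
    };

    // Stand-in for the factory method that the diff changes to take a raw pointer.
    std::unique_ptr<IndexCondition> createIndexCondition(const Dag * filter)
    {
        return std::make_unique<BloomLikeCondition>(filter);
    }

    int main()
    {
        Dag dag{{"key = 42"}};
        std::cout << createIndexCondition(&dag)->alwaysUnknownOrTrue() << '\n';     // 0: may prune
        std::cout << createIndexCondition(nullptr)->alwaysUnknownOrTrue() << '\n';  // 1: cannot prune
    }
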
diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index 47ce24b91eb..cd6af68ebcc 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -186,7 +186,7 @@ void MergeTreeIndexAggregatorFullText::update(const Block & block, size_t * pos, } MergeTreeConditionFullText::MergeTreeConditionFullText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context_, const Block & index_sample_block, const GinFilterParameters & params_, @@ -768,7 +768,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexFullText::createIndexAggregatorForPart } MergeTreeIndexConditionPtr MergeTreeIndexFullText::createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const + const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(filter_actions_dag, context, index.sample_block, params, token_extractor.get()); }; diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.h b/src/Storages/MergeTree/MergeTreeIndexFullText.h index 1a5e848e5ac..8e0b1a22acb 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.h +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.h @@ -63,7 +63,7 @@ class MergeTreeConditionFullText final : public IMergeTreeIndexCondition, WithCo { public: MergeTreeConditionFullText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & index_sample_block, const GinFilterParameters & params_, @@ -170,7 +170,7 @@ public: MergeTreeIndexGranulePtr createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexAggregatorPtr createIndexAggregatorForPart(const GinIndexStorePtr & store, const MergeTreeWriterSettings & /*settings*/) const override; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG * filter_actions_dag, ContextPtr context) const override; GinFilterParameters params; /// Function for selecting next token. 
diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp index 0995e2724ec..cd8065ecadf 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp @@ -79,7 +79,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexHypothesis::createIndexAggregator(cons } MergeTreeIndexConditionPtr MergeTreeIndexHypothesis::createIndexCondition( - const ActionsDAGPtr &, ContextPtr) const + const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not supported"); } diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.h b/src/Storages/MergeTree/MergeTreeIndexHypothesis.h index 130e708d76f..e60335fe724 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesis.h +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.h @@ -69,7 +69,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + const ActionsDAG * filter_actions_dag, ContextPtr context) const override; MergeTreeIndexMergedConditionPtr createIndexMergedCondition( const SelectQueryInfo & query_info, StorageMetadataPtr storage_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index 20dfed8cf8f..c60d63a59ba 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -157,7 +157,7 @@ void MergeTreeIndexAggregatorMinMax::update(const Block & block, size_t * pos, s namespace { -KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) +KeyCondition buildCondition(const IndexDescription & index, const ActionsDAG * filter_actions_dag, ContextPtr context) { return KeyCondition{filter_actions_dag, context, index.column_names, index.expression}; } @@ -165,7 +165,7 @@ KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr } MergeTreeIndexConditionMinMax::MergeTreeIndexConditionMinMax( - const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) + const IndexDescription & index, const ActionsDAG * filter_actions_dag, ContextPtr context) : index_data_types(index.data_types) , condition(buildCondition(index, filter_actions_dag, context)) { @@ -198,7 +198,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexMinMax::createIndexAggregator(const Me } MergeTreeIndexConditionPtr MergeTreeIndexMinMax::createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const + const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(index, filter_actions_dag, context); } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/src/Storages/MergeTree/MergeTreeIndexMinMax.h index dca26fb7b28..c5031ccbb27 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.h +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.h @@ -50,7 +50,7 @@ class MergeTreeIndexConditionMinMax final : public IMergeTreeIndexCondition public: MergeTreeIndexConditionMinMax( const IndexDescription & index, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context); bool alwaysUnknownOrTrue() const override; @@ -77,7 +77,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const 
MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + const ActionsDAG * filter_actions_dag, ContextPtr context) const override; const char* getSerializedFileExtension() const override { return ".idx2"; } MergeTreeIndexFormat getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & path_prefix) const override; /// NOLINT diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 284d47ef9e7..a92df4ac72d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -272,14 +272,14 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() return granule; } -KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) +KeyCondition buildCondition(const IndexDescription & index, const ActionsDAG * filter_actions_dag, ContextPtr context) { return KeyCondition{filter_actions_dag, context, index.column_names, index.expression}; } MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( size_t max_rows_, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context, const IndexDescription & index_description) : index_name(index_description.name) @@ -303,15 +303,15 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( return; auto filter_actions_dag = filter_dag->clone(); - const auto * filter_actions_dag_node = filter_actions_dag->getOutputs().at(0); + const auto * filter_actions_dag_node = filter_actions_dag.getOutputs().at(0); std::unordered_map node_to_result_node; - filter_actions_dag->getOutputs()[0] = &traverseDAG(*filter_actions_dag_node, filter_actions_dag, context, node_to_result_node); + filter_actions_dag.getOutputs()[0] = &traverseDAG(*filter_actions_dag_node, filter_actions_dag, context, node_to_result_node); - filter_actions_dag->removeUnusedActions(); - actions = std::make_shared(filter_actions_dag); + filter_actions_dag.removeUnusedActions(); - actions_output_column_name = filter_actions_dag->getOutputs().at(0)->result_name; + actions_output_column_name = filter_actions_dag.getOutputs().at(0)->result_name; + actions = std::make_shared(std::move(filter_actions_dag)); } bool MergeTreeIndexConditionSet::alwaysUnknownOrTrue() const @@ -346,7 +346,7 @@ bool MergeTreeIndexConditionSet::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx } -static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node & node, const ActionsDAGPtr & result_dag_or_null, ActionsDAG::NodeRawConstPtrs * storage) +static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node & node, ActionsDAG * result_dag_or_null, ActionsDAG::NodeRawConstPtrs * storage) { chassert(node.type == ActionsDAG::ActionType::FUNCTION); if (node.function_base->getName() != "indexHint") @@ -356,17 +356,17 @@ static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node const auto & adaptor = typeid_cast(*node.function_base); const auto & index_hint = typeid_cast(*adaptor.getFunction()); if (!result_dag_or_null) - return index_hint.getActions()->getOutputs(); + return index_hint.getActions().getOutputs(); /// Import the DAG and map argument pointers. 
- ActionsDAGPtr actions_clone = index_hint.getActions()->clone(); + auto actions_clone = index_hint.getActions().clone(); chassert(storage); - result_dag_or_null->mergeNodes(std::move(*actions_clone), storage); + result_dag_or_null->mergeNodes(std::move(actions_clone), storage); return *storage; } const ActionsDAG::Node & MergeTreeIndexConditionSet::traverseDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const { @@ -388,7 +388,7 @@ const ActionsDAG::Node & MergeTreeIndexConditionSet::traverseDAG(const ActionsDA atom_node_ptr->type == ActionsDAG::ActionType::FUNCTION) { auto bit_wrapper_function = FunctionFactory::instance().get("__bitWrapperFunc", context); - result_node = &result_dag->addFunction(bit_wrapper_function, {atom_node_ptr}, {}); + result_node = &result_dag.addFunction(bit_wrapper_function, {atom_node_ptr}, {}); } } else @@ -399,14 +399,14 @@ const ActionsDAG::Node & MergeTreeIndexConditionSet::traverseDAG(const ActionsDA unknown_field_column_with_type.type = std::make_shared(); unknown_field_column_with_type.column = unknown_field_column_with_type.type->createColumnConst(1, UNKNOWN_FIELD); - result_node = &result_dag->addColumn(unknown_field_column_with_type); + result_node = &result_dag.addColumn(unknown_field_column_with_type); } node_to_result_node.emplace(&node, result_node); return *result_node; } -const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDAG::Node & node, ActionsDAGPtr & result_dag, const ContextPtr & context) const +const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDAG::Node & node, ActionsDAG & result_dag, const ContextPtr & context) const { /// Function, literal or column @@ -426,7 +426,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDA const auto * result_node = node_to_check; if (node.type != ActionsDAG::ActionType::INPUT) - result_node = &result_dag->addInput(column_name, node.result_type); + result_node = &result_dag.addInput(column_name, node.result_type); return result_node; } @@ -447,11 +447,11 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDA return nullptr; } - return &result_dag->addFunction(node.function_base, children, {}); + return &result_dag.addFunction(node.function_base, children, {}); } const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const { @@ -469,7 +469,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const Actio auto function_name = node_to_check->function->getName(); ActionsDAG::NodeRawConstPtrs temp_ptrs_to_argument; - const auto & arguments = getArguments(*node_to_check, result_dag, &temp_ptrs_to_argument); + const auto & arguments = getArguments(*node_to_check, &result_dag, &temp_ptrs_to_argument); size_t arguments_size = arguments.size(); if (function_name == "not") @@ -480,7 +480,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const Actio const ActionsDAG::Node * argument = &traverseDAG(*arguments[0], result_dag, context, node_to_result_node); auto bit_swap_last_two_function = FunctionFactory::instance().get("__bitSwapLastTwo", context); - return &result_dag->addFunction(bit_swap_last_two_function, {argument}, {}); + return 
&result_dag.addFunction(bit_swap_last_two_function, {argument}, {}); } else if (function_name == "and" || function_name == "indexHint" || function_name == "or") { @@ -508,7 +508,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const Actio const auto * before_last_argument = children.back(); children.pop_back(); - last_argument = &result_dag->addFunction(function, {before_last_argument, last_argument}, {}); + last_argument = &result_dag.addFunction(function, {before_last_argument, last_argument}, {}); } return last_argument; @@ -584,7 +584,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexSet::createIndexAggregator(const Merge } MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const + const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(max_rows, filter_actions_dag, context, index); } diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h index 168262360fc..ff1f45528d7 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -84,7 +84,7 @@ class MergeTreeIndexConditionSet final : public IMergeTreeIndexCondition public: MergeTreeIndexConditionSet( size_t max_rows_, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context, const IndexDescription & index_description); @@ -95,16 +95,16 @@ public: ~MergeTreeIndexConditionSet() override = default; private: const ActionsDAG::Node & traverseDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const; const ActionsDAG::Node * atomFromDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context) const; const ActionsDAG::Node * operatorFromDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const; @@ -143,7 +143,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + const ActionsDAG * filter_actions_dag, ContextPtr context) const override; size_t max_rows = 0; }; diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp index c9df7210569..59a4b0fbf9c 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp @@ -367,7 +367,7 @@ MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const Sel return std::make_shared(index, query, distance_function, context); }; -MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const ActionsDAGPtr &, ContextPtr) const +MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); } diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.h b/src/Storages/MergeTree/MergeTreeIndexUSearch.h index 5107cfee371..41de94402c9 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.h +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.h @@ -101,7 +101,7 @@ public: MergeTreeIndexGranulePtr 
createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr &, ContextPtr) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; bool isVectorSearch() const override { return true; } private: diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index a9f1fa9378f..1be73e1c811 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -167,7 +167,7 @@ struct IMergeTreeIndex } virtual MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const = 0; + const ActionsDAG * filter_actions_dag, ContextPtr context) const = 0; virtual bool isVectorSearch() const { return false; } diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index 26595fbb36d..a9b77fb6c03 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -329,7 +329,7 @@ void MergeTreePrefetchedReadPool::fillPerPartStatistics() part_stat.sum_marks += range.end - range.begin; const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info - ? prewhere_info->prewhere_actions->getRequiredColumnsNames() + ? prewhere_info->prewhere_actions.getRequiredColumnsNames() : column_names; part_stat.approx_size_of_mark = getApproximateSizeOfGranule(*read_info.data_part, columns); diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 46482bc0959..6d2560bc9c7 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -65,7 +65,7 @@ static size_t calculateMinMarksPerTask( /// Which means in turn that for most of the rows we will read only the columns from prewhere clause. /// So it makes sense to use only them for the estimation. const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info - ? prewhere_info->prewhere_actions->getRequiredColumnsNames() + ? prewhere_info->prewhere_actions.getRequiredColumnsNames() : columns_to_read; const size_t part_compressed_bytes = getApproxSizeOfPart(*part.data_part, columns); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index a6298aab3d9..1a0709faf1c 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -59,7 +59,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( if (prewhere_info) LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", - (prewhere_info->prewhere_actions ? prewhere_info->prewhere_actions->dumpDAG(): std::string("")), + prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? 
prewhere_actions.dump() : std::string(""))); } @@ -80,7 +80,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep row_level_filter_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->row_level_filter, actions_settings), + .actions = std::make_shared(prewhere_info->row_level_filter->clone(), actions_settings), .filter_column_name = prewhere_info->row_level_column_name, .remove_filter_column = true, .need_filter = true, @@ -96,7 +96,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep prewhere_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->prewhere_actions, actions_settings), + .actions = std::make_shared(prewhere_info->prewhere_actions.clone(), actions_settings), .filter_column_name = prewhere_info->prewhere_column_name, .remove_filter_column = prewhere_info->remove_prewhere_column, .need_filter = prewhere_info->need_filter, diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 311720728e7..39aa191a3d2 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -348,7 +348,7 @@ public: MergeTreeData::DataPartPtr data_part_, Names columns_to_read_, bool apply_deleted_mask_, - ActionsDAGPtr filter_, + std::optional filter_, ContextPtr context_, LoggerPtr log_) : ISourceStep(DataStream{.header = storage_snapshot_->getSampleBlockForColumns(columns_to_read_)}) @@ -375,7 +375,7 @@ public: { const auto & primary_key = storage_snapshot->metadata->getPrimaryKey(); const Names & primary_key_column_names = primary_key.column_names; - KeyCondition key_condition(filter, context, primary_key_column_names, primary_key.expression); + KeyCondition key_condition(&*filter, context, primary_key_column_names, primary_key.expression); LOG_DEBUG(log, "Key condition: {}", key_condition.toString()); if (!key_condition.alwaysFalse()) @@ -416,7 +416,7 @@ private: MergeTreeData::DataPartPtr data_part; Names columns_to_read; bool apply_deleted_mask; - ActionsDAGPtr filter; + std::optional filter; ContextPtr context; LoggerPtr log; }; @@ -429,14 +429,14 @@ void createReadFromPartStep( MergeTreeData::DataPartPtr data_part, Names columns_to_read, bool apply_deleted_mask, - ActionsDAGPtr filter, + std::optional filter, ContextPtr context, LoggerPtr log) { auto reading = std::make_unique(type, storage, storage_snapshot, std::move(data_part), std::move(columns_to_read), apply_deleted_mask, - filter, std::move(context), log); + std::move(filter), std::move(context), log); plan.addStep(std::move(reading)); } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h index e6f055f776c..1b05512b9a3 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.h +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h @@ -38,7 +38,7 @@ void createReadFromPartStep( MergeTreeData::DataPartPtr data_part, Names columns_to_read, bool apply_deleted_mask, - ActionsDAGPtr filter, + std::optional filter, ContextPtr context, LoggerPtr log); diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 43e3b0c505a..36ff6c0a4bd 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -10,6 +10,9 @@ 
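A minimal sketch of the ownership convention the MergeTreeSelectProcessor/MergeTreeSequentialSource hunks above converge on, using a hypothetical Dag type rather than the real ActionsDAG/KeyCondition API: the owner keeps the DAG in a std::optional, read-only consumers take a raw const pointer where nullptr means "no filter", and the call site bridges the two with &*filter (or an explicit guard, as below) once it knows the optional is engaged.

#include <iostream>
#include <optional>
#include <string>

struct Dag { std::string expr; };   // stand-in for ActionsDAG

// Read-only consumer: non-owning and nullable, nullptr means "no filter pushed down".
void buildKeyCondition(const Dag * filter)
{
    if (filter)
        std::cout << "key condition from: " << filter->expr << '\n';
    else
        std::cout << "unbounded key condition\n";
}

struct ReadStep
{
    std::optional<Dag> filter;   // owned by value; an empty optional instead of a null shared_ptr

    void initialize() const
    {
        // Bridge optional ownership to the pointer-taking API.
        buildKeyCondition(filter ? &*filter : nullptr);
    }
};

int main()
{
    ReadStep with_filter{Dag{"x > 0"}};
    ReadStep without_filter{std::nullopt};
    with_filter.initialize();
    without_filter.initialize();
}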
namespace DB { +class ActionsDAG; +using ActionsDAGPtr = std::unique_ptr; + namespace ErrorCodes { extern const int LOGICAL_ERROR; @@ -50,7 +53,7 @@ void fillRequiredColumns(const ActionsDAG::Node * node, std::unordered_map; const ActionsDAG::Node & addClonedDAGToDAG( size_t step, const ActionsDAG::Node * original_dag_node, - ActionsDAGPtr new_dag, + const ActionsDAGPtr & new_dag, OriginalToNewNodeMap & node_remap, NodeNameToLastUsedStepMap & node_to_step_map) { @@ -72,7 +75,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { /// If the node is already in the new DAG, return it const auto & node_ref = node_remap.at(node_name); - if (node_ref.dag == new_dag) + if (node_ref.dag == new_dag.get()) return *node_ref.node; /// If the node is known from the previous steps, add it as an input, except for constants @@ -80,7 +83,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { node_ref.dag->addOrReplaceInOutputs(*node_ref.node); const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); - node_remap[node_name] = {new_dag, &new_node}; /// TODO: here we update the node reference. Is it always correct? + node_remap[node_name] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? /// Remember the index of the last step which reuses this node. /// We cannot remove this node from the outputs before that step. @@ -93,7 +96,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( if (original_dag_node->type == ActionsDAG::ActionType::INPUT) { const auto & new_node = new_dag->addInput(original_dag_node->result_name, original_dag_node->result_type); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -102,7 +105,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & new_node = new_dag->addColumn( ColumnWithTypeAndName(original_dag_node->column, original_dag_node->result_type, original_dag_node->result_name)); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -110,7 +113,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & alias_child = addClonedDAGToDAG(step, original_dag_node->children[0], new_dag, node_remap, node_to_step_map); const auto & new_node = new_dag->addAlias(alias_child, original_dag_node->result_name); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -125,7 +128,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( } const auto & new_node = new_dag->addFunction(original_dag_node->function_base, new_children, original_dag_node->result_name); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -133,13 +136,13 @@ const ActionsDAG::Node & addClonedDAGToDAG( } const ActionsDAG::Node & addFunction( - ActionsDAGPtr new_dag, + const ActionsDAGPtr & new_dag, const FunctionOverloadResolverPtr & function, ActionsDAG::NodeRawConstPtrs children, OriginalToNewNodeMap & node_remap) { const auto & new_node = new_dag->addFunction(function, children, ""); - node_remap[new_node.result_name] = {new_dag, &new_node}; + node_remap[new_node.result_name] = {new_dag.get(), &new_node}; return new_node; } @@ -147,7 +150,7 @@ const ActionsDAG::Node & addFunction( /// This is different from ActionsDAG::addCast() because it set the name equal to the original name effectively hiding the value before cast, /// but it might be required for further steps 
with its original uncasted type. const ActionsDAG::Node & addCast( - ActionsDAGPtr dag, + const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, const String & type_name, OriginalToNewNodeMap & node_remap) @@ -173,7 +176,7 @@ const ActionsDAG::Node & addCast( /// 1. produces a result with the proper Nullable or non-Nullable UInt8 type and /// 2. makes sure that the result contains only 0 or 1 values even if the source column contains non-boolean values. const ActionsDAG::Node & addAndTrue( - ActionsDAGPtr dag, + const ActionsDAGPtr & dag, const ActionsDAG::Node & filter_node_to_normalize, OriginalToNewNodeMap & node_remap) { @@ -213,11 +216,11 @@ const ActionsDAG::Node & addAndTrue( /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere) { - if (!prewhere_info || !prewhere_info->prewhere_actions) + if (!prewhere_info) return true; /// 1. List all condition nodes that are combined with AND into PREWHERE condition - const auto & condition_root = prewhere_info->prewhere_actions->findInOutputs(prewhere_info->prewhere_column_name); + const auto & condition_root = prewhere_info->prewhere_actions.findInOutputs(prewhere_info->prewhere_column_name); const bool is_conjunction = (condition_root.type == ActionsDAG::ActionType::FUNCTION && condition_root.function_base->getName() == "and"); if (!is_conjunction) return false; @@ -258,7 +261,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction for (size_t step_index = 0; step_index < condition_groups.size(); ++step_index) { const auto & condition_group = condition_groups[step_index]; - ActionsDAGPtr step_dag = std::make_shared(); + ActionsDAGPtr step_dag = std::make_unique(); String result_name; std::vector new_condition_nodes; @@ -299,11 +302,11 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction } } - steps.push_back({step_dag, result_name}); + steps.push_back({std::move(step_dag), result_name}); } /// 6. Find all outputs of the original DAG - auto original_outputs = prewhere_info->prewhere_actions->getOutputs(); + auto original_outputs = prewhere_info->prewhere_actions.getOutputs(); /// 7. Find all outputs that were computed in the already built DAGs, mark these nodes as outputs in the steps where they were computed /// 8. 
Add computation of the remaining outputs to the last step with the procedure similar to 4 NameSet all_output_names; @@ -345,11 +348,11 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { for (size_t step_index = 0; step_index < steps.size(); ++step_index) { - const auto & step = steps[step_index]; + auto & step = steps[step_index]; PrewhereExprStep new_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(step.actions, actions_settings), + .actions = std::make_shared(std::move(*step.actions), actions_settings), .filter_column_name = step.column_name, /// Don't remove if it's in the list of original outputs .remove_filter_column = diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 44ab12bde2a..43c40dee77d 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -113,7 +113,7 @@ void MergeTreeWhereOptimizer::optimize(SelectQueryInfo & select_query_info, cons LOG_DEBUG(log, "MergeTreeWhereOptimizer: condition \"{}\" moved to PREWHERE", select.prewhere()->formatForLogging(log_queries_cut_to_length)); } -MergeTreeWhereOptimizer::FilterActionsOptimizeResult MergeTreeWhereOptimizer::optimize(const ActionsDAGPtr & filter_dag, +MergeTreeWhereOptimizer::FilterActionsOptimizeResult MergeTreeWhereOptimizer::optimize(const ActionsDAG & filter_dag, const std::string & filter_column_name, const ContextPtr & context, bool is_final) @@ -127,7 +127,7 @@ MergeTreeWhereOptimizer::FilterActionsOptimizeResult MergeTreeWhereOptimizer::op where_optimizer_context.use_statistics = context->getSettingsRef().allow_statistics_optimize; RPNBuilderTreeContext tree_context(context); - RPNBuilderTreeNode node(&filter_dag->findInOutputs(filter_column_name), tree_context); + RPNBuilderTreeNode node(&filter_dag.findInOutputs(filter_column_name), tree_context); auto optimize_result = optimizeImpl(node, where_optimizer_context); if (!optimize_result) diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h index ba6b4660924..a3d035675c6 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h @@ -52,7 +52,7 @@ public: bool fully_moved_to_prewhere = false; }; - FilterActionsOptimizeResult optimize(const ActionsDAGPtr & filter_dag, + FilterActionsOptimizeResult optimize(const ActionsDAG & filter_dag, const std::string & filter_column_name, const ContextPtr & context, bool is_final); diff --git a/src/Storages/MergeTree/PartitionPruner.cpp b/src/Storages/MergeTree/PartitionPruner.cpp index 9de7b238f57..6df7b5aa054 100644 --- a/src/Storages/MergeTree/PartitionPruner.cpp +++ b/src/Storages/MergeTree/PartitionPruner.cpp @@ -4,7 +4,7 @@ namespace DB { -PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict) +PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, const ActionsDAG * filter_actions_dag, ContextPtr context, bool strict) : partition_key(MergeTreePartition::adjustPartitionKey(metadata, context)) , partition_condition(filter_actions_dag, context, partition_key.column_names, partition_key.expression, true /* single_point */) , useless((strict && partition_condition.isRelaxed()) || partition_condition.alwaysUnknownOrTrue()) diff --git a/src/Storages/MergeTree/PartitionPruner.h b/src/Storages/MergeTree/PartitionPruner.h index 
ca24559ca01..d89dfb7b245 100644 --- a/src/Storages/MergeTree/PartitionPruner.h +++ b/src/Storages/MergeTree/PartitionPruner.h @@ -13,7 +13,7 @@ namespace DB class PartitionPruner { public: - PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict = false); + PartitionPruner(const StorageMetadataPtr & metadata, const ActionsDAG * filter_actions_dag, ContextPtr context, bool strict = false); bool canBePruned(const IMergeTreeDataPart & part) const; diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp index 93087b0d3fe..6e963066f39 100644 --- a/src/Storages/MergeTree/RPNBuilder.cpp +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -399,7 +399,7 @@ size_t RPNBuilderFunctionTreeNode::getArgumentsSize() const { const auto * adaptor = typeid_cast(dag_node->function_base.get()); const auto * index_hint = typeid_cast(adaptor->getFunction().get()); - return index_hint->getActions()->getOutputs().size(); + return index_hint->getActions().getOutputs().size(); } return dag_node->children.size(); @@ -427,7 +427,7 @@ RPNBuilderTreeNode RPNBuilderFunctionTreeNode::getArgumentAt(size_t index) const { const auto & adaptor = typeid_cast(*dag_node->function_base); const auto & index_hint = typeid_cast(*adaptor.getFunction()); - return RPNBuilderTreeNode(index_hint.getActions()->getOutputs()[index], tree_context); + return RPNBuilderTreeNode(index_hint.getActions().getOutputs()[index], tree_context); } return RPNBuilderTreeNode(dag_node->children[index], tree_context); diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 086482e330a..e760098f10f 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -77,7 +77,7 @@ StorageObjectStorageSource::~StorageObjectStorageSource() create_reader_pool->wait(); } -void StorageObjectStorageSource::setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) +void StorageObjectStorageSource::setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) { setKeyConditionImpl(filter_actions_dag, context_, read_from_format_info.format_header); } @@ -139,7 +139,10 @@ std::shared_ptr StorageObjectStorageSourc paths.reserve(keys.size()); for (const auto & key : keys) paths.push_back(fs::path(configuration->getNamespace()) / key); - VirtualColumnUtils::filterByPathOrFile(keys, paths, filter_dag, virtual_columns, local_context); + + VirtualColumnUtils::buildSetsForDAG(*filter_dag, local_context); + auto actions = std::make_shared(std::move(*filter_dag)); + VirtualColumnUtils::filterByPathOrFile(keys, paths, actions, virtual_columns); copy_configuration->setPaths(keys); } @@ -506,7 +509,11 @@ StorageObjectStorageSource::GlobIterator::GlobIterator( } recursive = key_with_globs == "/**"; - filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns); + if (auto filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns)) + { + VirtualColumnUtils::buildSetsForDAG(*filter_dag, getContext()); + filter_expr = std::make_shared(std::move(*filter_dag)); + } } else { @@ -570,14 +577,14 @@ StorageObjectStorage::ObjectInfoPtr StorageObjectStorageSource::GlobIterator::ne ++it; } - if (filter_dag) + if (filter_expr) { std::vector paths; paths.reserve(new_batch.size()); for (const auto & object_info : new_batch) 
paths.push_back(getUniqueStoragePathIdentifier(*configuration, *object_info, false)); - VirtualColumnUtils::filterByPathOrFile(new_batch, paths, filter_dag, virtual_columns, getContext()); + VirtualColumnUtils::filterByPathOrFile(new_batch, paths, filter_expr, virtual_columns); LOG_TEST(logger, "Filtered files: {} -> {}", paths.size(), new_batch.size()); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.h b/src/Storages/ObjectStorage/StorageObjectStorageSource.h index 271b38fa75c..e466621e1e1 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.h @@ -46,7 +46,7 @@ public: String getName() const override { return name; } - void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override; + void setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) override; Chunk generate() override; @@ -208,7 +208,7 @@ private: ObjectInfos object_infos; ObjectInfos * read_keys; - ActionsDAGPtr filter_dag; + ExpressionActionsPtr filter_expr; ObjectStorageIteratorPtr object_storage_iterator; bool recursive{false}; std::vector expanded_keys; diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index 14b828e7268..f51a7a913b8 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -36,6 +36,7 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; + extern const int BAD_QUERY_PARAMETER; extern const int QUERY_NOT_ALLOWED; } @@ -150,7 +151,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue( } else if (!configuration->isPathWithGlobs()) { - throw Exception(ErrorCodes::QUERY_NOT_ALLOWED, "ObjectStorageQueue url must either end with '/' or contain globs"); + throw Exception(ErrorCodes::BAD_QUERY_PARAMETER, "ObjectStorageQueue url must either end with '/' or contain globs"); } checkAndAdjustSettings(*queue_settings, engine_args, mode > LoadingStrictnessLevel::CREATE, log); diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index f4c38a52a3f..a9778d5d04d 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -10,7 +10,6 @@ #include #include -#include #include #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -295,7 +295,7 @@ std::shared_ptr StorageMaterializedPostgreSQL::getMaterial auto column_declaration = std::make_shared(); column_declaration->name = std::move(name); - column_declaration->type = makeASTFunction(type); + column_declaration->type = makeASTDataType(type); column_declaration->default_specifier = "MATERIALIZED"; column_declaration->default_expression = std::make_shared(default_value); @@ -312,17 +312,17 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d WhichDataType which(data_type); if (which.isNullable()) - return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); if (which.isArray()) - return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Array", 
getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); /// getName() for decimal returns 'Decimal(precision, scale)', will get an error with it if (which.isDecimal()) { auto make_decimal_expression = [&](std::string type_name) { - auto ast_expression = std::make_shared(); + auto ast_expression = std::make_shared(); ast_expression->name = type_name; ast_expression->arguments = std::make_shared(); @@ -346,7 +346,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d if (which.isDateTime64()) { - auto ast_expression = std::make_shared(); + auto ast_expression = std::make_shared(); ast_expression->name = "DateTime64"; ast_expression->arguments = std::make_shared(); @@ -354,7 +354,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d return ast_expression; } - return std::make_shared(data_type->getName()); + return makeASTDataType(data_type->getName()); } diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index 5070b4a17d8..add12413853 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -80,7 +80,7 @@ void readFinalFromNestedStorage( auto step = std::make_unique( query_plan.getCurrentDataStream(), - actions, + std::move(actions), filter_column_name, false); diff --git a/src/Storages/SelectQueryInfo.cpp b/src/Storages/SelectQueryInfo.cpp index d59ccf0dfaf..c9c96ed5837 100644 --- a/src/Storages/SelectQueryInfo.cpp +++ b/src/Storages/SelectQueryInfo.cpp @@ -18,7 +18,7 @@ std::unordered_map SelectQueryInfo::buildNod std::unordered_map node_name_to_input_node_column; if (planner_context) { - const auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); + auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); for (const auto & [column_identifier, column_name] : table_expression_data.getColumnIdentifierToColumnName()) { diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 848aeb59dad..7ad6a733c6f 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -18,9 +18,6 @@ namespace DB class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - struct PrewhereInfo; using PrewhereInfoPtr = std::shared_ptr; @@ -46,9 +43,9 @@ struct PrewhereInfo { /// Actions for row level security filter. Applied separately before prewhere_actions. /// This actions are separate because prewhere condition should not be executed over filtered rows. - ActionsDAGPtr row_level_filter; + std::optional row_level_filter; /// Actions which are executed on block in order to get filter column for prewhere step. 
- ActionsDAGPtr prewhere_actions; + ActionsDAG prewhere_actions; String row_level_column_name; String prewhere_column_name; bool remove_prewhere_column = false; @@ -56,7 +53,7 @@ struct PrewhereInfo bool generated_by_optimizer = false; PrewhereInfo() = default; - explicit PrewhereInfo(ActionsDAGPtr prewhere_actions_, String prewhere_column_name_) + explicit PrewhereInfo(ActionsDAG prewhere_actions_, String prewhere_column_name_) : prewhere_actions(std::move(prewhere_actions_)), prewhere_column_name(std::move(prewhere_column_name_)) {} std::string dump() const; @@ -68,8 +65,7 @@ struct PrewhereInfo if (row_level_filter) prewhere_info->row_level_filter = row_level_filter->clone(); - if (prewhere_actions) - prewhere_info->prewhere_actions = prewhere_actions->clone(); + prewhere_info->prewhere_actions = prewhere_actions.clone(); prewhere_info->row_level_column_name = row_level_column_name; prewhere_info->prewhere_column_name = prewhere_column_name; @@ -93,7 +89,7 @@ struct FilterInfo /// Same as FilterInfo, but with ActionsDAG. struct FilterDAGInfo { - ActionsDAGPtr actions; + ActionsDAG actions; String column_name; bool do_remove_column = false; @@ -202,7 +198,7 @@ struct SelectQueryInfo ASTPtr parallel_replica_custom_key_ast; /// Filter actions dag for current storage - ActionsDAGPtr filter_actions_dag; + std::shared_ptr filter_actions_dag; ReadInOrderOptimizerPtr order_optimizer; /// Can be modified while reading from storage diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 1bcc602f168..04e6d6676d1 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -313,19 +313,18 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->row_level_filter) { src_table_query_info.prewhere_info->row_level_filter = ActionsDAG::merge( - std::move(*actions_dag->clone()), + actions_dag.clone(), std::move(*src_table_query_info.prewhere_info->row_level_filter)); src_table_query_info.prewhere_info->row_level_filter->removeUnusedActions(); } - if (src_table_query_info.prewhere_info->prewhere_actions) { src_table_query_info.prewhere_info->prewhere_actions = ActionsDAG::merge( - std::move(*actions_dag->clone()), - std::move(*src_table_query_info.prewhere_info->prewhere_actions)); + actions_dag.clone(), + std::move(src_table_query_info.prewhere_info->prewhere_actions)); - src_table_query_info.prewhere_info->prewhere_actions->removeUnusedActions(); + src_table_query_info.prewhere_info->prewhere_actions.removeUnusedActions(); } } @@ -354,7 +353,7 @@ void StorageBuffer::read( header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(actions_dag)); converting->setStepDescription("Convert destination table columns to Buffer table structure"); query_plan.addStep(std::move(converting)); @@ -429,21 +428,23 @@ void StorageBuffer::read( if (query_info.prewhere_info->row_level_filter) { + auto actions = std::make_shared(query_info.prewhere_info->row_level_filter->clone(), actions_settings); pipe_from_buffers.addSimpleTransform([&](const Block & header) { return std::make_shared( header, - std::make_shared(query_info.prewhere_info->row_level_filter, actions_settings), + actions, query_info.prewhere_info->row_level_column_name, false); }); } + auto actions = std::make_shared(query_info.prewhere_info->prewhere_actions.clone(), actions_settings); 
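Since PrewhereInfo now stores its DAGs by value, copies have to be made explicitly, as the clone() calls above show. A simplified illustration of that pattern with a hypothetical move-only Dag type (not the real ActionsDAG interface): the always-present member is a plain value, the optional one is a std::optional, and a deep copy goes through clone().

#include <iostream>
#include <optional>
#include <string>

struct Dag
{
    std::string expr;

    explicit Dag(std::string expr_) : expr(std::move(expr_)) {}
    Dag(Dag &&) = default;
    Dag & operator=(Dag &&) = default;
    Dag(const Dag &) = delete;                  // move-only, mirroring ActionsDAG after this change
    Dag & operator=(const Dag &) = delete;

    Dag clone() const { return Dag(expr); }     // deep copies are explicit
};

struct PrewhereLikeInfo
{
    std::optional<Dag> row_level_filter;        // may be absent, no heap pointer needed
    Dag prewhere_actions;                       // always present, owned by value

    PrewhereLikeInfo clone() const
    {
        PrewhereLikeInfo copy{std::nullopt, prewhere_actions.clone()};
        if (row_level_filter)
            copy.row_level_filter = row_level_filter->clone();
        return copy;
    }
};

int main()
{
    PrewhereLikeInfo info{std::nullopt, Dag("x > 0")};
    PrewhereLikeInfo copy = info.clone();
    std::cout << copy.prewhere_actions.expr << '\n';
}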
pipe_from_buffers.addSimpleTransform([&](const Block & header) { return std::make_shared( header, - std::make_shared(query_info.prewhere_info->prewhere_actions, actions_settings), + actions, query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column); }); @@ -473,7 +474,7 @@ void StorageBuffer::read( result_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(convert_actions_dag)); query_plan.addStep(std::move(converting)); } diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 07892971ec2..3e38ddf830a 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1052,7 +1052,7 @@ std::optional StorageDistributed::distributedWriteBetweenDistribu return pipeline; } -static ActionsDAGPtr getFilterFromQuery(const ASTPtr & ast, ContextPtr context) +static std::optional getFilterFromQuery(const ASTPtr & ast, ContextPtr context) { QueryPlan plan; SelectQueryOptions options; @@ -1096,9 +1096,9 @@ static ActionsDAGPtr getFilterFromQuery(const ASTPtr & ast, ContextPtr context) } if (!source) - return nullptr; + return {}; - return source->getFilterActionsDAG(); + return source->detachFilterActionsDAG(); } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 8a828938f7a..98cd5c4dfa9 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1134,12 +1134,16 @@ StorageFileSource::FilesIterator::FilesIterator( bool distributed_processing_) : WithContext(context_), files(files_), archive_info(std::move(archive_info_)), distributed_processing(distributed_processing_) { - ActionsDAGPtr filter_dag; + std::optional filter_dag; if (!distributed_processing && !archive_info && !files.empty()) filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns); if (filter_dag) - VirtualColumnUtils::filterByPathOrFile(files, files, filter_dag, virtual_columns, context_); + { + VirtualColumnUtils::buildSetsForDAG(*filter_dag, context_); + auto actions = std::make_shared(std::move(*filter_dag)); + VirtualColumnUtils::filterByPathOrFile(files, files, actions, virtual_columns); + } } String StorageFileSource::FilesIterator::next() @@ -1248,7 +1252,7 @@ StorageFileSource::~StorageFileSource() beforeDestroy(); } -void StorageFileSource::setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) +void StorageFileSource::setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) { setKeyConditionImpl(filter_actions_dag, context_, block_for_format); } diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index f955889185c..895a8a663b8 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -266,7 +266,7 @@ private: return storage->getName(); } - void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override; + void setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) override; bool tryGetCountFromCache(const struct stat & file_stat); diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index b603d0ecf87..696136834d4 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -281,8 +281,8 @@ void StorageMaterializedView::read( * They may 
be added in case of distributed query with JOIN. * In that case underlying table returns joined columns as well. */ - converting_actions->removeUnusedActions(); - auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), converting_actions); + converting_actions.removeUnusedActions(); + auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(converting_actions)); converting_step->setStepDescription("Convert target table structure to MaterializedView structure"); query_plan.addStep(std::move(converting_step)); } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f5bc183931f..7c268d36a7b 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -369,6 +369,14 @@ void StorageMerge::read( /// What will be result structure depending on query processed stage in source tables? Block common_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, local_context, processed_stage); + if (local_context->getSettingsRef().allow_experimental_analyzer && processed_stage == QueryProcessingStage::Complete) + { + /// Remove constants. + /// For StorageDistributed some functions like `hostName` that are constants only for local queries. + for (auto & column : common_header) + column.column = column.column->convertToFullColumnIfConst(); + } + auto step = std::make_unique( column_names, query_info, @@ -408,7 +416,7 @@ void ReadFromMerge::addFilter(FilterDAGInfo filter) { output_stream->header = FilterTransform::transformHeader( output_stream->header, - filter.actions.get(), + &filter.actions, filter.column_name, filter.do_remove_column); pushed_down_filters.push_back(std::move(filter)); @@ -629,7 +637,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ auto alias_actions = ExpressionAnalyzer(required_columns_expr_list, syntax_result, context).getActionsDAG(true); - column_names_as_aliases = alias_actions->getRequiredColumns().getNames(); + column_names_as_aliases = alias_actions.getRequiredColumns().getNames(); if (column_names_as_aliases.empty()) column_names_as_aliases.push_back(ExpressionActions::getSmallestColumn(storage_metadata_snapshot->getColumns().getAllPhysical()).name); } @@ -663,7 +671,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ { auto filter_step = std::make_unique( child.plan.getCurrentDataStream(), - filter_info.actions->clone(), + filter_info.actions.clone(), filter_info.column_name, filter_info.do_remove_column); @@ -1061,7 +1069,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(database_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1075,7 +1083,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(table_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ 
-1090,7 +1098,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(database_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1103,7 +1111,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(table_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1241,7 +1249,7 @@ ReadFromMerge::RowPolicyData::RowPolicyData(RowPolicyFilterPtr row_policy_filter auto expression_analyzer = ExpressionAnalyzer{expr, syntax_result, local_context}; actions_dag = expression_analyzer.getActionsDAG(false /* add_aliases */, false /* project_result */); - filter_actions = std::make_shared(actions_dag, + filter_actions = std::make_shared(actions_dag.clone(), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); const auto & required_columns = filter_actions->getRequiredColumnsWithTypes(); const auto & sample_block_columns = filter_actions->getSampleBlock().getNamesAndTypesList(); @@ -1279,12 +1287,12 @@ void ReadFromMerge::RowPolicyData::extendNames(Names & names) const void ReadFromMerge::RowPolicyData::addStorageFilter(SourceStepWithFilter * step) const { - step->addFilter(actions_dag, filter_column_name); + step->addFilter(actions_dag.clone(), filter_column_name); } void ReadFromMerge::RowPolicyData::addFilterTransform(QueryPlan & plan) const { - auto filter_step = std::make_unique(plan.getCurrentDataStream(), actions_dag, filter_column_name, true /* remove filter column */); + auto filter_step = std::make_unique(plan.getCurrentDataStream(), actions_dag.clone(), filter_column_name, true /* remove filter column */); plan.addStep(std::move(filter_step)); } @@ -1477,7 +1485,7 @@ void ReadFromMerge::convertAndFilterSourceStream( { pipe_columns.emplace_back(NameAndTypePair(alias.name, alias.type)); - auto actions_dag = std::make_shared(pipe_columns); + ActionsDAG actions_dag(pipe_columns); QueryTreeNodePtr query_tree = buildQueryTree(alias.expression, local_context); query_tree->setAlias(alias.name); @@ -1486,13 +1494,13 @@ void ReadFromMerge::convertAndFilterSourceStream( query_analysis_pass.run(query_tree, local_context); PlannerActionsVisitor actions_visitor(modified_query_info.planner_context, false /*use_column_identifier_as_action_node_name*/); - const auto & nodes = actions_visitor.visit(*actions_dag, query_tree); + const auto & nodes = actions_visitor.visit(actions_dag, query_tree); if (nodes.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected to have 1 output but got {}", nodes.size()); - actions_dag->addOrReplaceInOutputs(actions_dag->addAlias(*nodes.front(), alias.name)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), actions_dag); + actions_dag.addOrReplaceInOutputs(actions_dag.addAlias(*nodes.front(), alias.name)); + auto expression_step = 
std::make_unique(child.plan.getCurrentDataStream(), std::move(actions_dag)); child.plan.addStep(std::move(expression_step)); } } @@ -1507,7 +1515,7 @@ void ReadFromMerge::convertAndFilterSourceStream( auto dag = std::make_shared(pipe_columns); auto actions_dag = expression_analyzer.getActionsDAG(true, false); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), actions_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(actions_dag)); child.plan.addStep(std::move(expression_step)); } } @@ -1525,7 +1533,7 @@ void ReadFromMerge::convertAndFilterSourceStream( header.getColumnsWithTypeAndName(), convert_actions_match_columns_mode); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), convert_actions_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(convert_actions_dag)); child.plan.addStep(std::move(expression_step)); } @@ -1566,7 +1574,7 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_) void ReadFromMerge::applyFilters(ActionDAGNodes added_filter_nodes) { for (const auto & filter_info : pushed_down_filters) - added_filter_nodes.nodes.push_back(&filter_info.actions->findInOutputs(filter_info.column_name)); + added_filter_nodes.nodes.push_back(&filter_info.actions.findInOutputs(filter_info.column_name)); SourceStepWithFilter::applyFilters(added_filter_nodes); diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 94b34256d02..d6f2deca7fd 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -225,7 +225,7 @@ private: private: std::string filter_column_name; // complex filter, may contain logic operations - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; ExpressionActionsPtr filter_actions; StorageMetadataPtr storage_metadata_snapshot; }; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 527872d701e..f55f672fe5e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -263,7 +263,7 @@ std::optional StorageMergeTree::totalRows(const Settings &) const return getTotalActiveSizeInRows(); } -std::optional StorageMergeTree::totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr local_context) const +std::optional StorageMergeTree::totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr local_context) const { auto parts = getVisibleDataPartsVector(local_context); return totalRowsByPartitionPredicateImpl(filter_actions_dag, local_context, parts); diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 064b51739bd..6223b5f50fa 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -65,7 +65,7 @@ public: size_t num_streams) override; std::optional totalRows(const Settings &) const override; - std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr) const override; + std::optional totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr) const override; std::optional totalBytes(const Settings &) const override; std::optional totalBytesUncompressed(const Settings &) const override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index da379a466af..2d826c6c2df 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5604,7 +5604,7 @@ std::optional 
StorageReplicatedMergeTree::totalRows(const Settings & set return res; } -std::optional StorageReplicatedMergeTree::totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr local_context) const +std::optional StorageReplicatedMergeTree::totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr local_context) const { DataPartsVector parts; foreachActiveParts([&](auto & part) { parts.push_back(part); }, local_context->getSettingsRef().select_sequential_consistency); diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index f96206ce657..2e54f17d5d5 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -159,7 +159,7 @@ public: size_t num_streams) override; std::optional totalRows(const Settings & settings) const override; - std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + std::optional totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr context) const override; std::optional totalBytes(const Settings & settings) const override; std::optional totalBytesUncompressed(const Settings & settings) const override; diff --git a/src/Storages/StorageTableFunction.h b/src/Storages/StorageTableFunction.h index 9507eb6ed8a..345dd62c687 100644 --- a/src/Storages/StorageTableFunction.h +++ b/src/Storages/StorageTableFunction.h @@ -112,7 +112,7 @@ public: auto step = std::make_unique( query_plan.getCurrentDataStream(), - convert_actions_dag); + std::move(convert_actions_dag)); step->setStepDescription("Converting columns"); query_plan.addStep(std::move(step)); diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index efee6151afd..318ec7f367d 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -198,7 +198,7 @@ public: { uris = parseRemoteDescription(uri_, 0, uri_.size(), ',', max_addresses); - ActionsDAGPtr filter_dag; + std::optional filter_dag; if (!uris.empty()) filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns); @@ -209,7 +209,9 @@ public: for (const auto & uri : uris) paths.push_back(Poco::URI(uri).getPath()); - VirtualColumnUtils::filterByPathOrFile(uris, paths, filter_dag, virtual_columns, context); + VirtualColumnUtils::buildSetsForDAG(*filter_dag, context); + auto actions = std::make_shared(std::move(*filter_dag)); + VirtualColumnUtils::filterByPathOrFile(uris, paths, actions, virtual_columns); } } diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index 1804079e75f..63d01a02417 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -185,7 +185,7 @@ public: String getName() const override { return name; } - void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override + void setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) override { setKeyConditionImpl(filter_actions_dag, context_, block_for_format); } diff --git a/src/Storages/StorageValues.cpp b/src/Storages/StorageValues.cpp index 894b1404a21..c1ca6244866 100644 --- a/src/Storages/StorageValues.cpp +++ b/src/Storages/StorageValues.cpp @@ -48,14 +48,14 @@ Pipe StorageValues::read( if (!prepared_pipe.empty()) { - auto dag = std::make_shared(prepared_pipe.getHeader().getColumnsWithTypeAndName()); + ActionsDAG dag(prepared_pipe.getHeader().getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs outputs; 
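A rough sketch of what the StorageValues hunk above does with its local DAG, using hypothetical Dag and Expression types: build the projection as a plain local value, keep only the requested outputs, then move it into the expression that will execute it, so no shared ownership is involved.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Dag
{
    std::vector<std::string> outputs;
    explicit Dag(std::vector<std::string> header) : outputs(std::move(header)) {}
};

struct Expression
{
    explicit Expression(Dag dag_) : dag(std::move(dag_)) {}
    Dag dag;
};

int main()
{
    Dag dag({"a", "b", "c"});

    // Keep only the requested columns, in the requested order.
    std::vector<std::string> requested{"c", "a"};
    std::vector<std::string> selected;
    for (const auto & name : requested)
        for (const auto & output : dag.outputs)
            if (output == name)
                selected.push_back(output);
    dag.outputs.swap(selected);

    auto expression = std::make_shared<Expression>(std::move(dag));   // the DAG is consumed here
    for (const auto & output : expression->dag.outputs)
        std::cout << output << '\n';                                   // prints: c a
}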
outputs.reserve(column_names.size()); for (const auto & name : column_names) - outputs.push_back(dag->getOutputs()[prepared_pipe.getHeader().getPositionByName(name)]); + outputs.push_back(dag.getOutputs()[prepared_pipe.getHeader().getPositionByName(name)]); - dag->getOutputs().swap(outputs); - auto expression = std::make_shared(dag); + dag.getOutputs().swap(outputs); + auto expression = std::make_shared(std::move(dag)); prepared_pipe.addSimpleTransform([&](const Block & header) { diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 5f768bce978..581c2c978c4 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -179,8 +179,8 @@ void StorageView::read( /// It's expected that the columns read from storage are not constant. /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery. - auto materializing_actions = std::make_shared(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); - materializing_actions->addMaterializingOutputActions(); + ActionsDAG materializing_actions(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + materializing_actions.addMaterializingOutputActions(); auto materializing = std::make_unique(query_plan.getCurrentDataStream(), std::move(materializing_actions)); materializing->setStepDescription("Materialize constants after VIEW subquery"); @@ -205,7 +205,7 @@ void StorageView::read( expected_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(convert_actions_dag)); converting->setStepDescription("Convert VIEW subquery result to VIEW table structure"); query_plan.addStep(std::move(converting)); } diff --git a/src/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp index 7e4c1de1c65..0d0ae666c10 100644 --- a/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/src/Storages/System/StorageSystemDetachedParts.cpp @@ -307,7 +307,7 @@ protected: std::shared_ptr storage; std::vector columns_mask; - ActionsDAGPtr filter; + std::optional filter; const size_t max_block_size; const size_t num_streams; }; @@ -359,7 +359,7 @@ void StorageSystemDetachedParts::read( void ReadFromSystemDetachedParts::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - auto state = std::make_shared(StoragesInfoStream(nullptr, filter, context)); + auto state = std::make_shared(StoragesInfoStream({}, std::move(filter), context)); Pipe pipe; diff --git a/src/Storages/System/StorageSystemDroppedTablesParts.cpp b/src/Storages/System/StorageSystemDroppedTablesParts.cpp index c17d6402d88..c2601b8ebe3 100644 --- a/src/Storages/System/StorageSystemDroppedTablesParts.cpp +++ b/src/Storages/System/StorageSystemDroppedTablesParts.cpp @@ -11,7 +11,7 @@ namespace DB { -StoragesDroppedInfoStream::StoragesDroppedInfoStream(const ActionsDAGPtr & filter, ContextPtr context) +StoragesDroppedInfoStream::StoragesDroppedInfoStream(std::optional filter, ContextPtr context) : StoragesInfoStreamBase(context) { /// Will apply WHERE to subset of columns and then add more columns. @@ -75,7 +75,7 @@ StoragesDroppedInfoStream::StoragesDroppedInfoStream(const ActionsDAGPtr & filte { /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'. 
if (filter) - VirtualColumnUtils::filterBlockWithDAG(filter, block_to_filter, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter), context), block_to_filter); rows = block_to_filter.rows(); } diff --git a/src/Storages/System/StorageSystemDroppedTablesParts.h b/src/Storages/System/StorageSystemDroppedTablesParts.h index dff9e41cce3..32468fc31b2 100644 --- a/src/Storages/System/StorageSystemDroppedTablesParts.h +++ b/src/Storages/System/StorageSystemDroppedTablesParts.h @@ -9,7 +9,7 @@ namespace DB class StoragesDroppedInfoStream : public StoragesInfoStreamBase { public: - StoragesDroppedInfoStream(const ActionsDAGPtr & filter, ContextPtr context); + StoragesDroppedInfoStream(std::optional filter, ContextPtr context); protected: bool tryLockTable(StoragesInfo &) override { @@ -30,9 +30,9 @@ public: std::string getName() const override { return "SystemDroppedTablesParts"; } protected: - std::unique_ptr getStoragesInfoStream(const ActionsDAGPtr &, const ActionsDAGPtr & filter, ContextPtr context) override + std::unique_ptr getStoragesInfoStream(std::optional, std::optional filter, ContextPtr context) override { - return std::make_unique(filter, context); + return std::make_unique(std::move(filter), context); } }; diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index f7d1c1b3eb8..7ace8ee24aa 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -91,7 +91,7 @@ StoragesInfo::getProjectionParts(MergeTreeData::DataPartStateVector & state, boo return data->getProjectionPartsVectorForInternalUsage({State::Active}, &state); } -StoragesInfoStream::StoragesInfoStream(const ActionsDAGPtr & filter_by_database, const ActionsDAGPtr & filter_by_other_columns, ContextPtr context) +StoragesInfoStream::StoragesInfoStream(std::optional filter_by_database, std::optional filter_by_other_columns, ContextPtr context) : StoragesInfoStreamBase(context) { /// Will apply WHERE to subset of columns and then add more columns. @@ -124,7 +124,7 @@ StoragesInfoStream::StoragesInfoStream(const ActionsDAGPtr & filter_by_database, /// Filter block_to_filter with column 'database'. if (filter_by_database) - VirtualColumnUtils::filterBlockWithDAG(filter_by_database, block_to_filter, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter_by_database), context), block_to_filter); rows = block_to_filter.rows(); /// Block contains new columns, update database_column. @@ -204,7 +204,7 @@ StoragesInfoStream::StoragesInfoStream(const ActionsDAGPtr & filter_by_database, { /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'. 
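The pattern behind the buildFilterExpression/filterBlockWithExpression split used above, sketched with toy types (the real functions live in VirtualColumnUtils and operate on Blocks): the filter is compiled from the DAG once, and the compiled form can then be applied to one or many blocks.

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Toy stand-ins: "compile" the filter once, then apply it to row batches.
using Row = std::pair<std::string, int>;                 // (database, rows)
using FilterExpression = std::function<bool(const Row &)>;

std::shared_ptr<FilterExpression> buildFilterExpression(std::string database)
{
    // In the real code this is where the sets are built and the DAG becomes ExpressionActions.
    return std::make_shared<FilterExpression>(
        [db = std::move(database)](const Row & row) { return row.first == db; });
}

void filterBlockWithExpression(const std::shared_ptr<FilterExpression> & expr, std::vector<Row> & block)
{
    std::vector<Row> passed;
    for (const auto & row : block)
        if ((*expr)(row))
            passed.push_back(row);
    block.swap(passed);
}

int main()
{
    auto expr = buildFilterExpression("default");        // built once
    std::vector<Row> block{{"default", 10}, {"system", 5}, {"default", 7}};
    filterBlockWithExpression(expr, block);              // applied per block
    for (const auto & [db, rows] : block)
        std::cout << db << ' ' << rows << '\n';
}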
if (filter_by_other_columns) - VirtualColumnUtils::filterBlockWithDAG(filter_by_other_columns, block_to_filter, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter_by_other_columns), context), block_to_filter); rows = block_to_filter.rows(); } @@ -236,8 +236,8 @@ protected: std::shared_ptr storage; std::vector columns_mask; const bool has_state_column; - ActionsDAGPtr filter_by_database; - ActionsDAGPtr filter_by_other_columns; + std::optional filter_by_database; + std::optional filter_by_other_columns; }; ReadFromSystemPartsBase::ReadFromSystemPartsBase( @@ -318,7 +318,7 @@ void StorageSystemPartsBase::read( void ReadFromSystemPartsBase::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - auto stream = storage->getStoragesInfoStream(filter_by_database, filter_by_other_columns, context); + auto stream = storage->getStoragesInfoStream(std::move(filter_by_database), std::move(filter_by_other_columns), context); auto header = getOutputStream().header; MutableColumns res_columns = header.cloneEmptyColumns(); diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index 8671fd850f8..806af4a7bf8 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -116,7 +116,7 @@ protected: class StoragesInfoStream : public StoragesInfoStreamBase { public: - StoragesInfoStream(const ActionsDAGPtr & filter_by_database, const ActionsDAGPtr & filter_by_other_columns, ContextPtr context); + StoragesInfoStream(std::optional filter_by_database, std::optional filter_by_other_columns, ContextPtr context); }; /** Implements system table 'parts' which allows to get information about data parts for tables of MergeTree family. 
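The StoragesInfoStream constructors above now take their optional DAGs by value and move them into members, the usual sink-parameter style; a small illustration with a hypothetical Dag type:

#include <iostream>
#include <optional>
#include <string>
#include <utility>

struct Dag { std::string expr; };

class InfoStream
{
public:
    // Sink parameters: taken by value and moved into members, so the caller
    // decides whether to hand its DAG over (std::move) or keep a copy.
    InfoStream(std::optional<Dag> filter_by_database_, std::optional<Dag> filter_by_other_columns_)
        : filter_by_database(std::move(filter_by_database_))
        , filter_by_other_columns(std::move(filter_by_other_columns_))
    {
    }

    bool hasDatabaseFilter() const { return filter_by_database.has_value(); }

private:
    std::optional<Dag> filter_by_database;
    std::optional<Dag> filter_by_other_columns;
};

int main()
{
    std::optional<Dag> filter_by_database = Dag{"database = 'default'"};
    InfoStream stream(std::move(filter_by_database), std::nullopt);
    std::cout << std::boolalpha << stream.hasDatabaseFilter() << '\n';    // true
}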
@@ -146,9 +146,9 @@ protected: StorageSystemPartsBase(const StorageID & table_id_, ColumnsDescription && columns); - virtual std::unique_ptr getStoragesInfoStream(const ActionsDAGPtr & filter_by_database, const ActionsDAGPtr & filter_by_other_columns, ContextPtr context) + virtual std::unique_ptr getStoragesInfoStream(std::optional filter_by_database, std::optional filter_by_other_columns, ContextPtr context) { - return std::make_unique(filter_by_database, filter_by_other_columns, context); + return std::make_unique(std::move(filter_by_database), std::move(filter_by_other_columns), context); } virtual void diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index cc221bbea69..6cc525b9340 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -277,7 +277,7 @@ public: StackTraceSource( const Names & column_names, Block header_, - ActionsDAGPtr && filter_dag_, + std::optional filter_dag_, ContextPtr context_, UInt64 max_block_size_, LoggerPtr log_) @@ -423,7 +423,7 @@ protected: private: ContextPtr context; Block header; - const ActionsDAGPtr filter_dag; + const std::optional filter_dag; const ActionsDAG::Node * predicate; const size_t max_block_size; diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 049a442d494..943ce9c317a 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -125,7 +125,7 @@ ColumnPtr getFilteredTables( block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*dag), context), block); return block.getByPosition(0).column; } diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index 92f6f17583d..d674f054632 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -177,14 +177,14 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy); auto dag = analyzer.getActionsDAG(false); - const auto * col = &dag->findInOutputs(ast->getColumnName()); + const auto * col = &dag.findInOutputs(ast->getColumnName()); if (col->result_name != ttl_string) - col = &dag->addAlias(*col, ttl_string); + col = &dag.addAlias(*col, ttl_string); - dag->getOutputs() = {col}; - dag->removeUnusedActions(); + dag.getOutputs() = {col}; + dag.removeUnusedActions(); - result.expression = std::make_shared(dag, ExpressionActionsSettings::fromContext(context_copy)); + result.expression = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context_copy)); result.sets = analyzer.getPreparedSets(); return result; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 151079154b1..90c2c7f93c1 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -77,15 +77,19 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context) } } -void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context) +ExpressionActionsPtr buildFilterExpression(ActionsDAG dag, ContextPtr context) +{ + buildSetsForDAG(dag, context); + return std::make_shared(std::move(dag)); +} + +void filterBlockWithExpression(const ExpressionActionsPtr 
& actions, Block & block) { - buildSetsForDAG(*dag, context); - auto actions = std::make_shared(dag); Block block_with_filter = block; actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); /// Filter the block. - String filter_column_name = dag->getOutputs().at(0)->result_name; + String filter_column_name = actions->getActionsDAG().getOutputs().at(0)->result_name; ColumnPtr filter_column = block_with_filter.getByName(filter_column_name).column->convertToFullColumnIfConst(); ConstantFilterDescription constant_filter(*filter_column); @@ -155,7 +159,7 @@ static void addPathAndFileToVirtualColumns(Block & block, const String & path, s block.getByName("_idx").column->assumeMutableRef().insert(idx); } -ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns) +std::optional createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns) { if (!predicate || virtual_columns.empty()) return {}; @@ -171,7 +175,7 @@ ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, con return splitFilterDagForAllowedInputs(predicate, &block); } -ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ActionsDAGPtr & dag, const NamesAndTypesList & virtual_columns, const ContextPtr & context) +ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ExpressionActionsPtr & actions, const NamesAndTypesList & virtual_columns) { Block block; for (const auto & column : virtual_columns) @@ -184,7 +188,7 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const for (size_t i = 0; i != paths.size(); ++i) addPathAndFileToVirtualColumns(block, paths[i], i); - filterBlockWithDAG(dag, block, context); + filterBlockWithExpression(actions, block); return block.getByName("_idx").column; } @@ -325,9 +329,9 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - auto index_hint_dag = index_hint->getActions()->clone(); + auto index_hint_dag = index_hint->getActions().clone(); ActionsDAG::NodeRawConstPtrs atoms; - for (const auto & output : index_hint_dag->getOutputs()) + for (const auto & output : index_hint_dag.getOutputs()) if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) atoms.push_back(child_copy); @@ -338,13 +342,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( if (atoms.size() > 1) { FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - res = &index_hint_dag->addFunction(func_builder_and, atoms, {}); + res = &index_hint_dag.addFunction(func_builder_and, atoms, {}); } if (!res->result_type->equals(*node->result_type)) - res = &index_hint_dag->addCast(*res, node->result_type, {}); + res = &index_hint_dag.addCast(*res, node->result_type, {}); - additional_nodes.splice(additional_nodes.end(), ActionsDAG::detachNodes(std::move(*index_hint_dag))); + additional_nodes.splice(additional_nodes.end(), ActionsDAG::detachNodes(std::move(index_hint_dag))); return res; } } @@ -362,15 +366,15 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions) +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * 
predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions) { if (!predicate) - return nullptr; + return {}; ActionsDAG::Nodes additional_nodes; const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_non_deterministic_functions); if (!res) - return nullptr; + return {}; return ActionsDAG::cloneSubDAG({res}, true); } @@ -379,7 +383,7 @@ void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, { auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false); if (dag) - filterBlockWithDAG(dag, block, context); + filterBlockWithExpression(buildFilterExpression(std::move(*dag), context), block); } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index e5cfa47c8f6..73b7908b75c 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -23,7 +23,8 @@ namespace VirtualColumnUtils void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. Block should contain all the required columns. -void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context); +ExpressionActionsPtr buildFilterExpression(ActionsDAG dag, ContextPtr context); +void filterBlockWithExpression(const ExpressionActionsPtr & actions, Block & block); /// Builds sets used by ActionsDAG inplace. void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); @@ -40,7 +41,7 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is /// non-deterministic. If we still extract the part `_partition_id = '0'` for filtering parts, then trivial /// count optimization will be mistakenly applied to the query. 
-ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true); +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true); /// Extract from the input stream a set of `name` column values template @@ -57,14 +58,14 @@ auto extractSingleValueFromBlock(const Block & block, const String & name) NameSet getVirtualNamesForFileLikeStorage(); VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns); -ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); +std::optional createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); -ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ActionsDAGPtr & dag, const NamesAndTypesList & virtual_columns, const ContextPtr & context); +ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ExpressionActionsPtr & actions, const NamesAndTypesList & virtual_columns); template -void filterByPathOrFile(std::vector & sources, const std::vector & paths, const ActionsDAGPtr & dag, const NamesAndTypesList & virtual_columns, const ContextPtr & context) +void filterByPathOrFile(std::vector & sources, const std::vector & paths, const ExpressionActionsPtr & actions, const NamesAndTypesList & virtual_columns) { - auto indexes_column = getFilterByPathAndFileIndexes(paths, dag, virtual_columns, context); + auto indexes_column = getFilterByPathAndFileIndexes(paths, actions, virtual_columns); const auto & indexes = typeid_cast(*indexes_column).getData(); if (indexes.size() == sources.size()) return; diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 8975f9327b1..65bf6768b1b 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -565,11 +566,11 @@ std::pair StorageWindowView::getNewBlocks(UInt32 watermark) auto syntax_result = TreeRewriter(getContext()).analyze(filter_function, builder.getHeader().getNamesAndTypesList()); auto filter_expression = ExpressionAnalyzer(filter_function, syntax_result, getContext()).getActionsDAG(false); + auto filter_actions = std::make_shared(std::move(filter_expression)); builder.addSimpleTransform([&](const Block & header) { - return std::make_shared( - header, std::make_shared(filter_expression), filter_function->getColumnName(), true); + return std::make_shared(header, filter_actions, filter_function->getColumnName(), true); }); /// Adding window column @@ -594,7 +595,7 @@ std::pair StorageWindowView::getNewBlocks(UInt32 watermark) new_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); auto actions = std::make_shared( - convert_actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); builder.addSimpleTransform([&](const Block & stream_header) { return std::make_shared(stream_header, actions); @@ -708,7 +709,7 @@ inline void StorageWindowView::fire(UInt32 watermark) getTargetTable()->getInMemoryMetadataPtr()->getColumns(), getContext(), getContext()->getSettingsRef().insert_null_as_default); - 
auto adding_missing_defaults_actions = std::make_shared(adding_missing_defaults_dag); + auto adding_missing_defaults_actions = std::make_shared(std::move(adding_missing_defaults_dag)); pipe.addSimpleTransform([&](const Block & stream_header) { return std::make_shared(stream_header, adding_missing_defaults_actions); @@ -719,7 +720,7 @@ inline void StorageWindowView::fire(UInt32 watermark) block_io.pipeline.getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Position); auto actions = std::make_shared( - convert_actions_dag, + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); pipe.addSimpleTransform([&](const Block & stream_header) { @@ -805,7 +806,7 @@ ASTPtr StorageWindowView::getInnerTableCreateQuery(const ASTPtr & inner_query, c { auto column_window = std::make_shared(); column_window->name = window_id_name; - column_window->type = std::make_shared("UInt32"); + column_window->type = makeASTDataType("UInt32"); columns_list->children.push_back(column_window); } @@ -1139,7 +1140,7 @@ void StorageWindowView::read( { auto converting_actions = ActionsDAG::makeConvertingActions( target_header.getColumnsWithTypeAndName(), wv_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), converting_actions); + auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(converting_actions)); converting_step->setStepDescription("Convert Target table structure to WindowView structure"); query_plan.addStep(std::move(converting_step)); } @@ -1489,7 +1490,7 @@ void StorageWindowView::writeIntoWindowView( pipe.addSimpleTransform([&](const Block & header_) { return std::make_shared( - header_, std::make_shared(filter_expression), + header_, std::make_shared(std::move(filter_expression)), filter_function->getColumnName(), true); }); } @@ -1628,7 +1629,7 @@ void StorageWindowView::writeIntoWindowView( output->getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); auto convert_actions = std::make_shared( - convert_actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); builder.addSimpleTransform([&](const Block & header_) { return std::make_shared(header_, convert_actions); }); } diff --git a/src/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp index e5676c5c25d..916ff7ec022 100644 --- a/src/TableFunctions/ITableFunction.cpp +++ b/src/TableFunctions/ITableFunction.cpp @@ -1,5 +1,4 @@ #include -#include #include #include #include diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index a7fc6d02853..0b2aa9a2d35 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -420,7 +420,12 @@ class Backport: fetch_release_prs = self.gh.get_release_pulls(self._fetch_from) fetch_release_branches = [pr.head.ref for pr in fetch_release_prs] self.labels_to_backport = [ - f"v{branch}-must-backport" for branch in fetch_release_branches + ( + f"v{branch}-must-backport" + if self._repo_name == "ClickHouse/ClickHouse" + else f"v{branch.replace('release/','')}-must-backport" + ) + for branch in fetch_release_branches ] logging.info("Fetching from %s", self._fetch_from) @@ -490,17 +495,23 @@ class Backport: def process_pr(self, pr: PullRequest) -> None: pr_labels = [label.name for label in pr.labels] - if ( - any(label 
in pr_labels for label in self.must_create_backport_labels) - or self._repo_name != self._fetch_from - ): + if any(label in pr_labels for label in self.must_create_backport_labels): branches = [ ReleaseBranch(br, pr, self.repo, self.backport_created_label) for br in self.release_branches ] # type: List[ReleaseBranch] else: branches = [ - ReleaseBranch(br, pr, self.repo, self.backport_created_label) + ReleaseBranch( + ( + br + if self._repo_name == "ClickHouse/Clickhouse" + else f"release/{br}" + ), + pr, + self.repo, + self.backport_created_label, + ) for br in [ label.split("-", 1)[0][1:] # v21.8-must-backport for label in pr_labels diff --git a/tests/ci/ci.py b/tests/ci/ci.py index e30062c32ff..935fe472e50 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -766,7 +766,9 @@ def _upload_build_artifacts( int(job_report.duration), GITHUB_JOB_API_URL(), head_ref=pr_info.head_ref, - pr_number=pr_info.number, + # PRInfo fetches pr number for release branches as well - set pr_number to 0 for release + # so that build results are not mistakenly treated as feature branch builds + pr_number=pr_info.number if pr_info.is_pr else 0, ) report_url = ci_cache.upload_build_report(build_result) print(f"Report file has been uploaded to [{report_url}]") diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index a9bdb639835..a7df884a091 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -519,10 +519,10 @@ class CI: runner_type=Runners.STYLE_CHECKER, ), JobNames.DOCKER_SERVER: CommonJobConfigs.DOCKER_SERVER.with_properties( - required_builds=[BuildNames.PACKAGE_RELEASE] + required_builds=[BuildNames.PACKAGE_RELEASE, BuildNames.PACKAGE_AARCH64] ), JobNames.DOCKER_KEEPER: CommonJobConfigs.DOCKER_SERVER.with_properties( - required_builds=[BuildNames.PACKAGE_RELEASE] + required_builds=[BuildNames.PACKAGE_RELEASE, BuildNames.PACKAGE_AARCH64] ), JobNames.DOCS_CHECK: JobConfig( digest=DigestConfig( @@ -587,10 +587,10 @@ class CI: if job_name in REQUIRED_CHECKS: stage_type = WorkflowStages.TESTS_1 else: - stage_type = WorkflowStages.TESTS_3 + stage_type = WorkflowStages.TESTS_2 assert stage_type, f"BUG [{job_name}]" - if non_blocking_ci and stage_type == WorkflowStages.TESTS_3: - stage_type = WorkflowStages.TESTS_2 + if non_blocking_ci and stage_type == WorkflowStages.TESTS_2: + stage_type = WorkflowStages.TESTS_2_WW return stage_type @classmethod diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 149177ecba5..054b554b8fa 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -67,10 +67,10 @@ class WorkflowStages(metaclass=WithIter): BUILDS_2 = "Builds_2" # all tests required for merge TESTS_1 = "Tests_1" - # not used atm - TESTS_2 = "Tests_2" + # used in woolenwolfdog mode + TESTS_2_WW = "Tests_2_ww" # all tests not required for merge - TESTS_3 = "Tests_3" + TESTS_2 = "Tests_2" class Runners(metaclass=WithIter): diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index d42091fb0da..447aac74c7f 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -212,7 +212,7 @@ class Shell: return res.stdout.strip() @classmethod - def run(cls, command, check=False, dry_run=False): + def run(cls, command, check=False, dry_run=False, **kwargs): if dry_run: print(f"Dry-ryn. 
Would run command [{command}]") return "" @@ -225,6 +225,7 @@ class Shell: stderr=subprocess.PIPE, text=True, check=False, + **kwargs, ) if result.returncode == 0: print(f"stdout: {result.stdout.strip()}") diff --git a/tests/ci/docker_images_helper.py b/tests/ci/docker_images_helper.py index 6ea679e0597..e6869852c4e 100644 --- a/tests/ci/docker_images_helper.py +++ b/tests/ci/docker_images_helper.py @@ -3,12 +3,12 @@ import json import logging import os -import subprocess from pathlib import Path from typing import Any, Dict, List, Optional from env_helper import ROOT_DIR, DOCKER_TAG from get_robot_token import get_parameter_from_ssm +from ci_utils import Shell IMAGES_FILE_PATH = Path("docker/images.json") @@ -16,20 +16,14 @@ ImagesDict = Dict[str, dict] def docker_login(relogin: bool = True) -> None: - if ( - relogin - or subprocess.run( # pylint: disable=unexpected-keyword-arg - "docker system info | grep --quiet -E 'Username|Registry'", - shell=True, - check=False, - ).returncode - == 1 + if relogin or not Shell.check( + "docker system info | grep --quiet -E 'Username|Registry'" ): - subprocess.check_output( # pylint: disable=unexpected-keyword-arg + Shell.run( # pylint: disable=unexpected-keyword-arg "docker login --username 'robotclickhouse' --password-stdin", input=get_parameter_from_ssm("dockerhub_robot_password"), encoding="utf-8", - shell=True, + check=True, ) @@ -48,14 +42,10 @@ class DockerImage: def pull_image(image: DockerImage) -> DockerImage: try: logging.info("Pulling image %s - start", image) - subprocess.check_output( - f"docker pull {image}", - stderr=subprocess.STDOUT, - shell=True, - ) + Shell.run(f"docker pull {image}", check=True) logging.info("Pulling image %s - done", image) except Exception as ex: - logging.info("Got execption pulling docker %s", ex) + logging.info("Got exception pulling docker %s", ex) raise ex return image diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 413c35cbebe..3e782c079c6 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -128,17 +128,9 @@ def parse_args() -> argparse.Namespace: def retry_popen(cmd: str, log_file: Path) -> int: max_retries = 2 - for retry in range(max_retries): - # From time to time docker build may failed. Curl issues, or even push - # It will sleep progressively 5, 15, 30 and 50 seconds between retries - progressive_sleep = 5 * sum(i + 1 for i in range(retry)) - if progressive_sleep: - logging.warning( - "The following command failed, sleep %s before retry: %s", - progressive_sleep, - cmd, - ) - time.sleep(progressive_sleep) + sleep_seconds = 10 + retcode = -1 + for _retry in range(max_retries): with TeePopen( cmd, log_file=log_file, @@ -146,7 +138,14 @@ def retry_popen(cmd: str, log_file: Path) -> int: retcode = process.wait() if retcode == 0: return 0 - + else: + # From time to time docker build may failed. Curl issues, or even push + logging.error( + "The following command failed, sleep %s before retry: %s", + sleep_seconds, + cmd, + ) + time.sleep(sleep_seconds) return retcode @@ -377,21 +376,6 @@ def main(): direct_urls: Dict[str, List[str]] = {} for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")): - if args.bucket_prefix: - assert not args.allow_build_reuse - repo_urls[arch] = f"{args.bucket_prefix}/{build_name}" - elif args.sha: - # CreateRelease workflow only. 
TODO - version = args.version - repo_urls[arch] = ( - f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/" - f"{version.major}.{version.minor}/{args.sha}/{build_name}" - ) - else: - # In all other cases urls must be fetched from build reports. TODO: script needs refactoring - repo_urls[arch] = "" - assert args.allow_build_reuse - if args.allow_build_reuse: # read s3 urls from pre-downloaded build reports if "clickhouse-server" in image_repo: @@ -413,6 +397,21 @@ def main(): for url in urls if any(package in url for package in PACKAGES) and "-dbg" not in url ] + elif args.bucket_prefix: + assert not args.allow_build_reuse + repo_urls[arch] = f"{args.bucket_prefix}/{build_name}" + print(f"Bucket prefix is set: Fetching packages from [{repo_urls}]") + elif args.sha: + version = args.version + repo_urls[arch] = ( + f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/" + f"{version.major}.{version.minor}/{args.sha}/{build_name}" + ) + print(f"Fetching packages from [{repo_urls}]") + else: + assert ( + False + ), "--sha, --bucket_prefix or --allow-build-reuse (to fetch packages from build report) must be provided" if push: docker_login() diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 2c8ada7b983..5c051b093e0 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -2,6 +2,7 @@ import json import logging import os +import re from typing import Dict, List, Set, Union from urllib.parse import quote @@ -328,7 +329,13 @@ class PRInfo: @property def is_release(self) -> bool: - return self.number == 0 and not self.is_merge_queue + return self.is_master or ( + self.is_push_event + and ( + bool(re.match(r"^2[1-9]\.[1-9][0-9]*$", self.head_ref)) + or bool(re.match(r"^release/2[1-9]\.[1-9][0-9]*$", self.head_ref)) + ) + ) @property def is_pr(self): diff --git a/tests/ci/report.py b/tests/ci/report.py index 77043dfc8be..f50ed4c1f85 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -125,7 +125,7 @@ html {{ min-height: 100%; font-family: "DejaVu Sans", "Noto Sans", Arial, sans-s h1 {{ margin-left: 10px; }} th, td {{ padding: 5px 10px 5px 10px; text-align: left; vertical-align: top; line-height: 1.5; border: 1px solid var(--table-border-color); }} td {{ background: var(--td-background); }} -th {{ background: var(--th-background); }} +th {{ background: var(--th-background); white-space: nowrap; }} a {{ color: var(--link-color); text-decoration: none; }} a:hover, a:active {{ color: var(--link-hover-color); text-decoration: none; }} table {{ box-shadow: 0 8px 25px -5px rgba(0, 0, 0, var(--shadow-intensity)); border-collapse: collapse; border-spacing: 0; }} @@ -135,6 +135,7 @@ th {{ cursor: pointer; }} tr:hover {{ filter: var(--tr-hover-filter); }} .expandable {{ cursor: pointer; }} .expandable-content {{ display: none; }} +pre {{ white-space: pre-wrap; }} #fish {{ display: none; float: right; position: relative; top: -20em; right: 2vw; margin-bottom: -20em; width: 30vw; filter: brightness(7%); z-index: -1; }} .themes {{ diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 4a2bd606d0e..f376a129e6f 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -211,7 +211,7 @@ class TestCIConfig(unittest.TestCase): else: self.assertTrue( CI.get_job_ci_stage(job) - in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_3), + in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_2), msg=f"Stage for [{job}] is not correct", ) @@ -242,7 +242,7 @@ class TestCIConfig(unittest.TestCase): else: self.assertTrue( CI.get_job_ci_stage(job, non_blocking_ci=True) - in (CI.WorkflowStages.TESTS_1, 
CI.WorkflowStages.TESTS_2), + in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_2_WW), msg=f"Stage for [{job}] is not correct", ) @@ -478,6 +478,7 @@ class TestCIConfig(unittest.TestCase): pr_info = PRInfo(github_event=_TEST_EVENT_JSON) pr_info.event_type = EventType.PUSH pr_info.number = 0 + pr_info.head_ref = "24.12345" assert pr_info.is_release and not pr_info.is_merge_queue ci_cache = CIPY._configure_jobs( S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 55ce88917f4..a29c786e998 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -779,7 +779,7 @@ class SettingsRandomizer: "filesystem_prefetch_step_bytes": lambda: random.choice( [0, "100Mi"] ), # 0 means 'auto' - "compile_expressions": lambda: random.randint(0, 1), + # "compile_expressions": lambda: random.randint(0, 1), - this setting has a bug: https://github.com/ClickHouse/ClickHouse/issues/51264 "compile_aggregate_expressions": lambda: random.randint(0, 1), "compile_sort_description": lambda: random.randint(0, 1), "merge_tree_coarse_index_granularity": lambda: random.randint(2, 32), diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 3c3d1b6cc96..99fa626bd1e 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -43,7 +43,6 @@ "test_replicated_database/test.py::test_startup_without_zk", "test_replicated_database/test.py::test_sync_replica", "test_replicated_fetches_timeouts/test.py::test_no_stall", - "test_storage_kafka/test.py::test_kafka_no_holes_when_write_suffix_failed", "test_storage_s3/test.py::test_url_reconnect_in_the_middle", "test_system_metrics/test.py::test_readonly_metrics", "test_system_replicated_fetches/test.py::test_system_replicated_fetches", @@ -73,7 +72,7 @@ "test_keeper_map/test.py::test_keeper_map_without_zk", "test_replicated_merge_tree_wait_on_shutdown/test.py::test_shutdown_and_wait", - + "test_http_failover/test.py::test_url_destination_host_with_multiple_addrs", "test_http_failover/test.py::test_url_invalid_hostname", "test_http_failover/test.py::test_url_ip_change", @@ -97,5 +96,75 @@ "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_stop_moves_query", "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_table_detach", - "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_zookeeper_disconnect" + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_zookeeper_disconnect", + + "test_storage_kafka/test.py::test_kafka_column_types", + "test_storage_kafka/test.py::test_kafka_settings_old_syntax", + "test_storage_kafka/test.py::test_kafka_settings_new_syntax", + "test_storage_kafka/test.py::test_kafka_settings_predefined_macros", + "test_storage_kafka/test.py::test_kafka_json_as_string", + "test_storage_kafka/test.py::test_kafka_formats", + "test_storage_kafka/test.py::test_kafka_issue11308", + "test_storage_kafka/test.py::test_kafka_issue4116", + "test_storage_kafka/test.py::test_kafka_consumer_hang", + "test_storage_kafka/test.py::test_kafka_consumer_hang2", + "test_storage_kafka/test.py::test_kafka_read_consumers_in_parallel", + "test_storage_kafka/test.py::test_kafka_csv_with_delimiter", + "test_storage_kafka/test.py::test_kafka_tsv_with_delimiter", + "test_storage_kafka/test.py::test_kafka_select_empty", + "test_storage_kafka/test.py::test_kafka_json_without_delimiter", + 
"test_storage_kafka/test.py::test_kafka_protobuf", + "test_storage_kafka/test.py::test_kafka_string_field_on_first_position_in_protobuf", + "test_storage_kafka/test.py::test_kafka_protobuf_no_delimiter", + "test_storage_kafka/test.py::test_kafka_materialized_view", + "test_storage_kafka/test.py::test_kafka_recreate_kafka_table", + "test_storage_kafka/test.py::test_librdkafka_compression", + "test_storage_kafka/test.py::test_kafka_materialized_view_with_subquery", + "test_storage_kafka/test.py::test_kafka_many_materialized_views", + "test_storage_kafka/test.py::test_kafka_flush_on_big_message", + "test_storage_kafka/test.py::test_kafka_virtual_columns", + "test_storage_kafka/test.py::test_kafka_virtual_columns_with_materialized_view", + "test_storage_kafka/test.py::test_kafka_insert", + "test_storage_kafka/test.py::test_kafka_produce_consume", + "test_storage_kafka/test.py::test_kafka_commit_on_block_write", + "test_storage_kafka/test.py::test_kafka_virtual_columns2", + "test_storage_kafka/test.py::test_kafka_producer_consumer_separate_settings", + "test_storage_kafka/test.py::test_kafka_produce_key_timestamp", + "test_storage_kafka/test.py::test_kafka_insert_avro", + "test_storage_kafka/test.py::test_kafka_produce_consume_avro", + "test_storage_kafka/test.py::test_kafka_flush_by_time", + "test_storage_kafka/test.py::test_kafka_flush_by_block_size", + "test_storage_kafka/test.py::test_kafka_lot_of_partitions_partial_commit_of_bulk", + "test_storage_kafka/test.py::test_kafka_rebalance", + "test_storage_kafka/test.py::test_kafka_no_holes_when_write_suffix_failed", + "test_storage_kafka/test.py::test_exception_from_destructor", + "test_storage_kafka/test.py::test_commits_of_unprocessed_messages_on_drop", + "test_storage_kafka/test.py::test_bad_reschedule", + "test_storage_kafka/test.py::test_kafka_duplicates_when_commit_failed", + "test_storage_kafka/test.py::test_premature_flush_on_eof", + "test_storage_kafka/test.py::test_kafka_unavailable", + "test_storage_kafka/test.py::test_kafka_issue14202", + "test_storage_kafka/test.py::test_kafka_csv_with_thread_per_consumer", + "test_storage_kafka/test.py::test_kafka_engine_put_errors_to_stream", + "test_storage_kafka/test.py::test_kafka_engine_put_errors_to_stream_with_random_malformed_json", + "test_storage_kafka/test.py::test_kafka_formats_with_broken_message", + "test_storage_kafka/test.py::test_kafka_consumer_failover", + "test_storage_kafka/test.py::test_kafka_predefined_configuration", + "test_storage_kafka/test.py::test_issue26643", + "test_storage_kafka/test.py::test_num_consumers_limit", + "test_storage_kafka/test.py::test_format_with_prefix_and_suffix", + "test_storage_kafka/test.py::test_max_rows_per_message", + "test_storage_kafka/test.py::test_row_based_formats", + "test_storage_kafka/test.py::test_block_based_formats_1", + "test_storage_kafka/test.py::test_block_based_formats_2", + "test_storage_kafka/test.py::test_system_kafka_consumers", + "test_storage_kafka/test.py::test_system_kafka_consumers_rebalance", + "test_storage_kafka/test.py::test_system_kafka_consumers_rebalance_mv", + "test_storage_kafka/test.py::test_formats_errors", + "test_storage_kafka/test.py::test_multiple_read_in_materialized_views", + + "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string", + "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_request_new_ticket_after_expiration", + "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_no_kdc", + 
"test_storage_kerberized_kafka/test.py::test_kafka_config_from_sql_named_collection" ] diff --git a/tests/integration/test_backup_restore_on_cluster/test.py b/tests/integration/test_backup_restore_on_cluster/test.py index 700ed6f15f5..1b7f4aaa97d 100644 --- a/tests/integration/test_backup_restore_on_cluster/test.py +++ b/tests/integration/test_backup_restore_on_cluster/test.py @@ -776,7 +776,6 @@ def test_system_users(): def test_system_functions(): node1.query("CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;") - node1.query("CREATE FUNCTION parity_str AS (n) -> if(n % 2, 'odd', 'even');") backup_name = new_backup_name() @@ -817,6 +816,9 @@ def test_system_functions(): [[0, "even"], [1, "odd"], [2, "even"]] ) + node1.query("DROP FUNCTION linear_equation") + node1.query("DROP FUNCTION parity_str") + def test_projection(): node1.query( diff --git a/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py index cd0f2032559..f3b6993c4fd 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py @@ -9,13 +9,13 @@ import re cluster = ClickHouseCluster(__file__) -num_nodes = 10 +num_nodes = 2 def generate_cluster_def(): path = os.path.join( os.path.dirname(os.path.realpath(__file__)), - "./_gen/cluster_for_concurrency_test.xml", + "./_gen/cluster_for_test_disallow_concurrency.xml", ) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, "w") as f: @@ -111,22 +111,56 @@ def create_and_fill_table(): nodes[i].query(f"INSERT INTO tbl SELECT number FROM numbers(40000000)") -def wait_for_fail_backup(node, backup_id, backup_name): +def get_status_and_error(node, backup_or_restore_id): + return ( + node.query( + f"SELECT status, error FROM system.backups WHERE id == '{backup_or_restore_id}'" + ) + .rstrip("\n") + .split("\t") + ) + + +def wait_for_backup(node, backup_id): + assert_eq_with_retry( + node, + f"SELECT status FROM system.backups WHERE id = '{backup_id}'", + "BACKUP_CREATED", + sleep_time=2, + retry_count=50, + ) + + +def wait_for_restore(node, restore_id): + assert_eq_with_retry( + node, + f"SELECT status FROM system.backups WHERE id == '{restore_id}'", + "RESTORED", + sleep_time=2, + retry_count=50, + ) + + +def check_backup_error(error): expected_errors = [ "Concurrent backups not supported", - f"Backup {backup_name} already exists", + "BACKUP_ALREADY_EXISTS", ] - status = node.query( - f"SELECT status FROM system.backups WHERE id == '{backup_id}'" - ).rstrip("\n") + assert any([expected_error in error for expected_error in expected_errors]) + + +def check_restore_error(error): + expected_errors = [ + "Concurrent restores not supported", + "Cannot restore the table default.tbl because it already contains some data", + ] + assert any([expected_error in error for expected_error in expected_errors]) + + +def wait_for_backup_failure(node, backup_id): + status, error = get_status_and_error(node, backup_id) # It is possible that the second backup was picked up first, and then the async backup - if status == "BACKUP_FAILED": - error = node.query( - f"SELECT error FROM system.backups WHERE id == '{backup_id}'" - ).rstrip("\n") - assert any([expected_error in error for expected_error in expected_errors]) - return - elif status == "CREATING_BACKUP": + if status == "CREATING_BACKUP": assert_eq_with_retry( node, f"SELECT status FROM system.backups WHERE id = 
'{backup_id}'", @@ -134,31 +168,17 @@ def wait_for_fail_backup(node, backup_id, backup_name): sleep_time=2, retry_count=50, ) - error = node.query( - f"SELECT error FROM system.backups WHERE id == '{backup_id}'" - ).rstrip("\n") - assert re.search(f"Backup {backup_name} already exists", error) - return + status, error = get_status_and_error(node, backup_id) + if status == "BACKUP_FAILED": + check_backup_error(error) else: assert False, "Concurrent backups both passed, when one is expected to fail" -def wait_for_fail_restore(node, restore_id): - expected_errors = [ - "Concurrent restores not supported", - "Cannot restore the table default.tbl because it already contains some data", - ] - status = node.query( - f"SELECT status FROM system.backups WHERE id == '{restore_id}'" - ).rstrip("\n") +def wait_for_restore_failure(node, restore_id): + status, error = get_status_and_error(node, restore_id) # It is possible that the second backup was picked up first, and then the async backup - if status == "RESTORE_FAILED": - error = node.query( - f"SELECT error FROM system.backups WHERE id == '{restore_id}'" - ).rstrip("\n") - assert any([expected_error in error for expected_error in expected_errors]) - return - elif status == "RESTORING": + if status == "RESTORING": assert_eq_with_retry( node, f"SELECT status FROM system.backups WHERE id = '{restore_id}'", @@ -166,14 +186,9 @@ def wait_for_fail_restore(node, restore_id): sleep_time=2, retry_count=50, ) - error = node.query( - f"SELECT error FROM system.backups WHERE id == '{restore_id}'" - ).rstrip("\n") - assert re.search( - "Cannot restore the table default.tbl because it already contains some data", - error, - ) - return + status, error = get_status_and_error(node, restore_id) + if status == "RESTORE_FAILED": + check_restore_error(error) else: assert False, "Concurrent restores both passed, when one is expected to fail" @@ -188,39 +203,28 @@ def test_concurrent_backups_on_same_node(): backup_name = new_backup_name() - id = ( + # Backup #1. + id, status = ( nodes[0] .query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name} ASYNC") - .split("\t")[0] + .rstrip("\n") + .split("\t") ) - status = ( - nodes[0] - .query(f"SELECT status FROM system.backups WHERE id == '{id}'") - .rstrip("\n") - ) assert status in ["CREATING_BACKUP", "BACKUP_CREATED"] - result, error = nodes[0].query_and_get_answer_with_error( + # Backup #2. + _, error = nodes[0].query_and_get_answer_with_error( f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}" ) - expected_errors = [ - "Concurrent backups not supported", - f"Backup {backup_name} already exists", - ] - if not error: - wait_for_fail_backup(nodes[0], id, backup_name) - - assert any([expected_error in error for expected_error in expected_errors]) - - assert_eq_with_retry( - nodes[0], - f"SELECT status FROM system.backups WHERE id = '{id}'", - "BACKUP_CREATED", - sleep_time=2, - retry_count=50, - ) + if error: + # Backup #2 failed, backup #1 should be successful. + check_backup_error(error) + wait_for_backup(nodes[0], id) + else: + # Backup #2 was successful, backup #1 should fail. + wait_for_backup_failure(nodes[0], id) # This restore part is added to confirm creating an internal backup & restore work # even when a concurrent backup is stopped @@ -238,40 +242,38 @@ def test_concurrent_backups_on_different_nodes(): backup_name = new_backup_name() - id = ( - nodes[1] + # Backup #1. 
+ id, status = ( + nodes[0] .query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name} ASYNC") - .split("\t")[0] + .rstrip("\n") + .split("\t") ) - status = ( - nodes[1] - .query(f"SELECT status FROM system.backups WHERE id == '{id}'") - .rstrip("\n") - ) assert status in ["CREATING_BACKUP", "BACKUP_CREATED"] - result, error = nodes[0].query_and_get_answer_with_error( + # Backup #2. + _, error = nodes[1].query_and_get_answer_with_error( f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}" ) - expected_errors = [ - "Concurrent backups not supported", - f"Backup {backup_name} already exists", - ] + if error: + # Backup #2 failed, backup #1 should be successful. + check_backup_error(error) + wait_for_backup(nodes[0], id) + else: + # Backup #2 was successful, backup #1 should fail. + wait_for_backup_failure(nodes[0], id) - if not error: - wait_for_fail_backup(nodes[1], id, backup_name) - - assert any([expected_error in error for expected_error in expected_errors]) - - assert_eq_with_retry( - nodes[1], - f"SELECT status FROM system.backups WHERE id = '{id}'", - "BACKUP_CREATED", - sleep_time=2, - retry_count=50, + # This restore part is added to confirm creating an internal backup & restore work + # even when a concurrent backup is stopped + nodes[0].query( + f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC", + settings={ + "distributed_ddl_task_timeout": 360, + }, ) + nodes[0].query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}") def test_concurrent_restores_on_same_node(): @@ -288,40 +290,28 @@ def test_concurrent_restores_on_same_node(): }, ) - restore_id = ( + # Restore #1. + restore_id, status = ( nodes[0] .query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name} ASYNC") - .split("\t")[0] + .rstrip("\n") + .split("\t") ) - status = ( - nodes[0] - .query(f"SELECT status FROM system.backups WHERE id == '{restore_id}'") - .rstrip("\n") - ) assert status in ["RESTORING", "RESTORED"] - result, error = nodes[0].query_and_get_answer_with_error( + # Restore #2. + _, error = nodes[0].query_and_get_answer_with_error( f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}" ) - expected_errors = [ - "Concurrent restores not supported", - "Cannot restore the table default.tbl because it already contains some data", - ] - - if not error: - wait_for_fail_restore(nodes[0], restore_id) - - assert any([expected_error in error for expected_error in expected_errors]) - - assert_eq_with_retry( - nodes[0], - f"SELECT status FROM system.backups WHERE id == '{restore_id}'", - "RESTORED", - sleep_time=2, - retry_count=50, - ) + if error: + # Restore #2 failed, restore #1 should be successful. + check_restore_error(error) + wait_for_restore(nodes[0], restore_id) + else: + # Restore #2 was successful, restore #1 should fail. + wait_for_restore_failure(nodes[0], restore_id) def test_concurrent_restores_on_different_node(): @@ -338,37 +328,25 @@ def test_concurrent_restores_on_different_node(): }, ) - restore_id = ( + # Restore #1. + restore_id, status = ( nodes[0] .query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name} ASYNC") - .split("\t")[0] + .rstrip("\n") + .split("\t") ) - status = ( - nodes[0] - .query(f"SELECT status FROM system.backups WHERE id == '{restore_id}'") - .rstrip("\n") - ) assert status in ["RESTORING", "RESTORED"] - result, error = nodes[1].query_and_get_answer_with_error( + # Restore #2. 
+ _, error = nodes[1].query_and_get_answer_with_error( f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}" ) - expected_errors = [ - "Concurrent restores not supported", - "Cannot restore the table default.tbl because it already contains some data", - ] - - if not error: - wait_for_fail_restore(nodes[0], restore_id) - - assert any([expected_error in error for expected_error in expected_errors]) - - assert_eq_with_retry( - nodes[0], - f"SELECT status FROM system.backups WHERE id == '{restore_id}'", - "RESTORED", - sleep_time=2, - retry_count=50, - ) + if error: + # Restore #2 failed, restore #1 should be successful. + check_restore_error(error) + wait_for_restore(nodes[0], restore_id) + else: + # Restore #2 was successful, restore #1 should fail. + wait_for_restore_failure(nodes[0], restore_id) diff --git a/tests/integration/test_grant_and_revoke/configs/config.xml b/tests/integration/test_grant_and_revoke/configs/config_with_table_engine_grant.xml similarity index 100% rename from tests/integration/test_grant_and_revoke/configs/config.xml rename to tests/integration/test_grant_and_revoke/configs/config_with_table_engine_grant.xml diff --git a/tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml b/tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml new file mode 100644 index 00000000000..d3571f281f5 --- /dev/null +++ b/tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml @@ -0,0 +1,5 @@ + + + false + + diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test_with_table_engine_grant.py similarity index 99% rename from tests/integration/test_grant_and_revoke/test.py rename to tests/integration/test_grant_and_revoke/test_with_table_engine_grant.py index e533cced1e4..25ca7913e4e 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test_with_table_engine_grant.py @@ -5,7 +5,7 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( "instance", - main_configs=["configs/config.xml"], + main_configs=["configs/config_with_table_engine_grant.xml"], user_configs=["configs/users.d/users.xml"], ) diff --git a/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py b/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py new file mode 100644 index 00000000000..4a5dfb83f79 --- /dev/null +++ b/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py @@ -0,0 +1,82 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance( + "instance", + main_configs=["configs/config_without_table_engine_grant.xml"], + user_configs=["configs/users.d/users.xml"], +) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + + instance.query("CREATE DATABASE test") + + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_after_test(): + try: + yield + finally: + instance.query("DROP USER IF EXISTS A") + instance.query("DROP TABLE IF EXISTS test.table1") + + +def test_table_engine_and_source_grant(): + instance.query("DROP USER IF EXISTS A") + instance.query("CREATE USER A") + instance.query("GRANT CREATE TABLE ON test.table1 TO A") + + instance.query("GRANT POSTGRES ON *.* TO A") + + instance.query( + """ + 
CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + instance.query("DROP TABLE test.table1") + + instance.query("REVOKE POSTGRES ON *.* FROM A") + + assert "Not enough privileges" in instance.query_and_get_error( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + # expecting grant POSTGRES instead of grant PostgreSQL due to discrepancy between source access type and table engine + # similarily, other sources should also use their own defined name instead of the name of table engine + assert "grant POSTGRES ON *.*" in instance.query_and_get_error( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + instance.query("GRANT SOURCES ON *.* TO A") + + instance.query( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + instance.query("DROP TABLE test.table1") diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 5e04c9e4d12..406b50bc486 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -654,7 +654,7 @@ def test_table_override(started_cluster): instance.query(f"SELECT count() FROM {materialized_database}.{table_name}") ) - expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` String,\\n `_sign` Int8() MATERIALIZED 1,\\n `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)" + expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` String,\\n `_sign` Int8 MATERIALIZED 1,\\n `_version` UInt64 MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)" assert ( expected == instance.query( diff --git a/tests/queries/0_stateless/00366_multi_statements.reference b/tests/queries/0_stateless/00366_multi_statements.reference index 68f9261b937..71ca552522c 100644 --- a/tests/queries/0_stateless/00366_multi_statements.reference +++ b/tests/queries/0_stateless/00366_multi_statements.reference @@ -3,8 +3,10 @@ 1 1 Syntax error -Syntax error -Syntax error +1 +2 +1 +2 Syntax error Syntax error 1 diff --git a/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh index 9b885bb1b32..0b2e80fe457 100755 --- a/tests/queries/0_stateless/00366_multi_statements.sh +++ b/tests/queries/0_stateless/00366_multi_statements.sh @@ -10,8 +10,8 @@ $CLICKHOUSE_CLIENT --query="SELECT 1; " $CLICKHOUSE_CLIENT --query="SELECT 1 ; " $CLICKHOUSE_CLIENT --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' $CLICKHOUSE_CLIENT -n --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' diff --git a/tests/queries/0_stateless/00499_json_enum_insert.sql b/tests/queries/0_stateless/00499_json_enum_insert.sql index 
c4988d08243..24eab38c65f 100644 --- a/tests/queries/0_stateless/00499_json_enum_insert.sql +++ b/tests/queries/0_stateless/00499_json_enum_insert.sql @@ -4,12 +4,15 @@ DROP TABLE IF EXISTS json; CREATE TABLE json (x Enum8('browser' = 1, 'mobile' = 2), y String) ENGINE = Memory; INSERT INTO json (y) VALUES ('Hello'); + SELECT * FROM json ORDER BY y; INSERT INTO json (y) FORMAT JSONEachRow {"y": "World 1"}; + SELECT * FROM json ORDER BY y; INSERT INTO json (x, y) FORMAT JSONEachRow {"y": "World 2"}; + SELECT * FROM json ORDER BY y; DROP TABLE json; diff --git a/tests/queries/0_stateless/00517_date_parsing.sql b/tests/queries/0_stateless/00517_date_parsing.sql index 9067d39ba3e..4cfede7cb2b 100644 --- a/tests/queries/0_stateless/00517_date_parsing.sql +++ b/tests/queries/0_stateless/00517_date_parsing.sql @@ -7,6 +7,7 @@ INSERT INTO date VALUES ('2017-01-02'), ('2017-1-02'), ('2017-01-2'), ('2017-1-2 SELECT * FROM date; INSERT INTO date FORMAT JSONEachRow {"d": "2017-01-02"}, {"d": "2017-1-02"}, {"d": "2017-01-2"}, {"d": "2017-1-2"}, {"d": "2017/01/02"}, {"d": "2017/1/02"}, {"d": "2017/01/2"}, {"d": "2017/1/2"}, {"d": "2017-11-12"}; + SELECT * FROM date ORDER BY d; DROP TABLE date; diff --git a/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql b/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql index 33e010cdc9c..d2d62510e7b 100644 --- a/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql +++ b/tests/queries/0_stateless/00560_float_leading_plus_in_exponent.sql @@ -1,3 +1,5 @@ CREATE TEMPORARY TABLE test_float (x Float64); + INSERT INTO test_float FORMAT TabSeparated 1.075e+06 + SELECT * FROM test_float; diff --git a/tests/queries/0_stateless/00700_decimal_formats.sql b/tests/queries/0_stateless/00700_decimal_formats.sql index 45b2a5f1078..f008897bf5d 100644 --- a/tests/queries/0_stateless/00700_decimal_formats.sql +++ b/tests/queries/0_stateless/00700_decimal_formats.sql @@ -9,22 +9,22 @@ CREATE TABLE IF NOT EXISTS decimal INSERT INTO decimal (a, b, c) VALUES (42.0, -42.0, 42) (0.42, -0.42, .42) (42.42, -42.42, 42.42); INSERT INTO decimal (a, b, c) FORMAT JSONEachRow {"a":1.1, "b":-1.1, "c":1.1} {"a":1.0, "b":-1.0, "c":1} {"a":0.1, "b":-0.1, "c":.1}; + INSERT INTO decimal (a, b, c) FORMAT CSV 2.0,-2.0,2 -; + INSERT INTO decimal (a, b, c) FORMAT CSV 0.2 ,-0.2 ,.2 -; + INSERT INTO decimal (a, b, c) FORMAT CSV 2.2 , -2.2 , 2.2 -; + INSERT INTO decimal (a, b, c) FORMAT TabSeparated 3.3 -3.3 3.3 -; + INSERT INTO decimal (a, b, c) FORMAT TabSeparated 3.0 -3.0 3 -; + INSERT INTO decimal (a, b, c) FORMAT TabSeparated 0.3 -0.3 .3 -; + INSERT INTO decimal (a, b, c) FORMAT CSV 4.4E+5,-4E+8,.4E+20 -; + INSERT INTO decimal (a, b, c) FORMAT CSV 5.5e-2, -5e-9 ,.5e-17 -; SELECT * FROM decimal ORDER BY a FORMAT JSONEachRow; SELECT * FROM decimal ORDER BY b DESC FORMAT CSV; diff --git a/tests/queries/0_stateless/00760_insert_json_with_defaults.sql b/tests/queries/0_stateless/00760_insert_json_with_defaults.sql index 1430d10cdb0..54987258375 100644 --- a/tests/queries/0_stateless/00760_insert_json_with_defaults.sql +++ b/tests/queries/0_stateless/00760_insert_json_with_defaults.sql @@ -15,20 +15,29 @@ CREATE TABLE defaults ) ENGINE = MergeTree ORDER BY x; INSERT INTO defaults FORMAT JSONEachRow {"x":1, "y":1}; + INSERT INTO defaults (x, y) SELECT x, y FROM defaults LIMIT 1; + INSERT INTO defaults FORMAT JSONEachRow {"x":2, "y":2, "c":2}; + INSERT INTO defaults FORMAT JSONEachRow {"x":3, "y":3, "a":3, "b":3, "c":3}; + INSERT INTO defaults FORMAT JSONEachRow 
{"x":4} {"y":5, "c":5} {"a":6, "b":6, "c":6}; SELECT * FROM defaults ORDER BY (x, y); ALTER TABLE defaults ADD COLUMN n Nested(a UInt64, b String); + INSERT INTO defaults FORMAT JSONEachRow {"x":7, "y":7, "n.a":[1,2], "n.b":["a","b"]}; + SELECT * FROM defaults WHERE x = 7 FORMAT JSONEachRow; ALTER TABLE defaults ADD COLUMN n.c Array(UInt8) DEFAULT arrayMap(x -> 0, n.a) AFTER n.a; + INSERT INTO defaults FORMAT JSONEachRow {"x":8, "y":8, "n.a":[3,4], "n.b":["c","d"]}; + INSERT INTO defaults FORMAT JSONEachRow {"x":9, "y":9}; + SELECT * FROM defaults WHERE x > 7 ORDER BY x FORMAT JSONEachRow; DROP TABLE defaults; diff --git a/tests/queries/0_stateless/00861_decimal_quoted_csv.sql b/tests/queries/0_stateless/00861_decimal_quoted_csv.sql index e716c607ef8..d7211c55df7 100644 --- a/tests/queries/0_stateless/00861_decimal_quoted_csv.sql +++ b/tests/queries/0_stateless/00861_decimal_quoted_csv.sql @@ -2,15 +2,14 @@ DROP TABLE IF EXISTS test_00861; CREATE TABLE test_00861 (key UInt64, d32 Decimal32(2), d64 Decimal64(2), d128 Decimal128(2)) ENGINE = Memory; INSERT INTO test_00861 FORMAT CSV "1","1","1","1" -; + INSERT INTO test_00861 FORMAT CSV "2","-1","-1","-1" -; + INSERT INTO test_00861 FORMAT CSV "3","1.0","1.0","1.0" -; + INSERT INTO test_00861 FORMAT CSV "4","-0.1","-0.1","-0.1" -; + INSERT INTO test_00861 FORMAT CSV "5","0.010","0.010","0.010" -; SELECT * FROM test_00861 ORDER BY key; diff --git a/tests/queries/0_stateless/00940_max_parts_in_total.sql b/tests/queries/0_stateless/00940_max_parts_in_total.sql index e2633c2f0f8..955f593c907 100644 --- a/tests/queries/0_stateless/00940_max_parts_in_total.sql +++ b/tests/queries/0_stateless/00940_max_parts_in_total.sql @@ -1,6 +1,7 @@ drop table if exists max_parts_in_total; create table max_parts_in_total (x UInt64) ENGINE = MergeTree PARTITION BY x ORDER BY x SETTINGS max_parts_in_total = 10; +SET max_insert_threads = 1; INSERT INTO max_parts_in_total SELECT number FROM numbers(10); SELECT 1; INSERT INTO max_parts_in_total SELECT 123; -- { serverError TOO_MANY_PARTS } diff --git a/tests/queries/0_stateless/00945_ml_test.sql b/tests/queries/0_stateless/00945_ml_test.sql index 299b550bceb..c3a3a51b98c 100644 --- a/tests/queries/0_stateless/00945_ml_test.sql +++ b/tests/queries/0_stateless/00945_ml_test.sql @@ -7,7 +7,7 @@ CREATE TABLE IF NOT EXISTS defaults predict1 Float64, predict2 Float64 ) ENGINE = Memory; -insert into defaults values 
(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2) +insert into defaults values (1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2); DROP TABLE IF EXISTS model; create table model engine = Memory as select stochasticLogisticRegressionState(0.1, 0.0, 1.0, 'SGD')(target, param1, param2) as state from defaults; diff --git a/tests/queries/0_stateless/00946_ml_test.sql b/tests/queries/0_stateless/00946_ml_test.sql index b3f4941c2a7..a3da3b7e5c7 100644 --- a/tests/queries/0_stateless/00946_ml_test.sql +++ b/tests/queries/0_stateless/00946_ml_test.sql @@ -18,7 +18,7 @@ CREATE TABLE IF NOT EXISTS defaults predict7 Float64 ) ENGINE = Memory; -insert into defaults values 
(1.76210664421617,1.7469706406568504,0.7988286239230257,1.0938642223599824,1.167321139201246,1.7648182796261376,0.909111664354187,0.92,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,-0.06763531281107672,0.7988286239230257,0.5966532121963541,1.167321139201246,0.4551512643242912,0.909111664354187,0.76,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.07046681268810527,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-1.0056311758200724,0.909111664354187,0.72,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,0.11933920911869125,0.909111664354187,0.8,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.24499761810756004,-0.7274920231630502,-0.952028633990455,-1.3921908284581592,-0.5042604443804907,-0.6530285178541898,-1.0999748867047898,0.65,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.1512488252480781,1.2520781078928702,1.674257252879766,1.0938642223599824,-0.5042604443804907,1.244309594057455,0.909111664354187,0.9,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,0.6101272780073337,-0.6698191206144725,0.909111664354187,0.75,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.7685900343659244,-1.057420378339037,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-1.1735372034228724,-1.0999748867047898,0.68,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-1.2921824506242887,-0.8924562007510436,-1.8274572629471952,-1.3921908284581592,-2.175842027962227,-1.0056311758200724,-1.0999748867047898,0.5,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.5403910062799865,0.09732886477691666,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.0018049897967303734,-1.0999748867047898,0.45,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.7149218116994412,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.3340070654088696,0.909111664354187,0.52,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.889452617118896,0.5922213975408968,0.7988286239230257,0.5966532121963541,1.167321139201246,0.6734291002079332,0.909111664354187,0.84,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.9767180198286235,0.7571855751288902,0.7988286239230257,0.5966532121963541,1.167321139201246,0.8413351278107333,0.909111664354187,0.78,-0.5940592289464697,-0.7274920231630502,-0.
952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.8558554370756518,0.26229304236491,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.62,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,-1.618648166768315,-0.6698191206144725,0.909111664354187,0.61,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.24499761810756004,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.5019130930116695,-1.0999748867047898,0.54,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.016798590021622126,-0.06763531281107672,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,0.16971101739953035,-1.0999748867047898,0.66,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.1913293954410769,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.65,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.10406399273134952,0.4272572199529034,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,0.3376170450023333,-1.0999748867047898,0.63,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-1.2049170479145614,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.1661010378060696,-1.0999748867047898,0.62,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,-1.618648166768315,-1.1735372034228724,0.909111664354187,0.64,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.7149218116994412,1.087113930304877,0.7988286239230257,-0.3977688081309026,-1.618648166768315,-0.3340070654088696,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.9767180198286235,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.5129592382219361,0.909111664354187,0.94,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(1.5003104360869879,1.9119348182448437,1.674257252879766,1.5910752325236108,1.167321139201246,1.848771293427536,0.909111664354187,0.95,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(1.6748412415064426,1.9119348182448437,1.674257252879766,0.5966532121963541,0.052933416813421515,2.016677321030339,0.909111664354187,0.97,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(2.023902852345352,2.076898995832837,1.674257252879766,1.0938642223599824,1.167321139201246,1.680865265824736,0.90911166435
4187,0.94,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.4531256035702591,0.26229304236491,1.674257252879766,1.0938642223599824,0.052933416813421515,0.3376170450023333,-1.0999748867047898,0.76,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.6412440614631982,-1.5523129111030172,-0.952028633990455,-1.8894018386217877,-1.0614543055744028,-1.8451613138340752,0.909111664354187,0.44,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.9030402695923805,-2.377133799042984,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-2.348879396642477,-1.0999748867047898,0.46,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-0.5940592289464697,-1.3873487335150236,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-2.180973369039677,-1.0999748867047898,0.54,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.4667132560437435,-1.7172770886910105,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.8377251482172725,0.909111664354187,0.65,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.889452617118896,-0.7274920231630502,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.5019130930116695,0.909111664354187,0.74,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(1.8493720469258974,1.7469706406568504,0.7988286239230257,-0.3977688081309026,1.167321139201246,1.3450532106191362,0.909111664354187,0.91,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(2.023902852345352,1.087113930304877,1.674257252879766,0.5966532121963541,0.6101272780073337,1.680865265824736,0.909111664354187,0.9,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(1.2385142279578056,0.7571855751288902,1.674257252879766,0.5966532121963541,1.724515000395158,2.016677321030339,0.909111664354187,0.94,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.2785947981508043,0.4272572199529034,1.674257252879766,1.5910752325236108,1.724515000395158,1.0092411554135332,0.909111664354187,0.88,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.553978658753471,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.6101272780073337,-0.3340070654088696,-1.0999748867047898,0.64,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.4667132560437435,-0.39756366798706344,-1.8274572629471952,-2.386612848785416,-1.618648166768315,-1.3414432310256739,-1.0999748867047898,0.58,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-1.117651645204834,-0.39756366798706344,-1.8274572629471952,-0.3977688081309026,-2.175842027962227,-1.8451613138340752,-1.0999748867
047898,0.52,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.8558554370756518,0.09732886477691666,-0.952028633990455,0.5966532121963541,0.052933416813421515,-1.5093492586284738,-1.0999748867047898,0.48,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.7685900343659244,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.46,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.07046681268810527,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6698191206144725,0.909111664354187,0.49,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.3322630208172874,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.1661010378060696,0.909111664354187,0.53,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.3257796306675331,1.582006463068857,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.8413351278107333,-1.0999748867047898,0.87,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.8021872144091686,0.9221497527168835,1.674257252879766,1.0938642223599824,0.6101272780073337,1.3450532106191362,0.909111664354187,0.91,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.4531256035702591,0.4272572199529034,1.674257252879766,1.5910752325236108,0.6101272780073337,0.8413351278107333,0.909111664354187,0.88,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.0639834225383509,1.087113930304877,1.674257252879766,0.5966532121963541,1.724515000395158,1.1771471830163363,0.909111664354187,0.86,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.9366374496356247,1.9119348182448437,1.674257252879766,1.0938642223599824,0.6101272780073337,1.848771293427536,-1.0999748867047898,0.89,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.36586020086053167,0.4272572199529034,-0.0766000050337147,0.09944220203272576,1.724515000395158,0.42157005880373183,0.909111664354187,0.82,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.889452617118896,0.5922213975408968,0.7988286239230257,-0.3977688081309026,0.6101272780073337,-0.3340070654088696,0.909111664354187,0.78,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.3322630208172874,-1.5523129111030172,-0.0766000050337147,-0.8949798182945309,1.167321139201246,-0.5019130930116695,0.909111664354187,0.76,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.41952842352701486,-1.2223845559270303,-0.952028633990455,-1.8894018386217877,0.05
2933416813421515,-1.1735372034228724,0.909111664354187,0.56,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.5003104360869879,1.4170422854808635,0.7988286239230257,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.78,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.6276564089897139,0.7571855751288902,0.7988286239230257,0.5966532121963541,-1.0614543055744028,-0.8377251482172725,0.909111664354187,0.72,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-1.0056311758200724,-1.0999748867047898,0.7,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.2785947981508043,-0.7274920231630502,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-1.5093492586284738,-1.0999748867047898,0.64,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.07046681268810527,-0.8924562007510436,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-2.013067341436875,-1.0999748867047898,0.64,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.6412440614631982,-1.3873487335150236,-0.952028633990455,0.5966532121963541,-1.618648166768315,-1.6772552862312753,-1.0999748867047898,0.46,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.4667132560437435,-1.3873487335150236,-1.8274572629471952,-0.3977688081309026,-1.618648166768315,-3.0205035070536796,0.909111664354187,0.36,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.5067938262367422,-0.5625278455750569,-0.952028633990455,-1.3921908284581592,-1.618648166768315,-0.5019130930116695,-1.0999748867047898,0.42,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.681324631656197,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.8377251482172725,-1.0999748867047898,0.48,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.8558554370756518,-1.057420378339037,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-0.6698191206144725,-1.0999748867047898,0.47,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.117651645204834,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.6698191206144725,0.909111664354187,0.54,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.15773221539783266,-0.06763531281107672,-0.952028633990455,0.5966532121963541,-0.5042604443804907,-0.1661010378060696,0.909111664354187,0.56,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.392190828458159
2,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.7149218116994412,0.5922213975408968,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.16971101739953035,-1.0999748867047898,0.52,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.09944220203272576,0.052933416813421515,0.5391042781256927,-1.0999748867047898,0.55,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.889452617118896,1.087113930304877,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.7070103057284927,-1.0999748867047898,0.61,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.07046681268810527,-0.06763531281107672,-0.952028633990455,0.09944220203272576,0.052933416813421515,0.06896740083785215,0.909111664354187,0.57,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.10406399273134952,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.6101272780073337,1.0428223609340956,0.909111664354187,0.68,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.9767180198286235,1.2520781078928702,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9420787443724145,0.909111664354187,0.78,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(1.3257796306675331,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.7480276768658578,0.909111664354187,0.94,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(1.6748412415064426,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,1.9495149099892173,0.909111664354187,0.96,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.36586020086053167,0.5922213975408968,1.674257252879766,1.5910752325236108,1.724515000395158,1.4290062244205346,0.909111664354187,0.93,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.24499761810756004,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.7405915112490521,0.909111664354187,0.84,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.24499761810756004,-0.2325994903990701,-0.0766000050337147,-0.3977688081309026,1.724515000395158,0.5055230726051333,-1.0999748867047898,0.74,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(1.0639834225383509,1.087113930304877,-0.952028633990455,-1.3921908284581592,0.6101272780073337,-0.06535742124438843,0.909111664354187,0.72,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.889452617118896,0.7571855751288902,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.20329222292009272,0.909111664354187,0.74,-1.7285
094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.3794478533340162,-1.3873487335150236,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.6362379150939101,-1.0999748867047898,0.64,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.8157748668826532,-2.0472054438669973,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-1.777998902792955,0.909111664354187,0.44,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.990305672302108,-2.377133799042984,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-2.0802297524779956,-1.0999748867047898,0.46,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-0.972049970299513,0.909111664354187,0.5,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(2.023902852345352,2.076898995832837,0.7988286239230257,1.5910752325236108,1.724515000395158,1.5129592382219361,0.909111664354187,0.96,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.2785947981508043,0.4272572199529034,1.674257252879766,1.5910752325236108,1.167321139201246,1.0428223609340956,0.909111664354187,0.92,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.4531256035702591,1.2520781078928702,1.674257252879766,0.5966532121963541,1.167321139201246,1.2778907995780144,0.909111664354187,0.92,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(2.023902852345352,1.2520781078928702,1.674257252879766,1.0938642223599824,1.167321139201246,1.4290062244205346,0.909111664354187,0.94,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.1913293954410769,-0.7274920231630502,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.10254860635841155,-1.0999748867047898,0.76,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-0.15773221539783266,-0.2325994903990701,-0.0766000050337147,1.0938642223599824,0.052933416813421515,-0.30042585988831016,-1.0999748867047898,0.72,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.016798590021622126,-0.06763531281107672,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.535494298532232,-1.0999748867047898,0.66,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.24499761810756004,0.09732886477691666,-0.0766000050337147,1.0938642223599824,0.052933416813421515,-0.7705627371761508,-1.0999748867047898,0.64,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.07046681268810527,0.26229304236491,0.7988286239230
257,1.0938642223599824,0.052933416813421515,0.27045463396121155,0.909111664354187,0.74,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(0.10406399273134952,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.6101272780073337,-1.139955997902313,0.909111664354187,0.64,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.553978658753471,-1.7172770886910105,-0.0766000050337147,1.5910752325236108,0.052933416813421515,-1.5765116696695942,-1.0999748867047898,0.38,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.6412440614631982,-1.5523129111030172,-0.952028633990455,0.5966532121963541,-0.5042604443804907,-0.9552593675392334,-1.0999748867047898,0.34,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.3794478533340162,-1.7172770886910105,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-1.2071184089434333,0.909111664354187,0.44,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.2049170479145614,-1.3873487335150236,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898,0.36,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.117651645204834,-1.2223845559270303,0.7988286239230257,-1.8894018386217877,-1.0614543055744028,-1.2742808199845537,-1.0999748867047898,0.42,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.9431208397853792,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-1.0056311758200724,-1.0999748867047898,0.48,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(1.2385142279578056,2.076898995832837,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.6062666891668145,0.909111664354187,0.86,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(1.3257796306675331,1.9119348182448437,0.7988286239230257,1.5910752325236108,1.167321139201246,1.076403566454655,0.909111664354187,0.9,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(0.5403910062799865,0.9221497527168835,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.47194186708457386,0.909111664354187,0.79,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.4531256035702591,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.2332634488471884,0.909111664354187,0.71,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.8041439426967131,-1.0999748867047898,0.64,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.39219082845
81592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.24499761810756004,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.052933416813421515,-0.585866106813071,-1.0999748867047898,0.62,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.016798590021622126,-0.5625278455750569,-0.952028633990455,1.0938642223599824,0.6101272780073337,-0.2164728460869087,-1.0999748867047898,0.57,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.8021872144091686,0.7571855751288902,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.7573821140093348,0.909111664354187,0.74,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.07046681268810527,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.30403583948177093,0.909111664354187,0.69,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,1.167321139201246,0.9756599498929738,0.909111664354187,0.87,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.8493720469258974,1.582006463068857,0.7988286239230257,0.09944220203272576,1.167321139201246,1.4457968271808173,0.909111664354187,0.91,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.2385142279578056,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.3114720050985766,0.909111664354187,0.93,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.117651645204834,-0.7274920231630502,1.674257252879766,1.5910752325236108,0.6101272780073337,0.06896740083785215,-1.0999748867047898,0.68,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.0303862424951067,0.09732886477691666,1.674257252879766,-0.3977688081309026,-0.5042604443804907,-0.199682243326629,-1.0999748867047898,0.61,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.36586020086053167,0.26229304236491,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.13612981187897094,0.909111664354187,0.69,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.3794478533340162,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.4347506819705508,0.909111664354187,0.62,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(0.2785947981508043,0.4272572199529034,-0.952028633990455,0.5966532121963541,0.052933416813421515,-0.06535742124438843,-1.0999748867047898,0.72,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5067938262367422,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.2500
540516074711,0.909111664354187,0.59,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,0.7988286239230257,1.0938642223599824,1.167321139201246,0.7405915112490521,0.909111664354187,0.66,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.553978658753471,-0.8924562007510436,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.03538619531728977,-1.0999748867047898,0.56,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-2.3393672831410175,-0.5625278455750569,0.7988286239230257,-1.3921908284581592,-1.0614543055744028,-1.9123237248751956,-1.0999748867047898,0.45,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.8157748668826532,-1.3873487335150236,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-2.214554574560236,-1.0999748867047898,0.47,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(0.889452617118896,-0.5625278455750569,1.674257252879766,-0.3977688081309026,0.052933416813421515,0.4047794560434521,0.909111664354187,0.71,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,1.6137028547836172,0.909111664354187,0.94,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(1.5003104360869879,1.9119348182448437,1.674257252879766,1.0938642223599824,1.167321139201246,1.4793780327013768,0.909111664354187,0.94,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,0.7988286239230257,-1.8894018386217877,-1.0614543055744028,-0.40116947644999135,-1.0999748867047898,0.57,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.7685900343659244,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.6362379150939101,-1.0999748867047898,0.61,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.3794478533340162,-0.2325994903990701,0.7988286239230257,-0.8949798182945309,-0.5042604443804907,-0.2164728460869087,-1.0999748867047898,0.57,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.10254860635841155,0.909111664354187,0.64,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.5403910062799865,0.9221497527168835,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,1.2107283885368956,0.909111664354187,0.85,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.19132939544
10769,0.7571855751288902,-0.0766000050337147,-0.8949798182945309,-1.618648166768315,0.18650162015981303,0.909111664354187,0.78,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.8021872144091686,0.7571855751288902,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.8413351278107333,0.909111664354187,0.84,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.4130450333772604,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.2611001968177347,0.909111664354187,0.92,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.9366374496356247,1.087113930304877,1.674257252879766,0.5966532121963541,1.167321139201246,1.9495149099892173,0.909111664354187,0.96,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-1.2049170479145614,-0.39756366798706344,1.674257252879766,1.5910752325236108,1.167321139201246,0.08575800359813185,-1.0999748867047898,0.77,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.681324631656197,-0.39756366798706344,1.674257252879766,0.09944220203272576,0.052933416813421515,-0.06535742124438843,-1.0999748867047898,0.71,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.5403910062799865,0.7571855751288902,1.674257252879766,0.5966532121963541,1.167321139201246,0.30403583948177093,-1.0999748867047898,0.79,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.4130450333772604,0.9221497527168835,1.674257252879766,0.5966532121963541,0.6101272780073337,1.1435659774957738,0.909111664354187,0.89,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.24499761810756004,0.26229304236491,0.7988286239230257,0.09944220203272576,0.6101272780073337,0.2872452367214912,0.909111664354187,0.82,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,1.5910752325236108,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.76,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(-0.07046681268810527,-1.2223845559270303,-0.952028633990455,-1.8894018386217877,-0.5042604443804907,-0.7369815316555913,0.909111664354187,0.71,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.8021872144091686,1.4170422854808635,-0.952028633990455,1.0938642223599824,-0.5042604443804907,0.8077539222901738,0.909111664354187,0.8,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.10406399273134952,0.26229304236491,-1.8274572629471952,0.09944220203272576,0.052933416813421515,0.8749163333312926,-1.0999748867047898,0.78,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.05293341681342151
5,0.11933920911869125,0.909111664354187),(1.0639834225383509,0.4272572199529034,-0.952028633990455,0.5966532121963541,-0.5042604443804907,0.9252881416121347,0.909111664354187,0.84,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.3257796306675331,1.7469706406568504,-0.952028633990455,1.0938642223599824,0.052933416813421515,1.2778907995780144,0.909111664354187,0.9,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.2385142279578056,1.2520781078928702,1.674257252879766,0.5966532121963541,0.052933416813421515,1.412215621660255,0.909111664354187,0.92,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(2.023902852345352,2.076898995832837,0.7988286239230257,1.0938642223599824,0.6101272780073337,2.2181645541536983,0.909111664354187,0.97,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.7149218116994412,0.7571855751288902,-0.952028633990455,-0.3977688081309026,0.052933416813421515,0.6062666891668145,0.909111664354187,0.8,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.2785947981508043,0.9221497527168835,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,0.06896740083785215,0.909111664354187,0.81,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(-0.15773221539783266,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-0.199682243326629,-1.0999748867047898,0.75,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.8021872144091686,1.087113930304877,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.8581257305710129,0.909111664354187,0.83,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.9366374496356247,1.4170422854808635,0.7988286239230257,0.5966532121963541,0.052933416813421515,2.016677321030339,0.909111664354187,0.96,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.5067938262367422,-0.2325994903990701,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.5690755040527913,0.909111664354187,0.79,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(1.5003104360869879,1.087113930304877,0.7988286239230257,0.5966532121963541,0.6101272780073337,1.3954250188999753,0.909111664354187,0.93,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(1.3257796306675331,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.1435659774957738,0.909111664354187,0.94,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.36586020086053167,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,0.7741727167696144,0.909111664354187,0.86,0.6276564089897139,0.4272572199529034,0.7988286239
230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.6276564089897139,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.6101272780073337,0.25366403120093184,-1.0999748867047898,0.79,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.8021872144091686,0.09732886477691666,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.4887324698448536,-1.0999748867047898,0.8,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.41952842352701486,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.15292041463925066,-1.0999748867047898,0.77,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.15773221539783266,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-0.4347506819705508,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.681324631656197,-0.5625278455750569,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.5690755040527913,-1.0999748867047898,0.65,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.9431208397853792,-0.2325994903990701,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.7705627371761508,-1.0999748867047898,0.61,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-1.7285094641729257,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.1735372034228724,-1.0999748867047898,0.52,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.15773221539783266,-0.7274920231630502,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.57,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-1.6412440614631982,-1.3873487335150236,-1.8274572629471952,-1.8894018386217877,-0.5042604443804907,-1.9123237248751956,-1.0999748867047898,0.53,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.10406399273134952,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.1661010378060696,-1.0999748867047898,0.67,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.016798590021622126,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.06535742124438843,-1.0999748867047898,0.68,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.690219702968213,0.909111664354187,0.81,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,1.674257252879766,1.0938642223599824,0.6101272780073337,0.62305729
19270941,-1.0999748867047898,0.78,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-1.2921824506242887,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,1.724515000395158,-0.45154128473083044,-1.0999748867047898,0.65,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-0.5522849012925116,-1.0999748867047898,0.64,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-2.0775710750118352,-1.7172770886910105,-0.952028633990455,-1.3921908284581592,0.6101272780073337,-1.3414432310256739,0.909111664354187,0.64,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.5067938262367422,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.0392123813406318,-1.0999748867047898,0.65,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.41952842352701486,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.9384687647789537,0.909111664354187,0.68,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(1.5003104360869879,1.582006463068857,1.674257252879766,0.5966532121963541,1.167321139201246,0.7909633195298942,0.909111664354187,0.89,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,0.7988286239230257,0.5966532121963541,1.724515000395158,0.8917069360915754,0.909111664354187,0.86,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.5403910062799865,0.9221497527168835,0.7988286239230257,0.5966532121963541,1.167321139201246,1.0596129636943752,0.909111664354187,0.89,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.36586020086053167,0.5922213975408968,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.6230572919270941,0.909111664354187,0.87,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.2785947981508043,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.4551512643242912,0.909111664354187,0.85,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(1.0639834225383509,1.9119348182448437,0.7988286239230257,1.0938642223599824,1.167321139201246,0.9420787443724145,0.909111664354187,0.9,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.1913293954410769,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,-1.0999748867047898,0.82,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.681324631656197,0.09732886477691666,-0.07
66000050337147,-0.8949798182945309,-0.5042604443804907,-0.8041439426967131,-1.0999748867047898,0.72,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.8558554370756518,-0.8924562007510436,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.5522849012925116,-1.0999748867047898,0.73,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.4667132560437435,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.7369815316555913,-1.0999748867047898,0.71,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.0303862424951067,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.30042585988831016,-1.0999748867047898,0.71,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-1.2071184089434333,-1.0999748867047898,0.68,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.24499761810756004,0.4272572199529034,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.3376170450023333,-1.0999748867047898,0.75,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.07046681268810527,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.46833188749111015,-1.0999748867047898,0.72,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.889452617118896,0.9221497527168835,0.7988286239230257,1.0938642223599824,1.167321139201246,0.8581257305710129,0.909111664354187,0.89,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.016798590021622126,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.13612981187897094,0.909111664354187,0.84,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.5875758387967152,1.7469706406568504,1.674257252879766,1.0938642223599824,0.052933416813421515,1.412215621660255,0.909111664354187,0.93,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.2385142279578056,1.2520781078928702,1.674257252879766,1.0938642223599824,0.052933416813421515,1.2778907995780144,0.909111664354187,0.93,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.6276564089897139,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,0.8077539222901738,0.909111664354187,0.88,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.6276564089897139,0.5922213975408968,1.674257252879766,1.0938642223599824,0.6101272780073337,0.9420787443724145,0.909111664354187,0.9,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.5403910062799865,0.4272572199529034,1.674257252879766,0.59
66532121963541,1.724515000395158,0.6398478946873739,0.909111664354187,0.87,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.4531256035702591,1.087113930304877,1.674257252879766,1.0938642223599824,0.6101272780073337,0.572685483646252,0.909111664354187,0.86,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.6748412415064426,1.7469706406568504,1.674257252879766,1.0938642223599824,1.724515000395158,1.5633310465027752,0.909111664354187,0.94,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.07046681268810527,0.26229304236491,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.27045463396121155,-1.0999748867047898,0.77,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.8558554370756518,-0.06763531281107672,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.1325198322855102,0.909111664354187,0.78,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.9431208397853792,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,-0.5690755040527913,-1.0999748867047898,0.73,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,-0.952028633990455,0.09944220203272576,-1.0614543055744028,-0.45154128473083044,-1.0999748867047898,0.73,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.0766000050337147,1.0938642223599824,1.167321139201246,-0.2836352571280305,-1.0999748867047898,0.7,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.3322630208172874,-0.06763531281107672,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.15292041463925066,-1.0999748867047898,0.72,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(0.016798590021622126,-0.7274920231630502,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.0989386267649508,0.909111664354187,0.73,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.15773221539783266,0.4272572199529034,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.2332634488471884,0.909111664354187,0.72,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(2.023902852345352,2.076898995832837,1.674257252879766,1.0938642223599824,1.167321139201246,2.2013739513934185,0.909111664354187,0.97,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(1.5003104360869879,2.076898995832837,1.674257252879766,0.5966532121963541,1.724515000395158,2.1342115403522968,0.909111664354187,0.97,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.6412440614631982,-0.39756366798
706344,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.0989386267649508,-1.0999748867047898,0.69,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.9030402695923805,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5933022724298738,-1.0999748867047898,0.57,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.15773221539783266,-1.3873487335150236,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-1.1903278061831537,-1.0999748867047898,0.63,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.5940592289464697,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.972049970299513,0.909111664354187,0.66,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.0303862424951067,-0.2325994903990701,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.7369815316555913,-1.0999748867047898,0.64,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.3794478533340162,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.8041439426967131,0.909111664354187,0.68,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(0.7149218116994412,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.7741727167696144,0.909111664354187,0.79,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.9767180198286235,0.4272572199529034,0.7988286239230257,1.5910752325236108,0.6101272780073337,0.908497538851855,0.909111664354187,0.82,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.8493720469258974,2.076898995832837,0.7988286239230257,1.5910752325236108,1.724515000395158,1.7816088823864173,0.909111664354187,0.95,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.4130450333772604,1.9119348182448437,1.674257252879766,1.5910752325236108,1.167321139201246,1.9830961155097766,0.909111664354187,0.96,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.2385142279578056,1.582006463068857,0.7988286239230257,1.0938642223599824,1.724515000395158,1.3786344161396955,0.909111664354187,0.94,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.1512488252480781,1.4170422854808635,1.674257252879766,1.5910752325236108,1.167321139201246,1.2778907995780144,0.909111664354187,0.93,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.4531256035702591,0.7571855751288902,0.7988286239230257,1.0938642223599824,1.167321139201246,1.1099847719752143,0.909111664354187,0.91,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.39219082845
81592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.36586020086053167,0.26229304236491,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.8917069360915754,0.909111664354187,0.85,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.6276564089897139,0.4272572199529034,0.7988286239230257,-0.3977688081309026,0.052933416813421515,0.6230572919270941,0.909111664354187,0.84,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.41952842352701486,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.30042585988831016,-1.0999748867047898,0.74,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.3322630208172874,-0.7274920231630502,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.25366403120093184,-1.0999748867047898,0.76,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.07046681268810527,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.75,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.6276564089897139,0.9221497527168835,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.32082644224205065,-1.0999748867047898,0.76,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.7685900343659244,0.26229304236491,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.71,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.0303862424951067,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.6194473123336305,-1.0999748867047898,0.67,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.8157748668826532,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.9552593675392334,-1.0999748867047898,0.61,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.9431208397853792,0.4272572199529034,-0.952028633990455,0.09944220203272576,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.63,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.41952842352701486,0.4272572199529034,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.1157292295252305,-1.0999748867047898,0.64,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.10406399273134952,0.7571855751288902,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.11933920911869125,-1.0999748867047898,0.71,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.6276564089897139,0.5922213975408968,0.7988286239230257,-0.3977688081309026,-0.5
042604443804907,0.690219702968213,0.909111664354187,0.82,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.3322630208172874,-0.5625278455750569,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.08575800359813185,-1.0999748867047898,0.73,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.1913293954410769,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.45154128473083044,0.909111664354187,0.74,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.41952842352701486,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.5522849012925116,-1.0999748867047898,0.69,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.117651645204834,-1.2223845559270303,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.8880969564981116,-1.0999748867047898,0.64,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(1.1512488252480781,0.9221497527168835,1.674257252879766,1.5910752325236108,0.6101272780073337,1.1939377857766158,0.909111664354187,0.91,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.8021872144091686,0.5922213975408968,1.674257252879766,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187,0.88,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.5966532121963541,1.167321139201246,0.9588693471326941,0.909111664354187,0.85,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(1.0639834225383509,1.087113930304877,1.674257252879766,1.0938642223599824,1.724515000395158,0.9924505526532535,0.909111664354187,0.86,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.5940592289464697,-0.5625278455750569,-0.0766000050337147,-1.3921908284581592,0.052933416813421515,-0.3843788736897117,-1.0999748867047898,0.7,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.1903278061831537,-1.0999748867047898,0.59,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-1.8157748668826532,-1.057420378339037,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-1.5429304641490347,-1.0999748867047898,0.6,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.016798590021622126,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.753772134415871,-1.0999748867047898,0.65,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(
0.6276564089897139,1.2520781078928702,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.27045463396121155,0.909111664354187,0.7,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.7149218116994412,1.087113930304877,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.7405915112490521,0.909111664354187,0.76,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.24499761810756004,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.63,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.9767180198286235,0.4272572199529034,0.7988286239230257,0.5966532121963541,-1.0614543055744028,0.7070103057284927,0.909111664354187,0.81,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.07046681268810527,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.22008282568037243,-1.0999748867047898,0.72,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.199682243326629,-1.0999748867047898,0.71,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4551512643242912,0.909111664354187,0.8,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,0.5922213975408968,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.3879888532831724,0.909111664354187,0.77,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,-0.04856681848410872,0.909111664354187,0.74,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.07046681268810527,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.6734291002079332,-1.0999748867047898,0.7,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.10406399273134952,-1.2223845559270303,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.0989386267649508,0.909111664354187,0.71,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.5875758387967152,1.2520781078928702,0.7988286239230257,1.0938642223599824,1.167321139201246,1.8151900879069767,0.909111664354187,0.93,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,1.087113930304877,0.7988286239230257,0.5966532121963541,1.724515000395158,0.8749163333312926,-1.0999748867047898,0.85,-0.24499761810756004,-0.89245620075
10436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.8558554370756518,0.4272572199529034,0.7988286239230257,0.5966532121963541,1.167321139201246,-0.3843788736897117,-1.0999748867047898,0.79,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.681324631656197,-1.3873487335150236,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.76,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,-1.2223845559270303,-0.0766000050337147,0.5966532121963541,1.724515000395158,0.06896740083785215,0.909111664354187,0.78,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.8021872144091686,-0.8924562007510436,0.7988286239230257,1.5910752325236108,1.724515000395158,0.27045463396121155,0.909111664354187,0.77,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.2385142279578056,1.9119348182448437,0.7988286239230257,1.5910752325236108,1.167321139201246,1.244309594057455,0.909111664354187,0.9,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.889452617118896,0.09732886477691666,1.674257252879766,1.5910752325236108,0.052933416813421515,0.8917069360915754,0.909111664354187,0.87,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.41952842352701486,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.8545157509775522,-1.0999748867047898,0.71,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.7685900343659244,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.40116947644999135,0.909111664354187,0.7,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.6276564089897139,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-2.175842027962227,0.32082644224205065,0.909111664354187,0.7,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.7149218116994412,0.4272572199529034,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,0.27045463396121155,0.909111664354187,0.75,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,-0.8949798182945309,-1.0614543055744028,0.13612981187897094,-1.0999748867047898,0.71,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.2500540516074711,-1.0999748867047898,0.72,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.24499761810756004,-0.06763531281107672,-0.07660000503
37147,-0.3977688081309026,0.052933416813421515,-0.7201909288953117,0.909111664354187,0.73,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.889452617118896,0.9221497527168835,0.7988286239230257,1.0938642223599824,1.724515000395158,0.908497538851855,-1.0999748867047898,0.83,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.7685900343659244,0.09732886477691666,0.7988286239230257,1.0938642223599824,1.724515000395158,-0.4347506819705508,-1.0999748867047898,0.77,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.9431208397853792,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.6362379150939101,0.909111664354187,0.72,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-1.553978658753471,-1.8822412662790038,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.54,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-1.990305672302108,-2.0472054438669973,-1.8274572629471952,-1.8894018386217877,-2.175842027962227,-1.610092875190155,-1.0999748867047898,0.49,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.41952842352701486,-1.3873487335150236,-1.8274572629471952,-2.386612848785416,-2.175842027962227,-0.9888405730597928,0.909111664354187,0.52,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.15773221539783266,-1.2223845559270303,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.0895841896214724,-1.0999748867047898,0.58,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.6062666891668145,0.909111664354187,0.78,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(1.0639834225383509,0.9221497527168835,1.674257252879766,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187,0.89,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.2785947981508043,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.03538619531728977,-1.0999748867047898,0.7,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.7685900343659244,-0.7274920231630502,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.1828916405663493,-1.0999748867047898,0.66,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-1.117651645204834,-0.8924562007510436,-0.952028633990455,-0.3977688081309026,0.6101272780073337,0.22008282568037243,-1.0999748867047898,0.67,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.874916333
3312926,0.909111664354187),(-0.5067938262367422,-0.8924562007510436,-0.0766000050337147,1.0938642223599824,0.6101272780073337,0.06896740083785215,0.909111664354187,0.68,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.016798590021622126,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.8581257305710129,0.909111664354187,0.8,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.41952842352701486,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.32082644224205065,0.909111664354187,0.81,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.36586020086053167,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,0.5055230726051333,0.909111664354187,0.8,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(2.023902852345352,0.7571855751288902,0.7988286239230257,1.5910752325236108,1.167321139201246,1.7816088823864173,0.909111664354187,0.94,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(1.2385142279578056,1.4170422854808635,1.674257252879766,0.5966532121963541,0.6101272780073337,1.1099847719752143,0.909111664354187,0.93,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(1.6748412415064426,1.7469706406568504,1.674257252879766,1.0938642223599824,0.6101272780073337,0.9924505526532535,0.909111664354187,0.92,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(0.6276564089897139,1.087113930304877,1.674257252879766,1.5910752325236108,1.167321139201246,0.8077539222901738,0.909111664354187,0.89,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.24499761810756004,-0.5625278455750569,0.7988286239230257,1.5910752325236108,1.724515000395158,0.7070103057284927,-1.0999748867047898,0.82,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.3322630208172874,0.26229304236491,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.6734291002079332,-1.0999748867047898,0.79,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5933022724298738,-1.0999748867047898,0.58,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-1.4667132560437435,-0.8924562007510436,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.223909011703713,-1.0999748867047898,0.56,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-1.2921824506242887,-1.3873487335150236,-0.952028633990455,-2.386612848785416,-1.618648166768315,-1.056002984100913,-1.0999748867047898,0.56,0.016798590021622126,-0.562527845
5750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.41952842352701486,-1.5523129111030172,-1.8274572629471952,0.09944220203272576,-0.5042604443804907,-0.7034003261350319,0.909111664354187,0.64,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.07046681268810527,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.46833188749111015,0.909111664354187,0.61,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(0.016798590021622126,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,-0.04856681848410872,-1.0999748867047898,0.68,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.5940592289464697,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.11933920911869125,-1.0999748867047898,0.76,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.2785947981508043,2.076898995832837,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.8581257305710129,-1.0999748867047898,0.86,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(1.1512488252480781,1.087113930304877,-0.0766000050337147,1.0938642223599824,1.167321139201246,1.076403566454655,0.909111664354187,0.9,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-1.0303862424951067,0.7571855751288902,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.08575800359813185,-1.0999748867047898,0.71,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.681324631656197,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-1.0056311758200724,-1.0999748867047898,0.62,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.1913293954410769,0.09732886477691666,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.27045463396121155,-1.0999748867047898,0.66,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.4531256035702591,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.2500540516074711,0.909111664354187,0.65,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.5403910062799865,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.08214802400466813,0.909111664354187,0.73,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.3322630208172874,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.2836352571280305,-1.0999748867047898,0.62,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.36586020086053167,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.33
76170450023333,0.909111664354187,0.74,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.5403910062799865,0.4272572199529034,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.8413351278107333,0.909111664354187,0.79,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.6734291002079332,0.909111664354187,0.8,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.41952842352701486,0.09732886477691666,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.1157292295252305,-1.0999748867047898,0.69,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.7685900343659244,0.4272572199529034,0.7988286239230257,0.09944220203272576,-0.5042604443804907,0.0018049897967303734,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.23687342844065212,0.909111664354187,0.76,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.9767180198286235,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9756599498929738,0.909111664354187,0.84,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-0.5067938262367422,-0.06763531281107672,0.7988286239230257,1.0938642223599824,1.167321139201246,0.6734291002079332,0.909111664354187,0.78,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.3794478533340162,-1.2223845559270303,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.9384687647789537,-1.0999748867047898,0.67,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.0303862424951067,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-0.7873533399364304,-1.0999748867047898,0.66,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-0.7685900343659244,-0.5625278455750569,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.8880969564981116,-1.0999748867047898,0.65,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.6412440614631982,-1.057420378339037,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.54,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.4667132560437435,-1.3873487335150236,-1.8274572629471952,-2.386612848785416,-1.0614543055744028,-0.9888405730597928,-1.0999748867047898,0.58,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.6276564089897139,0.5922213975408
968,-0.0766000050337147,-0.8949798182945309,-1.618648166768315,0.3376170450023333,0.909111664354187,0.79,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.889452617118896,0.9221497527168835,0.7988286239230257,0.09944220203272576,-0.5042604443804907,0.15292041463925066,0.909111664354187,0.8,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.016798590021622126,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,0.052933416813421515,-0.1661010378060696,0.909111664354187,0.75,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.5403910062799865,-0.5625278455750569,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.2668446543677508,0.909111664354187,0.73,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.24499761810756004,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.5522849012925116,-1.0999748867047898,0.72,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.0303862424951067,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.7034003261350319,-1.0999748867047898,0.62,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.15773221539783266,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,-0.45154128473083044,-1.0999748867047898,0.67,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.8021872144091686,1.4170422854808635,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.908497538851855,0.909111664354187,0.81,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-0.0766000050337147,-1.3921908284581592,-1.618648166768315,-0.972049970299513,-1.0999748867047898,0.63,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.9030402695923805,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.69,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.6276564089897139,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.2872452367214912,0.909111664354187,0.8,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-2.175842027962227,-1.1903278061831537,-1.0999748867047898,0.43,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.889452617118896,0.9221497527168835,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.10254860635841155,0.909111664354187,0.8,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309
026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.5067938262367422,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.8041439426967131,0.909111664354187,0.73,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.7685900343659244,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.6530285178541898,0.909111664354187,0.75,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.1913293954410769,0.09732886477691666,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.0989386267649508,0.909111664354187,0.71,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.08575800359813185,0.909111664354187,0.73,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.7149218116994412,0.5922213975408968,0.7988286239230257,0.5966532121963541,1.167321139201246,0.8581257305710129,0.909111664354187,0.83,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.1913293954410769,0.4272572199529034,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,0.32082644224205065,-1.0999748867047898,0.72,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(1.3257796306675331,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.462587429941097,0.909111664354187,0.94,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.5403910062799865,0.09732886477691666,1.674257252879766,0.5966532121963541,0.6101272780073337,0.23687342844065212,0.909111664354187,0.81,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.6276564089897139,-0.06763531281107672,1.674257252879766,0.09944220203272576,0.6101272780073337,0.10254860635841155,0.909111664354187,0.81,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.2332634488471884,0.909111664354187,0.75,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.8021872144091686,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.27045463396121155,0.909111664354187,0.79,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.7685900343659244,-0.2325994903990701,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.6026567095733507,-1.0999748867047898,0.58,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.0303862424951067,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.7873533399364304,-1.0999748867047898,0.59,1.1512488252480781,1.4170422854808635,0.798
8286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.9030402695923805,-1.8822412662790038,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-2.1138109579985565,-1.0999748867047898,0.47,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.07046681268810527,-1.5523129111030172,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.9626955331560363,-1.0999748867047898,0.49,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.117651645204834,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.610092875190155,-1.0999748867047898,0.47,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.553978658753471,-2.2121696214549904,-1.8274572629471952,-2.386612848785416,-2.7330358891561395,-2.1138109579985565,-1.0999748867047898,0.42,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.2921824506242887,-1.3873487335150236,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-2.2649263828410766,-1.0999748867047898,0.57,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-0.3322630208172874,-1.057420378339037,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.9384687647789537,-1.0999748867047898,0.62,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.10406399273134952,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5522849012925116,0.909111664354187,0.74,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.7149218116994412,0.4272572199529034,0.7988286239230257,0.09944220203272576,0.6101272780073337,0.11933920911869125,0.909111664354187,0.73,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.2049170479145614,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.9048875592583913,0.909111664354187,0.64,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.4667132560437435,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.7201909288953117,-1.0999748867047898,0.63,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.7285094641729257,-1.5523129111030172,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5597210669093144,-1.0999748867047898,0.59,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.016798590021622126,-0.2325994903990701,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8041439426967131,-1.0999748867047898,0.73,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.889452617118896,0.26229304236491,-0.0766000050337147,0.
09944220203272576,0.6101272780073337,0.2872452367214912,0.909111664354187,0.79,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.3794478533340162,-0.5625278455750569,-0.952028633990455,0.09944220203272576,0.052933416813421515,-1.1903278061831537,0.909111664354187,0.68,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-0.24499761810756004,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.610092875190155,-1.0999748867047898,0.7,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.36586020086053167,-0.06763531281107672,-0.952028633990455,-1.3921908284581592,-2.175842027962227,-0.2668446543677508,-1.0999748867047898,0.81,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.724515000395158,0.06896740083785215,0.909111664354187,0.85,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.5003104360869879,1.4170422854808635,0.7988286239230257,0.5966532121963541,0.052933416813421515,1.580121649263055,0.909111664354187,0.93,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.8493720469258974,1.2520781078928702,1.674257252879766,1.0938642223599824,1.724515000395158,1.0596129636943752,0.909111664354187,0.91,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.9431208397853792,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.40116947644999135,-1.0999748867047898,0.69,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.5055230726051333,0.909111664354187,0.77,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.1512488252480781,1.087113930304877,0.7988286239230257,1.0938642223599824,-0.5042604443804907,0.9588693471326941,0.909111664354187,0.86,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,1.167321139201246,-0.4347506819705508,0.909111664354187,0.74,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.5067938262367422,-1.5523129111030172,-1.8274572629471952,-2.386612848785416,-1.0614543055744028,-1.9123237248751956,-1.0999748867047898,0.57,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-1.6412440614631982,-2.5420979766309775,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-1.2071184089434333,-1.0999748867047898,0.51,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-1.3794478533340162,-1.55231291110301
72,-1.8274572629471952,-1.3921908284581592,-0.5042604443804907,-0.9552593675392334,0.909111664354187,0.67,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187) +insert into defaults values (1.76210664421617,1.7469706406568504,0.7988286239230257,1.0938642223599824,1.167321139201246,1.7648182796261376,0.909111664354187,0.92,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,-0.06763531281107672,0.7988286239230257,0.5966532121963541,1.167321139201246,0.4551512643242912,0.909111664354187,0.76,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.07046681268810527,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-1.0056311758200724,0.909111664354187,0.72,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,0.11933920911869125,0.909111664354187,0.8,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.24499761810756004,-0.7274920231630502,-0.952028633990455,-1.3921908284581592,-0.5042604443804907,-0.6530285178541898,-1.0999748867047898,0.65,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.1512488252480781,1.2520781078928702,1.674257252879766,1.0938642223599824,-0.5042604443804907,1.244309594057455,0.909111664354187,0.9,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,0.6101272780073337,-0.6698191206144725,0.909111664354187,0.75,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.7685900343659244,-1.057420378339037,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-1.1735372034228724,-1.0999748867047898,0.68,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-1.2921824506242887,-0.8924562007510436,-1.8274572629471952,-1.3921908284581592,-2.175842027962227,-1.0056311758200724,-1.0999748867047898,0.5,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.5403910062799865,0.09732886477691666,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.0018049897967303734,-1.0999748867047898,0.45,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.7149218116994412,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.3340070654088696,0.909111664354187,0.52,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.889452617118896,0.5922213975408968,0.7988286239230257,0.5966532121963541,1.167321139201246,0.6734291002079332,0.909111664354187,0.84,-0.5940592289464697,-0.7274920231630502,-0.9520286339
90455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.9767180198286235,0.7571855751288902,0.7988286239230257,0.5966532121963541,1.167321139201246,0.8413351278107333,0.909111664354187,0.78,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.8558554370756518,0.26229304236491,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.62,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,-1.618648166768315,-0.6698191206144725,0.909111664354187,0.61,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.24499761810756004,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.5019130930116695,-1.0999748867047898,0.54,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.016798590021622126,-0.06763531281107672,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,0.16971101739953035,-1.0999748867047898,0.66,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.1913293954410769,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.65,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.10406399273134952,0.4272572199529034,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,0.3376170450023333,-1.0999748867047898,0.63,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-1.2049170479145614,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.1661010378060696,-1.0999748867047898,0.62,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,-1.618648166768315,-1.1735372034228724,0.909111664354187,0.64,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.7149218116994412,1.087113930304877,0.7988286239230257,-0.3977688081309026,-1.618648166768315,-0.3340070654088696,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.9767180198286235,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.5129592382219361,0.909111664354187,0.94,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(1.5003104360869879,1.9119348182448437,1.674257252879766,1.5910752325236108,1.167321139201246,1.848771293427536,0.909111664354187,0.95,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(1.6748412415064426,1.9119348182448437,1.674257252879766,0.5966532121963541,0.052933416813421515,2.016677321030339,0.9091116643541
87,0.97,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(2.023902852345352,2.076898995832837,1.674257252879766,1.0938642223599824,1.167321139201246,1.680865265824736,0.909111664354187,0.94,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.4531256035702591,0.26229304236491,1.674257252879766,1.0938642223599824,0.052933416813421515,0.3376170450023333,-1.0999748867047898,0.76,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.6412440614631982,-1.5523129111030172,-0.952028633990455,-1.8894018386217877,-1.0614543055744028,-1.8451613138340752,0.909111664354187,0.44,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.9030402695923805,-2.377133799042984,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-2.348879396642477,-1.0999748867047898,0.46,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-0.5940592289464697,-1.3873487335150236,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-2.180973369039677,-1.0999748867047898,0.54,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.4667132560437435,-1.7172770886910105,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.8377251482172725,0.909111664354187,0.65,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.889452617118896,-0.7274920231630502,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.5019130930116695,0.909111664354187,0.74,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(1.8493720469258974,1.7469706406568504,0.7988286239230257,-0.3977688081309026,1.167321139201246,1.3450532106191362,0.909111664354187,0.91,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(2.023902852345352,1.087113930304877,1.674257252879766,0.5966532121963541,0.6101272780073337,1.680865265824736,0.909111664354187,0.9,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(1.2385142279578056,0.7571855751288902,1.674257252879766,0.5966532121963541,1.724515000395158,2.016677321030339,0.909111664354187,0.94,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.2785947981508043,0.4272572199529034,1.674257252879766,1.5910752325236108,1.724515000395158,1.0092411554135332,0.909111664354187,0.88,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.553978658753471,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.6101272780073337,-0.3340070654088696,-1.0999748867047898,0.64,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.4667132560437435,-0.39756366798706344,-1.8274572629471952,-2.386612848785416,-1.618648166768315,-1.3414432310256739,-1.0999748867047898,0.58,0.
36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-1.117651645204834,-0.39756366798706344,-1.8274572629471952,-0.3977688081309026,-2.175842027962227,-1.8451613138340752,-1.0999748867047898,0.52,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.8558554370756518,0.09732886477691666,-0.952028633990455,0.5966532121963541,0.052933416813421515,-1.5093492586284738,-1.0999748867047898,0.48,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.7685900343659244,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.46,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.07046681268810527,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6698191206144725,0.909111664354187,0.49,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.3322630208172874,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.1661010378060696,0.909111664354187,0.53,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.3257796306675331,1.582006463068857,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.8413351278107333,-1.0999748867047898,0.87,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.8021872144091686,0.9221497527168835,1.674257252879766,1.0938642223599824,0.6101272780073337,1.3450532106191362,0.909111664354187,0.91,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.4531256035702591,0.4272572199529034,1.674257252879766,1.5910752325236108,0.6101272780073337,0.8413351278107333,0.909111664354187,0.88,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.0639834225383509,1.087113930304877,1.674257252879766,0.5966532121963541,1.724515000395158,1.1771471830163363,0.909111664354187,0.86,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.9366374496356247,1.9119348182448437,1.674257252879766,1.0938642223599824,0.6101272780073337,1.848771293427536,-1.0999748867047898,0.89,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.36586020086053167,0.4272572199529034,-0.0766000050337147,0.09944220203272576,1.724515000395158,0.42157005880373183,0.909111664354187,0.82,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.889452617118896,0.5922213975408968,0.7988286239230257,-0.3977688081309026,0.6101272780073337,-0.3340070654088696,0.909111664354187,0.78,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.3322630208172874,-1.5523129111030172,-0.0766000050337147,-0.8949798182945309,1.167321139201246,-0.
5019130930116695,0.909111664354187,0.76,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.41952842352701486,-1.2223845559270303,-0.952028633990455,-1.8894018386217877,0.052933416813421515,-1.1735372034228724,0.909111664354187,0.56,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.5003104360869879,1.4170422854808635,0.7988286239230257,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.78,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.6276564089897139,0.7571855751288902,0.7988286239230257,0.5966532121963541,-1.0614543055744028,-0.8377251482172725,0.909111664354187,0.72,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-1.0056311758200724,-1.0999748867047898,0.7,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.2785947981508043,-0.7274920231630502,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-1.5093492586284738,-1.0999748867047898,0.64,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.07046681268810527,-0.8924562007510436,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-2.013067341436875,-1.0999748867047898,0.64,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.6412440614631982,-1.3873487335150236,-0.952028633990455,0.5966532121963541,-1.618648166768315,-1.6772552862312753,-1.0999748867047898,0.46,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.4667132560437435,-1.3873487335150236,-1.8274572629471952,-0.3977688081309026,-1.618648166768315,-3.0205035070536796,0.909111664354187,0.36,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.5067938262367422,-0.5625278455750569,-0.952028633990455,-1.3921908284581592,-1.618648166768315,-0.5019130930116695,-1.0999748867047898,0.42,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.681324631656197,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.8377251482172725,-1.0999748867047898,0.48,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.8558554370756518,-1.057420378339037,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-0.6698191206144725,-1.0999748867047898,0.47,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.117651645204834,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.6698191206144725,0.909111664354187,0.54,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055
744028,-1.5765116696695942,-1.0999748867047898),(-0.15773221539783266,-0.06763531281107672,-0.952028633990455,0.5966532121963541,-0.5042604443804907,-0.1661010378060696,0.909111664354187,0.56,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.7149218116994412,0.5922213975408968,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.16971101739953035,-1.0999748867047898,0.52,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.09944220203272576,0.052933416813421515,0.5391042781256927,-1.0999748867047898,0.55,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.889452617118896,1.087113930304877,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.7070103057284927,-1.0999748867047898,0.61,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.07046681268810527,-0.06763531281107672,-0.952028633990455,0.09944220203272576,0.052933416813421515,0.06896740083785215,0.909111664354187,0.57,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.10406399273134952,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.6101272780073337,1.0428223609340956,0.909111664354187,0.68,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.9767180198286235,1.2520781078928702,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9420787443724145,0.909111664354187,0.78,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(1.3257796306675331,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.7480276768658578,0.909111664354187,0.94,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(1.6748412415064426,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,1.9495149099892173,0.909111664354187,0.96,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.36586020086053167,0.5922213975408968,1.674257252879766,1.5910752325236108,1.724515000395158,1.4290062244205346,0.909111664354187,0.93,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.24499761810756004,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.7405915112490521,0.909111664354187,0.84,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.24499761810756004,-0.2325994903990701,-0.0766000050337147,-0.3977688081309026,1.724515000395158,0.5055230726051333,-1.0999748867047898,0.74,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(1.0639834225383509,1.087113930304877,-0.952028633990455,-1.3921908284581592,0.6101272780073337,-0.06535742124438843,0.909111664354187,0.72,-1.7285094641729257,
-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.889452617118896,0.7571855751288902,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.20329222292009272,0.909111664354187,0.74,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.3794478533340162,-1.3873487335150236,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.6362379150939101,-1.0999748867047898,0.64,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.8157748668826532,-2.0472054438669973,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-1.777998902792955,0.909111664354187,0.44,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.990305672302108,-2.377133799042984,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-2.0802297524779956,-1.0999748867047898,0.46,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-0.972049970299513,0.909111664354187,0.5,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(2.023902852345352,2.076898995832837,0.7988286239230257,1.5910752325236108,1.724515000395158,1.5129592382219361,0.909111664354187,0.96,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.2785947981508043,0.4272572199529034,1.674257252879766,1.5910752325236108,1.167321139201246,1.0428223609340956,0.909111664354187,0.92,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.4531256035702591,1.2520781078928702,1.674257252879766,0.5966532121963541,1.167321139201246,1.2778907995780144,0.909111664354187,0.92,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(2.023902852345352,1.2520781078928702,1.674257252879766,1.0938642223599824,1.167321139201246,1.4290062244205346,0.909111664354187,0.94,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.1913293954410769,-0.7274920231630502,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.10254860635841155,-1.0999748867047898,0.76,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-0.15773221539783266,-0.2325994903990701,-0.0766000050337147,1.0938642223599824,0.052933416813421515,-0.30042585988831016,-1.0999748867047898,0.72,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.016798590021622126,-0.06763531281107672,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.535494298532232,-1.0999748867047898,0.66,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.24499761810756004,0.09732886477691666,-0.0766000050337147,1.0938642223
599824,0.052933416813421515,-0.7705627371761508,-1.0999748867047898,0.64,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.07046681268810527,0.26229304236491,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.27045463396121155,0.909111664354187,0.74,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(0.10406399273134952,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.6101272780073337,-1.139955997902313,0.909111664354187,0.64,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.553978658753471,-1.7172770886910105,-0.0766000050337147,1.5910752325236108,0.052933416813421515,-1.5765116696695942,-1.0999748867047898,0.38,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.6412440614631982,-1.5523129111030172,-0.952028633990455,0.5966532121963541,-0.5042604443804907,-0.9552593675392334,-1.0999748867047898,0.34,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.3794478533340162,-1.7172770886910105,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-1.2071184089434333,0.909111664354187,0.44,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.2049170479145614,-1.3873487335150236,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898,0.36,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.117651645204834,-1.2223845559270303,0.7988286239230257,-1.8894018386217877,-1.0614543055744028,-1.2742808199845537,-1.0999748867047898,0.42,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.9431208397853792,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-1.0056311758200724,-1.0999748867047898,0.48,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(1.2385142279578056,2.076898995832837,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.6062666891668145,0.909111664354187,0.86,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(1.3257796306675331,1.9119348182448437,0.7988286239230257,1.5910752325236108,1.167321139201246,1.076403566454655,0.909111664354187,0.9,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(0.5403910062799865,0.9221497527168835,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.47194186708457386,0.909111664354187,0.79,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.4531256035702591,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.2332634488471884,0.909111664354187,0.71,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.061454305574
4028,-1.5933022724298738,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.8041439426967131,-1.0999748867047898,0.64,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.24499761810756004,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.052933416813421515,-0.585866106813071,-1.0999748867047898,0.62,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.016798590021622126,-0.5625278455750569,-0.952028633990455,1.0938642223599824,0.6101272780073337,-0.2164728460869087,-1.0999748867047898,0.57,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.8021872144091686,0.7571855751288902,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.7573821140093348,0.909111664354187,0.74,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.07046681268810527,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.30403583948177093,0.909111664354187,0.69,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,1.167321139201246,0.9756599498929738,0.909111664354187,0.87,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.8493720469258974,1.582006463068857,0.7988286239230257,0.09944220203272576,1.167321139201246,1.4457968271808173,0.909111664354187,0.91,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.2385142279578056,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.3114720050985766,0.909111664354187,0.93,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.117651645204834,-0.7274920231630502,1.674257252879766,1.5910752325236108,0.6101272780073337,0.06896740083785215,-1.0999748867047898,0.68,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.0303862424951067,0.09732886477691666,1.674257252879766,-0.3977688081309026,-0.5042604443804907,-0.199682243326629,-1.0999748867047898,0.61,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.36586020086053167,0.26229304236491,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.13612981187897094,0.909111664354187,0.69,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.3794478533340162,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.4347506819705508,0.909111664354187,0.62,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(0.2785947981508043,0.4272572199529034,-0.952028633990455,0.5966532121963541,0.052933416813421515,-0.06535742124438843,-1.099
9748867047898,0.72,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5067938262367422,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.2500540516074711,0.909111664354187,0.59,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,0.7988286239230257,1.0938642223599824,1.167321139201246,0.7405915112490521,0.909111664354187,0.66,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.553978658753471,-0.8924562007510436,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.03538619531728977,-1.0999748867047898,0.56,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-2.3393672831410175,-0.5625278455750569,0.7988286239230257,-1.3921908284581592,-1.0614543055744028,-1.9123237248751956,-1.0999748867047898,0.45,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.8157748668826532,-1.3873487335150236,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-2.214554574560236,-1.0999748867047898,0.47,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(0.889452617118896,-0.5625278455750569,1.674257252879766,-0.3977688081309026,0.052933416813421515,0.4047794560434521,0.909111664354187,0.71,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,1.6137028547836172,0.909111664354187,0.94,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(1.5003104360869879,1.9119348182448437,1.674257252879766,1.0938642223599824,1.167321139201246,1.4793780327013768,0.909111664354187,0.94,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,0.7988286239230257,-1.8894018386217877,-1.0614543055744028,-0.40116947644999135,-1.0999748867047898,0.57,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.7685900343659244,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.6362379150939101,-1.0999748867047898,0.61,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.3794478533340162,-0.2325994903990701,0.7988286239230257,-0.8949798182945309,-0.5042604443804907,-0.2164728460869087,-1.0999748867047898,0.57,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.10254860635841155,0.909111664354187,0.64,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.5403910062799865,0.9
221497527168835,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,1.2107283885368956,0.909111664354187,0.85,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.1913293954410769,0.7571855751288902,-0.0766000050337147,-0.8949798182945309,-1.618648166768315,0.18650162015981303,0.909111664354187,0.78,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.8021872144091686,0.7571855751288902,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.8413351278107333,0.909111664354187,0.84,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.4130450333772604,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.2611001968177347,0.909111664354187,0.92,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.9366374496356247,1.087113930304877,1.674257252879766,0.5966532121963541,1.167321139201246,1.9495149099892173,0.909111664354187,0.96,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-1.2049170479145614,-0.39756366798706344,1.674257252879766,1.5910752325236108,1.167321139201246,0.08575800359813185,-1.0999748867047898,0.77,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.681324631656197,-0.39756366798706344,1.674257252879766,0.09944220203272576,0.052933416813421515,-0.06535742124438843,-1.0999748867047898,0.71,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.5403910062799865,0.7571855751288902,1.674257252879766,0.5966532121963541,1.167321139201246,0.30403583948177093,-1.0999748867047898,0.79,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.4130450333772604,0.9221497527168835,1.674257252879766,0.5966532121963541,0.6101272780073337,1.1435659774957738,0.909111664354187,0.89,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.24499761810756004,0.26229304236491,0.7988286239230257,0.09944220203272576,0.6101272780073337,0.2872452367214912,0.909111664354187,0.82,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,1.5910752325236108,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.76,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(-0.07046681268810527,-1.2223845559270303,-0.952028633990455,-1.8894018386217877,-0.5042604443804907,-0.7369815316555913,0.909111664354187,0.71,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.8021872144091686,1.4170422854808635,-0.952028633990455,1.0938642223599824,-0.5042604443804907,0.8077539222901738,0.909111664354187,0.8,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.119339209118
69125,0.909111664354187),(0.10406399273134952,0.26229304236491,-1.8274572629471952,0.09944220203272576,0.052933416813421515,0.8749163333312926,-1.0999748867047898,0.78,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.0639834225383509,0.4272572199529034,-0.952028633990455,0.5966532121963541,-0.5042604443804907,0.9252881416121347,0.909111664354187,0.84,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.3257796306675331,1.7469706406568504,-0.952028633990455,1.0938642223599824,0.052933416813421515,1.2778907995780144,0.909111664354187,0.9,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.2385142279578056,1.2520781078928702,1.674257252879766,0.5966532121963541,0.052933416813421515,1.412215621660255,0.909111664354187,0.92,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(2.023902852345352,2.076898995832837,0.7988286239230257,1.0938642223599824,0.6101272780073337,2.2181645541536983,0.909111664354187,0.97,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.7149218116994412,0.7571855751288902,-0.952028633990455,-0.3977688081309026,0.052933416813421515,0.6062666891668145,0.909111664354187,0.8,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.2785947981508043,0.9221497527168835,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,0.06896740083785215,0.909111664354187,0.81,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(-0.15773221539783266,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-0.199682243326629,-1.0999748867047898,0.75,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.8021872144091686,1.087113930304877,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.8581257305710129,0.909111664354187,0.83,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.9366374496356247,1.4170422854808635,0.7988286239230257,0.5966532121963541,0.052933416813421515,2.016677321030339,0.909111664354187,0.96,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.5067938262367422,-0.2325994903990701,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.5690755040527913,0.909111664354187,0.79,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(1.5003104360869879,1.087113930304877,0.7988286239230257,0.5966532121963541,0.6101272780073337,1.3954250188999753,0.909111664354187,0.93,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(1.3257796306675331,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.1435659774957738,0.909111664354187,0.94,0.6276564089897139,0.4272572199529034,0.7988286239230
257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.36586020086053167,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,0.7741727167696144,0.909111664354187,0.86,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.6276564089897139,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.6101272780073337,0.25366403120093184,-1.0999748867047898,0.79,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.8021872144091686,0.09732886477691666,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.4887324698448536,-1.0999748867047898,0.8,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.41952842352701486,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.15292041463925066,-1.0999748867047898,0.77,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.15773221539783266,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-0.4347506819705508,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.681324631656197,-0.5625278455750569,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.5690755040527913,-1.0999748867047898,0.65,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.9431208397853792,-0.2325994903990701,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.7705627371761508,-1.0999748867047898,0.61,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-1.7285094641729257,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.1735372034228724,-1.0999748867047898,0.52,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.15773221539783266,-0.7274920231630502,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.57,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-1.6412440614631982,-1.3873487335150236,-1.8274572629471952,-1.8894018386217877,-0.5042604443804907,-1.9123237248751956,-1.0999748867047898,0.53,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.10406399273134952,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.1661010378060696,-1.0999748867047898,0.67,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.016798590021622126,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.06535742124438843,-1.0999748867047898,0.68,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.690219702968213,0.9
09111664354187,0.81,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,1.674257252879766,1.0938642223599824,0.6101272780073337,0.6230572919270941,-1.0999748867047898,0.78,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-1.2921824506242887,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,1.724515000395158,-0.45154128473083044,-1.0999748867047898,0.65,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-0.5522849012925116,-1.0999748867047898,0.64,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-2.0775710750118352,-1.7172770886910105,-0.952028633990455,-1.3921908284581592,0.6101272780073337,-1.3414432310256739,0.909111664354187,0.64,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.5067938262367422,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.0392123813406318,-1.0999748867047898,0.65,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.41952842352701486,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.9384687647789537,0.909111664354187,0.68,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(1.5003104360869879,1.582006463068857,1.674257252879766,0.5966532121963541,1.167321139201246,0.7909633195298942,0.909111664354187,0.89,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,0.7988286239230257,0.5966532121963541,1.724515000395158,0.8917069360915754,0.909111664354187,0.86,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.5403910062799865,0.9221497527168835,0.7988286239230257,0.5966532121963541,1.167321139201246,1.0596129636943752,0.909111664354187,0.89,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.36586020086053167,0.5922213975408968,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.6230572919270941,0.909111664354187,0.87,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.2785947981508043,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.4551512643242912,0.909111664354187,0.85,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(1.0639834225383509,1.9119348182448437,0.7988286239230257,1.0938642223599824,1.167321139201246,0.9420787443724145,0.909111664354187,0.9,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.1913293954410769,0.4272572199529034,-0.0766000050
337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,-1.0999748867047898,0.82,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.681324631656197,0.09732886477691666,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.8041439426967131,-1.0999748867047898,0.72,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.8558554370756518,-0.8924562007510436,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.5522849012925116,-1.0999748867047898,0.73,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.4667132560437435,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.7369815316555913,-1.0999748867047898,0.71,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.0303862424951067,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.30042585988831016,-1.0999748867047898,0.71,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-1.2071184089434333,-1.0999748867047898,0.68,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.24499761810756004,0.4272572199529034,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.3376170450023333,-1.0999748867047898,0.75,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.07046681268810527,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.46833188749111015,-1.0999748867047898,0.72,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.889452617118896,0.9221497527168835,0.7988286239230257,1.0938642223599824,1.167321139201246,0.8581257305710129,0.909111664354187,0.89,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.016798590021622126,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.13612981187897094,0.909111664354187,0.84,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.5875758387967152,1.7469706406568504,1.674257252879766,1.0938642223599824,0.052933416813421515,1.412215621660255,0.909111664354187,0.93,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.2385142279578056,1.2520781078928702,1.674257252879766,1.0938642223599824,0.052933416813421515,1.2778907995780144,0.909111664354187,0.93,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.6276564089897139,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,0.8077539222901738,0.909111664354187,0.88,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.6276564089897139,0.5922213975408968,1.674257252879766,1.0
938642223599824,0.6101272780073337,0.9420787443724145,0.909111664354187,0.9,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.5403910062799865,0.4272572199529034,1.674257252879766,0.5966532121963541,1.724515000395158,0.6398478946873739,0.909111664354187,0.87,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.4531256035702591,1.087113930304877,1.674257252879766,1.0938642223599824,0.6101272780073337,0.572685483646252,0.909111664354187,0.86,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.6748412415064426,1.7469706406568504,1.674257252879766,1.0938642223599824,1.724515000395158,1.5633310465027752,0.909111664354187,0.94,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.07046681268810527,0.26229304236491,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.27045463396121155,-1.0999748867047898,0.77,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.8558554370756518,-0.06763531281107672,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.1325198322855102,0.909111664354187,0.78,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.9431208397853792,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,-0.5690755040527913,-1.0999748867047898,0.73,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,-0.952028633990455,0.09944220203272576,-1.0614543055744028,-0.45154128473083044,-1.0999748867047898,0.73,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.0766000050337147,1.0938642223599824,1.167321139201246,-0.2836352571280305,-1.0999748867047898,0.7,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.3322630208172874,-0.06763531281107672,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.15292041463925066,-1.0999748867047898,0.72,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(0.016798590021622126,-0.7274920231630502,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.0989386267649508,0.909111664354187,0.73,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.15773221539783266,0.4272572199529034,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.2332634488471884,0.909111664354187,0.72,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(2.023902852345352,2.076898995832837,1.674257252879766,1.0938642223599824,1.167321139201246,2.2013739513934185,0.909111664354187,0.97,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(1.5003104360869879,2.076898995832837,1.674
257252879766,0.5966532121963541,1.724515000395158,2.1342115403522968,0.909111664354187,0.97,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.6412440614631982,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.0989386267649508,-1.0999748867047898,0.69,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.9030402695923805,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5933022724298738,-1.0999748867047898,0.57,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.15773221539783266,-1.3873487335150236,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-1.1903278061831537,-1.0999748867047898,0.63,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.5940592289464697,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.972049970299513,0.909111664354187,0.66,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.0303862424951067,-0.2325994903990701,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.7369815316555913,-1.0999748867047898,0.64,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.3794478533340162,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.8041439426967131,0.909111664354187,0.68,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(0.7149218116994412,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.7741727167696144,0.909111664354187,0.79,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.9767180198286235,0.4272572199529034,0.7988286239230257,1.5910752325236108,0.6101272780073337,0.908497538851855,0.909111664354187,0.82,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.8493720469258974,2.076898995832837,0.7988286239230257,1.5910752325236108,1.724515000395158,1.7816088823864173,0.909111664354187,0.95,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.4130450333772604,1.9119348182448437,1.674257252879766,1.5910752325236108,1.167321139201246,1.9830961155097766,0.909111664354187,0.96,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.2385142279578056,1.582006463068857,0.7988286239230257,1.0938642223599824,1.724515000395158,1.3786344161396955,0.909111664354187,0.94,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.1512488252480781,1.4170422854808635,1.674257252879766,1.5910752325236108,1.167321139201246,1.2778907995780144,0.909111664354187,0.93,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.0529
33416813421515,-0.8377251482172725,-1.0999748867047898),(0.4531256035702591,0.7571855751288902,0.7988286239230257,1.0938642223599824,1.167321139201246,1.1099847719752143,0.909111664354187,0.91,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.36586020086053167,0.26229304236491,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.8917069360915754,0.909111664354187,0.85,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.6276564089897139,0.4272572199529034,0.7988286239230257,-0.3977688081309026,0.052933416813421515,0.6230572919270941,0.909111664354187,0.84,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.41952842352701486,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.30042585988831016,-1.0999748867047898,0.74,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.3322630208172874,-0.7274920231630502,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.25366403120093184,-1.0999748867047898,0.76,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.07046681268810527,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.75,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.6276564089897139,0.9221497527168835,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.32082644224205065,-1.0999748867047898,0.76,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.7685900343659244,0.26229304236491,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.71,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.0303862424951067,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.6194473123336305,-1.0999748867047898,0.67,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.8157748668826532,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.9552593675392334,-1.0999748867047898,0.61,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.9431208397853792,0.4272572199529034,-0.952028633990455,0.09944220203272576,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.63,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.41952842352701486,0.4272572199529034,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.1157292295252305,-1.0999748867047898,0.64,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.10406399273134952,0.7571855751288902,-0.0766000050337147,0.5966532121963541,0.052933416813421515
,0.11933920911869125,-1.0999748867047898,0.71,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.6276564089897139,0.5922213975408968,0.7988286239230257,-0.3977688081309026,-0.5042604443804907,0.690219702968213,0.909111664354187,0.82,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.3322630208172874,-0.5625278455750569,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.08575800359813185,-1.0999748867047898,0.73,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.1913293954410769,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.45154128473083044,0.909111664354187,0.74,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.41952842352701486,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.5522849012925116,-1.0999748867047898,0.69,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.117651645204834,-1.2223845559270303,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.8880969564981116,-1.0999748867047898,0.64,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(1.1512488252480781,0.9221497527168835,1.674257252879766,1.5910752325236108,0.6101272780073337,1.1939377857766158,0.909111664354187,0.91,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.8021872144091686,0.5922213975408968,1.674257252879766,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187,0.88,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.5966532121963541,1.167321139201246,0.9588693471326941,0.909111664354187,0.85,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(1.0639834225383509,1.087113930304877,1.674257252879766,1.0938642223599824,1.724515000395158,0.9924505526532535,0.909111664354187,0.86,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.5940592289464697,-0.5625278455750569,-0.0766000050337147,-1.3921908284581592,0.052933416813421515,-0.3843788736897117,-1.0999748867047898,0.7,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.1903278061831537,-1.0999748867047898,0.59,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-1.8157748668826532,-1.057420378339037,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-1.5429304641490347,-1.0999748867047898,0.6,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.016798590021
622126,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.753772134415871,-1.0999748867047898,0.65,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.6276564089897139,1.2520781078928702,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.27045463396121155,0.909111664354187,0.7,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.7149218116994412,1.087113930304877,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.7405915112490521,0.909111664354187,0.76,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.24499761810756004,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.63,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.9767180198286235,0.4272572199529034,0.7988286239230257,0.5966532121963541,-1.0614543055744028,0.7070103057284927,0.909111664354187,0.81,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.07046681268810527,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.22008282568037243,-1.0999748867047898,0.72,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.199682243326629,-1.0999748867047898,0.71,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4551512643242912,0.909111664354187,0.8,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,0.5922213975408968,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.3879888532831724,0.909111664354187,0.77,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,-0.04856681848410872,0.909111664354187,0.74,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.07046681268810527,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.6734291002079332,-1.0999748867047898,0.7,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.10406399273134952,-1.2223845559270303,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.0989386267649508,0.909111664354187,0.71,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.5875758387967152,1.2520781078928702,0.7988286239230257,1.0938642223599824,1.167321139201246,1.8151900879069767,0.909111664354187,0.93,-0.24499761810756004,-0.8924562007510436,-0.9520
28633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,1.087113930304877,0.7988286239230257,0.5966532121963541,1.724515000395158,0.8749163333312926,-1.0999748867047898,0.85,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.8558554370756518,0.4272572199529034,0.7988286239230257,0.5966532121963541,1.167321139201246,-0.3843788736897117,-1.0999748867047898,0.79,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.681324631656197,-1.3873487335150236,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.76,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,-1.2223845559270303,-0.0766000050337147,0.5966532121963541,1.724515000395158,0.06896740083785215,0.909111664354187,0.78,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.8021872144091686,-0.8924562007510436,0.7988286239230257,1.5910752325236108,1.724515000395158,0.27045463396121155,0.909111664354187,0.77,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.2385142279578056,1.9119348182448437,0.7988286239230257,1.5910752325236108,1.167321139201246,1.244309594057455,0.909111664354187,0.9,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.889452617118896,0.09732886477691666,1.674257252879766,1.5910752325236108,0.052933416813421515,0.8917069360915754,0.909111664354187,0.87,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.41952842352701486,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.8545157509775522,-1.0999748867047898,0.71,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.7685900343659244,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.40116947644999135,0.909111664354187,0.7,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.6276564089897139,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-2.175842027962227,0.32082644224205065,0.909111664354187,0.7,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.7149218116994412,0.4272572199529034,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,0.27045463396121155,0.909111664354187,0.75,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,-0.8949798182945309,-1.0614543055744028,0.13612981187897094,-1.0999748867047898,0.71,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.952028633990455,-1.3921908284581
592,-1.0614543055744028,-0.2500540516074711,-1.0999748867047898,0.72,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.24499761810756004,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.7201909288953117,0.909111664354187,0.73,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.889452617118896,0.9221497527168835,0.7988286239230257,1.0938642223599824,1.724515000395158,0.908497538851855,-1.0999748867047898,0.83,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.7685900343659244,0.09732886477691666,0.7988286239230257,1.0938642223599824,1.724515000395158,-0.4347506819705508,-1.0999748867047898,0.77,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.9431208397853792,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.6362379150939101,0.909111664354187,0.72,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-1.553978658753471,-1.8822412662790038,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.54,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-1.990305672302108,-2.0472054438669973,-1.8274572629471952,-1.8894018386217877,-2.175842027962227,-1.610092875190155,-1.0999748867047898,0.49,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.41952842352701486,-1.3873487335150236,-1.8274572629471952,-2.386612848785416,-2.175842027962227,-0.9888405730597928,0.909111664354187,0.52,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.15773221539783266,-1.2223845559270303,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.0895841896214724,-1.0999748867047898,0.58,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.6062666891668145,0.909111664354187,0.78,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(1.0639834225383509,0.9221497527168835,1.674257252879766,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187,0.89,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.2785947981508043,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.03538619531728977,-1.0999748867047898,0.7,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.7685900343659244,-0.7274920231630502,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.1828916405663493,-1.0999748867047898,0.66,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.9
09111664354187),(-1.117651645204834,-0.8924562007510436,-0.952028633990455,-0.3977688081309026,0.6101272780073337,0.22008282568037243,-1.0999748867047898,0.67,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.5067938262367422,-0.8924562007510436,-0.0766000050337147,1.0938642223599824,0.6101272780073337,0.06896740083785215,0.909111664354187,0.68,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.016798590021622126,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.8581257305710129,0.909111664354187,0.8,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.41952842352701486,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.32082644224205065,0.909111664354187,0.81,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.36586020086053167,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,0.5055230726051333,0.909111664354187,0.8,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(2.023902852345352,0.7571855751288902,0.7988286239230257,1.5910752325236108,1.167321139201246,1.7816088823864173,0.909111664354187,0.94,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(1.2385142279578056,1.4170422854808635,1.674257252879766,0.5966532121963541,0.6101272780073337,1.1099847719752143,0.909111664354187,0.93,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(1.6748412415064426,1.7469706406568504,1.674257252879766,1.0938642223599824,0.6101272780073337,0.9924505526532535,0.909111664354187,0.92,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(0.6276564089897139,1.087113930304877,1.674257252879766,1.5910752325236108,1.167321139201246,0.8077539222901738,0.909111664354187,0.89,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.24499761810756004,-0.5625278455750569,0.7988286239230257,1.5910752325236108,1.724515000395158,0.7070103057284927,-1.0999748867047898,0.82,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.3322630208172874,0.26229304236491,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.6734291002079332,-1.0999748867047898,0.79,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5933022724298738,-1.0999748867047898,0.58,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-1.4667132560437435,-0.8924562007510436,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.223909011703713,-1.0999748867047898,0.56,0.016798590021622126,-0.5625278455750569,-0.95202
8633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-1.2921824506242887,-1.3873487335150236,-0.952028633990455,-2.386612848785416,-1.618648166768315,-1.056002984100913,-1.0999748867047898,0.56,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.41952842352701486,-1.5523129111030172,-1.8274572629471952,0.09944220203272576,-0.5042604443804907,-0.7034003261350319,0.909111664354187,0.64,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.07046681268810527,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.46833188749111015,0.909111664354187,0.61,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(0.016798590021622126,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,-0.04856681848410872,-1.0999748867047898,0.68,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.5940592289464697,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.11933920911869125,-1.0999748867047898,0.76,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.2785947981508043,2.076898995832837,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.8581257305710129,-1.0999748867047898,0.86,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(1.1512488252480781,1.087113930304877,-0.0766000050337147,1.0938642223599824,1.167321139201246,1.076403566454655,0.909111664354187,0.9,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-1.0303862424951067,0.7571855751288902,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.08575800359813185,-1.0999748867047898,0.71,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.681324631656197,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-1.0056311758200724,-1.0999748867047898,0.62,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.1913293954410769,0.09732886477691666,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.27045463396121155,-1.0999748867047898,0.66,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.4531256035702591,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.2500540516074711,0.909111664354187,0.65,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.5403910062799865,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.08214802400466813,0.909111664354187,0.73,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.3322630208172874,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.28363525712
80305,-1.0999748867047898,0.62,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.36586020086053167,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.3376170450023333,0.909111664354187,0.74,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.5403910062799865,0.4272572199529034,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.8413351278107333,0.909111664354187,0.79,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.6734291002079332,0.909111664354187,0.8,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.41952842352701486,0.09732886477691666,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.1157292295252305,-1.0999748867047898,0.69,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.7685900343659244,0.4272572199529034,0.7988286239230257,0.09944220203272576,-0.5042604443804907,0.0018049897967303734,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.23687342844065212,0.909111664354187,0.76,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.9767180198286235,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9756599498929738,0.909111664354187,0.84,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-0.5067938262367422,-0.06763531281107672,0.7988286239230257,1.0938642223599824,1.167321139201246,0.6734291002079332,0.909111664354187,0.78,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.3794478533340162,-1.2223845559270303,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.9384687647789537,-1.0999748867047898,0.67,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.0303862424951067,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-0.7873533399364304,-1.0999748867047898,0.66,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-0.7685900343659244,-0.5625278455750569,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.8880969564981116,-1.0999748867047898,0.65,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.6412440614631982,-1.057420378339037,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.54,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.4667132560437435,-1.3873487335150236,-1.827457
2629471952,-2.386612848785416,-1.0614543055744028,-0.9888405730597928,-1.0999748867047898,0.58,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.6276564089897139,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-1.618648166768315,0.3376170450023333,0.909111664354187,0.79,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.889452617118896,0.9221497527168835,0.7988286239230257,0.09944220203272576,-0.5042604443804907,0.15292041463925066,0.909111664354187,0.8,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.016798590021622126,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,0.052933416813421515,-0.1661010378060696,0.909111664354187,0.75,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.5403910062799865,-0.5625278455750569,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.2668446543677508,0.909111664354187,0.73,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.24499761810756004,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.5522849012925116,-1.0999748867047898,0.72,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.0303862424951067,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.7034003261350319,-1.0999748867047898,0.62,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.15773221539783266,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,-0.45154128473083044,-1.0999748867047898,0.67,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.8021872144091686,1.4170422854808635,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.908497538851855,0.909111664354187,0.81,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-0.0766000050337147,-1.3921908284581592,-1.618648166768315,-0.972049970299513,-1.0999748867047898,0.63,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.9030402695923805,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.69,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.6276564089897139,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.2872452367214912,0.909111664354187,0.8,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-2.175842027962227,-1.1903278061831537,-1.0999748867047898,0.43,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933
416813421515,0.8581257305710129,0.909111664354187),(0.889452617118896,0.9221497527168835,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.10254860635841155,0.909111664354187,0.8,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.5067938262367422,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.8041439426967131,0.909111664354187,0.73,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.7685900343659244,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.6530285178541898,0.909111664354187,0.75,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.1913293954410769,0.09732886477691666,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.0989386267649508,0.909111664354187,0.71,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.08575800359813185,0.909111664354187,0.73,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.7149218116994412,0.5922213975408968,0.7988286239230257,0.5966532121963541,1.167321139201246,0.8581257305710129,0.909111664354187,0.83,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.1913293954410769,0.4272572199529034,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,0.32082644224205065,-1.0999748867047898,0.72,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(1.3257796306675331,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.462587429941097,0.909111664354187,0.94,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.5403910062799865,0.09732886477691666,1.674257252879766,0.5966532121963541,0.6101272780073337,0.23687342844065212,0.909111664354187,0.81,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.6276564089897139,-0.06763531281107672,1.674257252879766,0.09944220203272576,0.6101272780073337,0.10254860635841155,0.909111664354187,0.81,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.2332634488471884,0.909111664354187,0.75,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.8021872144091686,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.27045463396121155,0.909111664354187,0.79,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.7685900343659244,-0.2325994903990701,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.6026567095733507,-1.0999748867047898,0.58,1.1512488252480781,1.4170422854808635,0.79882862392302
57,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.0303862424951067,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.7873533399364304,-1.0999748867047898,0.59,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.9030402695923805,-1.8822412662790038,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-2.1138109579985565,-1.0999748867047898,0.47,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.07046681268810527,-1.5523129111030172,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.9626955331560363,-1.0999748867047898,0.49,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.117651645204834,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.610092875190155,-1.0999748867047898,0.47,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.553978658753471,-2.2121696214549904,-1.8274572629471952,-2.386612848785416,-2.7330358891561395,-2.1138109579985565,-1.0999748867047898,0.42,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.2921824506242887,-1.3873487335150236,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-2.2649263828410766,-1.0999748867047898,0.57,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-0.3322630208172874,-1.057420378339037,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.9384687647789537,-1.0999748867047898,0.62,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.10406399273134952,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5522849012925116,0.909111664354187,0.74,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.7149218116994412,0.4272572199529034,0.7988286239230257,0.09944220203272576,0.6101272780073337,0.11933920911869125,0.909111664354187,0.73,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.2049170479145614,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.9048875592583913,0.909111664354187,0.64,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.4667132560437435,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.7201909288953117,-1.0999748867047898,0.63,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.7285094641729257,-1.5523129111030172,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5597210669093144,-1.0999748867047898,0.59,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.016798590021622126,-0.2325994903990701,-0.952028633990455,-1.392190828458159
2,0.052933416813421515,-0.8041439426967131,-1.0999748867047898,0.73,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.889452617118896,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.2872452367214912,0.909111664354187,0.79,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.3794478533340162,-0.5625278455750569,-0.952028633990455,0.09944220203272576,0.052933416813421515,-1.1903278061831537,0.909111664354187,0.68,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-0.24499761810756004,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.610092875190155,-1.0999748867047898,0.7,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.36586020086053167,-0.06763531281107672,-0.952028633990455,-1.3921908284581592,-2.175842027962227,-0.2668446543677508,-1.0999748867047898,0.81,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.724515000395158,0.06896740083785215,0.909111664354187,0.85,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.5003104360869879,1.4170422854808635,0.7988286239230257,0.5966532121963541,0.052933416813421515,1.580121649263055,0.909111664354187,0.93,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.8493720469258974,1.2520781078928702,1.674257252879766,1.0938642223599824,1.724515000395158,1.0596129636943752,0.909111664354187,0.91,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.9431208397853792,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.40116947644999135,-1.0999748867047898,0.69,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.5055230726051333,0.909111664354187,0.77,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.1512488252480781,1.087113930304877,0.7988286239230257,1.0938642223599824,-0.5042604443804907,0.9588693471326941,0.909111664354187,0.86,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,1.167321139201246,-0.4347506819705508,0.909111664354187,0.74,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.5067938262367422,-1.5523129111030172,-1.8274572629471952,-2.386612848785416,-1.0614543055744028,-1.9123237248751956,-1.0999748867047898,0.57,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-1.6412440614631982,-2.5420979766309775,-1.8274
572629471952,-1.3921908284581592,-1.618648166768315,-1.2071184089434333,-1.0999748867047898,0.51,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-1.3794478533340162,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-0.5042604443804907,-0.9552593675392334,0.909111664354187,0.67,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187); DROP TABLE IF EXISTS model; create table model engine = Memory as select stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2, param3, param4, param5, param6, param7) as state from defaults; with (select state from model) as model select round(evalMLMethod(model, predict1, predict2, predict3, predict4, predict5, predict6, predict7), 12) from defaults; diff --git a/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql b/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql index b27e4275d5d..2ab85d90a66 100644 --- a/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql +++ b/tests/queries/0_stateless/00976_system_stop_ttl_merges.sql @@ -4,7 +4,7 @@ create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDa system stop ttl merges ttl; -insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1), (toDateTime('2000-10-10 00:00:00'), 2) +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1), (toDateTime('2000-10-10 00:00:00'), 2); insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3), (toDateTime('2100-10-10 00:00:00'), 4); select sleep(1) format Null; -- wait if very fast merge happen diff --git a/tests/queries/0_stateless/01034_JSONCompactEachRow.sql b/tests/queries/0_stateless/01034_JSONCompactEachRow.sql index f71597a60e5..e47e5346c01 100644 --- a/tests/queries/0_stateless/01034_JSONCompactEachRow.sql +++ b/tests/queries/0_stateless/01034_JSONCompactEachRow.sql @@ -22,51 +22,64 @@ SELECT 5; /* Check JSONCompactEachRow Input */ CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2; INSERT INTO test_table FORMAT JSONCompactEachRow ["first", 1, "2", null] ["second", 2, null, 6]; + SELECT * FROM test_table FORMAT JSONCompactEachRow; TRUNCATE TABLE test_table; SELECT 6; /* Check input_format_null_as_default = 1 */ SET input_format_null_as_default = 1; INSERT INTO test_table FORMAT JSONCompactEachRow ["first", 1, "2", null] ["second", 2, null, 6]; + SELECT * FROM test_table FORMAT JSONCompactEachRow; TRUNCATE TABLE test_table; SELECT 7; /* Check Nested */ CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1; INSERT INTO test_table_2 FORMAT JSONCompactEachRow [16, [15, 16, null], ["first", "second", "third"]]; + SELECT * FROM test_table_2 FORMAT JSONCompactEachRow; TRUNCATE TABLE test_table_2; SELECT 8; /* Check JSONCompactEachRowWithNamesAndTypes and JSONCompactEachRowWithNamesAndTypes Input */ SET input_format_null_as_default = 0; INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", 1, "2", null]["second", 2, null, 6]; + INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v1", "v2", "v3", "v4"]["first", 1, "2", null]["second", 2, null, 6]; + SELECT * FROM test_table FORMAT JSONCompactEachRow; TRUNCATE TABLE test_table; SELECT 9; /* Check input_format_null_as_default = 1 */ SET input_format_null_as_default = 1; INSERT INTO 
test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", 1, "2", null] ["second", 2, null, 6]; + INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v1", "v2", "v3", "v4"]["first", 1, "2", null] ["second", 2, null, 6]; + SELECT * FROM test_table FORMAT JSONCompactEachRow; SELECT 10; /* Check Header */ TRUNCATE TABLE test_table; SET input_format_skip_unknown_fields = 1; INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "invalid_column"]["String", "UInt8", "UInt8"]["first", 1, 32]["second", 2, "64"]; + INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v1", "v2", "invalid_column"]["first", 1, 32]["second", 2, "64"]; + SELECT * FROM test_table FORMAT JSONCompactEachRow; SELECT 11; TRUNCATE TABLE test_table; INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v4", "v2", "v3"]["UInt8", "UInt8", "UInt16"][1, 2, 3] + INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v4", "v2", "v3"][1, 2, 3] + SELECT * FROM test_table FORMAT JSONCompactEachRowWithNamesAndTypes; SELECT '----------'; SELECT * FROM test_table FORMAT JSONCompactEachRowWithNames; SELECT 12; /* Check Nested */ INSERT INTO test_table_2 FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "n.id", "n.name"]["UInt8", "Array(UInt8)", "Array(String)"][16, [15, 16, null], ["first", "second", "third"]]; + INSERT INTO test_table_2 FORMAT JSONCompactEachRowWithNames ["v1", "n.id", "n.name"][16, [15, 16, null], ["first", "second", "third"]]; + SELECT * FROM test_table_2 FORMAT JSONCompactEachRowWithNamesAndTypes; SELECT '----------'; SELECT * FROM test_table_2 FORMAT JSONCompactEachRowWithNames; diff --git a/tests/queries/0_stateless/01060_defaults_all_columns.sql b/tests/queries/0_stateless/01060_defaults_all_columns.sql index afbb01b8cb2..74fad7f75fb 100644 --- a/tests/queries/0_stateless/01060_defaults_all_columns.sql +++ b/tests/queries/0_stateless/01060_defaults_all_columns.sql @@ -3,6 +3,7 @@ DROP TABLE IF EXISTS defaults_all_columns; CREATE TABLE defaults_all_columns (n UInt8 DEFAULT 42, s String DEFAULT concat('test', CAST(n, 'String'))) ENGINE = Memory; INSERT INTO defaults_all_columns FORMAT JSONEachRow {"n": 1, "s": "hello"} {}; + INSERT INTO defaults_all_columns FORMAT JSONEachRow {"n": 2}, {"s": "world"}; SELECT * FROM defaults_all_columns ORDER BY n, s; diff --git a/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql b/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql index f7ccc309c5a..ae5e86ec387 100644 --- a/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql +++ b/tests/queries/0_stateless/01072_json_each_row_data_in_square_brackets.sql @@ -2,9 +2,13 @@ DROP TABLE IF EXISTS json_square_brackets; CREATE TABLE json_square_brackets (id UInt32, name String) ENGINE = Memory; + INSERT INTO json_square_brackets FORMAT JSONEachRow [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; + INSERT INTO json_square_brackets FORMAT JSONEachRow[]; + INSERT INTO json_square_brackets FORMAT JSONEachRow [ ] ; + INSERT INTO json_square_brackets FORMAT JSONEachRow ; SELECT * FROM json_square_brackets ORDER BY id; diff --git a/tests/queries/0_stateless/01179_insert_values_semicolon.expect b/tests/queries/0_stateless/01179_insert_values_semicolon.expect index 534b18a9500..4b8693126a1 100755 --- a/tests/queries/0_stateless/01179_insert_values_semicolon.expect +++ b/tests/queries/0_stateless/01179_insert_values_semicolon.expect @@ -37,10 +37,10 @@ 
send -- "INSERT INTO test_01179 values ('foo'); \r" expect "Ok." send -- "INSERT INTO test_01179 values ('foo'); ('bar') \r" -expect "Cannot read data after semicolon" +expect "Syntax error" send -- "SELECT val, count() FROM test_01179 GROUP BY val FORMAT TSV\r" -expect "foo\t2" +expect "foo\t3" send -- "DROP TABLE test_01179\r" expect "Ok." diff --git a/tests/queries/0_stateless/01187_set_profile_as_setting.sh b/tests/queries/0_stateless/01187_set_profile_as_setting.sh index dacb609d790..42f596c45d6 100755 --- a/tests/queries/0_stateless/01187_set_profile_as_setting.sh +++ b/tests/queries/0_stateless/01187_set_profile_as_setting.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash # Tags: no-random-settings -unset CLICKHOUSE_LOG_COMMENT - CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment, because the test has to use the readonly mode +CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01231_operator_null_in.sql b/tests/queries/0_stateless/01231_operator_null_in.sql index 0424a995b3f..26f342540e3 100644 --- a/tests/queries/0_stateless/01231_operator_null_in.sql +++ b/tests/queries/0_stateless/01231_operator_null_in.sql @@ -112,7 +112,7 @@ DROP TABLE IF EXISTS null_in_subquery; DROP TABLE IF EXISTS null_in_tuple; CREATE TABLE null_in_tuple (dt DateTime, idx int, t Tuple(Nullable(UInt64), Nullable(String))) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -INSERT INTO null_in_tuple VALUES (1, 1, (1, '1')) (2, 2, (2, NULL)) (3, 3, (NULL, '3')) (4, 4, (NULL, NULL)) +INSERT INTO null_in_tuple VALUES (1, 1, (1, '1')) (2, 2, (2, NULL)) (3, 3, (NULL, '3')) (4, 4, (NULL, NULL)); SET transform_null_in = 0; diff --git a/tests/queries/0_stateless/01238_http_memory_tracking.sh b/tests/queries/0_stateless/01238_http_memory_tracking.sh index 26d3dd8acd4..ce1310cf302 100755 --- a/tests/queries/0_stateless/01238_http_memory_tracking.sh +++ b/tests/queries/0_stateless/01238_http_memory_tracking.sh @@ -8,18 +8,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -o pipefail +MISTER_USER="test_01238_http_$RANDOM" +# Create another user to check limit on the amount of memory reliabily +${CLICKHOUSE_CLIENT} --format Null -q "CREATE USER $MISTER_USER" + # This is needed to keep at least one running query for user for the time of test. 
# (1k http queries takes ~1 second, let's run for 5x more to avoid flaps) -${CLICKHOUSE_CLIENT} --function_sleep_max_microseconds_per_block 5000000 --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)' & +${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)' & # ignore "yes: standard output: Broken pipe" yes 'SELECT 1' 2>/dev/null | { head -n1000 } | { - xargs -I{} ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d '{}' + xargs -I{} ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${MISTER_USER}&wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d '{}' } | grep -x -c 1 wait # Reset max_memory_usage_for_user, so it will not affect other tests -${CLICKHOUSE_CLIENT} --max_memory_usage_for_user=0 -q "SELECT 1 FORMAT Null" +${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --max_memory_usage_for_user=0 -q "SELECT 1 FORMAT Null" diff --git a/tests/queries/0_stateless/01246_buffer_flush.sh b/tests/queries/0_stateless/01246_buffer_flush.sh index 27c3f01f216..aea91a0bf6b 100755 --- a/tests/queries/0_stateless/01246_buffer_flush.sh +++ b/tests/queries/0_stateless/01246_buffer_flush.sh @@ -7,6 +7,16 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e +function query() +{ + local query_id + if [[ $1 == --query_id ]]; then + query_id="&query_id=$2" + shift 2 + fi + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}$query_id" -d "$*" +} + function wait_until() { local expr=$1 && shift @@ -17,73 +27,68 @@ function wait_until() function get_buffer_delay() { local buffer_insert_id=$1 && shift - $CLICKHOUSE_CLIENT -nm -q " - SYSTEM FLUSH LOGS; - WITH - (SELECT event_time_microseconds FROM system.query_log WHERE current_database = currentDatabase() AND type = 'QueryStart' AND query_id = '$buffer_insert_id') AS begin_, - (SELECT max(event_time) FROM data_01256) AS end_ - SELECT dateDiff('seconds', begin_, end_)::UInt64; + query "SYSTEM FLUSH LOGS" + query " + WITH + (SELECT event_time_microseconds FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND type = 'QueryStart' AND query_id = '$buffer_insert_id') AS begin_, + (SELECT max(event_time) FROM data_01256) AS end_ + SELECT dateDiff('seconds', begin_, end_)::UInt64 " } -$CLICKHOUSE_CLIENT -nm -q " - drop table if exists data_01256; - drop table if exists buffer_01256; - - create table data_01256 (key UInt64, event_time DateTime(6) MATERIALIZED now64(6)) Engine=Memory(); -" +query "drop table if exists data_01256" +query "drop table if exists buffer_01256" +query "create table data_01256 (key UInt64, event_time DateTime(6) MATERIALIZED now64(6)) Engine=Memory()" echo "min" -$CLICKHOUSE_CLIENT -q " - create table buffer_01256 (key UInt64) Engine=Buffer(currentDatabase(), data_01256, 1, +query " + create table buffer_01256 (key UInt64) Engine=Buffer($CLICKHOUSE_DATABASE, data_01256, 1, 2, 100, /* time */ 4, 100, /* rows */ 1, 1e6 /* bytes */ ) " min_query_id=$(random_str 10) -$CLICKHOUSE_CLIENT --query_id="$min_query_id" -q "insert into buffer_01256 select * from system.numbers limit 5" -$CLICKHOUSE_CLIENT -q "select count() from data_01256" -wait_until '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01256") -eq 5 ]]' +query --query_id "$min_query_id" "insert into buffer_01256 select * from system.numbers limit 5" +query "select count() from data_01256" +wait_until '[[ $(query "select count() from data_01256") -eq 5 ]]' sec=$(get_buffer_delay "$min_query_id") [[ $sec -ge 2 ]] 
|| echo "Buffer flushed too early, min_time=2, flushed after $sec sec" [[ $sec -lt 100 ]] || echo "Buffer flushed too late, max_time=100, flushed after $sec sec" -$CLICKHOUSE_CLIENT -q "select count() from data_01256" -$CLICKHOUSE_CLIENT -q "drop table buffer_01256" +query "select count() from data_01256" +query "drop table buffer_01256" echo "max" -$CLICKHOUSE_CLIENT -q " - create table buffer_01256 (key UInt64) Engine=Buffer(currentDatabase(), data_01256, 1, +query " + create table buffer_01256 (key UInt64) Engine=Buffer($CLICKHOUSE_DATABASE, data_01256, 1, 100, 2, /* time */ 0, 100, /* rows */ 0, 1e6 /* bytes */ - ); + ) " max_query_id=$(random_str 10) -$CLICKHOUSE_CLIENT --query_id="$max_query_id" -q "insert into buffer_01256 select * from system.numbers limit 5" -$CLICKHOUSE_CLIENT -q "select count() from data_01256" -wait_until '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01256") -eq 10 ]]' +query --query_id "$max_query_id" "insert into buffer_01256 select * from system.numbers limit 5" +query "select count() from data_01256" +wait_until '[[ $(query "select count() from data_01256") -eq 10 ]]' sec=$(get_buffer_delay "$max_query_id") [[ $sec -ge 2 ]] || echo "Buffer flushed too early, max_time=2, flushed after $sec sec" -$CLICKHOUSE_CLIENT -q "select count() from data_01256" -$CLICKHOUSE_CLIENT -q "drop table buffer_01256" +query "select count() from data_01256" +query "drop table buffer_01256" echo "direct" -$CLICKHOUSE_CLIENT -nm -q " - create table buffer_01256 (key UInt64) Engine=Buffer(currentDatabase(), data_01256, 1, +query " + create table buffer_01256 (key UInt64) Engine=Buffer($CLICKHOUSE_DATABASE, data_01256, 1, 100, 100, /* time */ 0, 9, /* rows */ 0, 1e6 /* bytes */ - ); - insert into buffer_01256 select * from system.numbers limit 10; - select count() from data_01256; + ) " +query "insert into buffer_01256 select * from system.numbers limit 10" +query "select count() from data_01256" echo "drop" -$CLICKHOUSE_CLIENT -nm -q " - insert into buffer_01256 select * from system.numbers limit 10; - drop table if exists buffer_01256; - select count() from data_01256; -" +query "insert into buffer_01256 select * from system.numbers limit 10" +query "drop table if exists buffer_01256" +query "select count() from data_01256" -$CLICKHOUSE_CLIENT -q "drop table data_01256" +query "drop table data_01256" diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 713d187cd88..5f82731c54e 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -76,7 +76,7 @@ insert into data_01278 select reinterpretAsString(number), // s6 reinterpretAsString(number), // s7 reinterpretAsString(number) // s8 -from numbers(200000); -- { serverError 241 }" > /dev/null 2>&1 +from numbers(200000);" > /dev/null 2>&1 local ret_code=$? 
if [[ $ret_code -eq 0 ]]; then diff --git a/tests/queries/0_stateless/01299_alter_merge_tree.sql b/tests/queries/0_stateless/01299_alter_merge_tree.sql index 3c4467926f8..1fa354040f5 100644 --- a/tests/queries/0_stateless/01299_alter_merge_tree.sql +++ b/tests/queries/0_stateless/01299_alter_merge_tree.sql @@ -3,11 +3,11 @@ drop table if exists merge_tree; set allow_deprecated_syntax_for_merge_tree=1; create table merge_tree ( CounterID UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), tuple(CounterID, StartDate, intHash32(UserID), VisitID, ClickLogID), 8192, Sign); -insert into merge_tree values (1, '2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3) +insert into merge_tree values (1, '2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); alter table merge_tree add column dummy String after CounterID; describe table merge_tree; -insert into merge_tree values (1, 'Hello, Alter Table!','2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3) +insert into merge_tree values (1, 'Hello, Alter Table!','2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); select CounterID, dummy from merge_tree where dummy <> '' limit 10; diff --git a/tests/queries/0_stateless/01324_insert_tsv_raw.sql b/tests/queries/0_stateless/01324_insert_tsv_raw.sql index c3812730e5b..4827f34597a 100644 --- a/tests/queries/0_stateless/01324_insert_tsv_raw.sql +++ b/tests/queries/0_stateless/01324_insert_tsv_raw.sql @@ -1,7 +1,6 @@ drop table if exists tsv_raw; create table tsv_raw (strval String, intval Int64, b1 String, b2 String, b3 String, b4 String) engine = Memory; insert into tsv_raw format TSVRaw "a 1 \ \\ "\"" "\\"" -; select * from tsv_raw format TSVRaw; select * from tsv_raw format JSONCompactEachRow; diff --git a/tests/queries/0_stateless/01396_low_cardinality_fixed_string_default.sql b/tests/queries/0_stateless/01396_low_cardinality_fixed_string_default.sql index e31c402559e..f0c89cdf826 100644 --- a/tests/queries/0_stateless/01396_low_cardinality_fixed_string_default.sql +++ b/tests/queries/0_stateless/01396_low_cardinality_fixed_string_default.sql @@ -6,6 +6,7 @@ CREATE TABLE test ) ENGINE = MergeTree() PARTITION BY id ORDER BY id; INSERT INTO test FORMAT CSV 1,RU + INSERT INTO test FORMAT CSV 1, SELECT * FROM test ORDER BY code; diff --git a/tests/queries/0_stateless/01448_json_compact_strings_each_row.sql b/tests/queries/0_stateless/01448_json_compact_strings_each_row.sql index 06374c633e5..52f0eba8ba4 100644 --- a/tests/queries/0_stateless/01448_json_compact_strings_each_row.sql +++ b/tests/queries/0_stateless/01448_json_compact_strings_each_row.sql @@ -24,51 +24,64 @@ SELECT 5; /* Check JSONCompactStringsEachRow Input */ CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2; INSERT INTO test_table FORMAT JSONCompactStringsEachRow ["first", "1", "2", "3"] ["second", "2", "3", "6"]; + SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; TRUNCATE TABLE test_table; SELECT 6; /* Check input_format_null_as_default = 1 */ SET input_format_null_as_default = 1; INSERT INTO test_table FORMAT JSONCompactStringsEachRow ["first", "1", "2", "ᴺᵁᴸᴸ"] ["second", "2", "null", "6"]; + SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; TRUNCATE TABLE test_table; SELECT 7; /* Check Nested */ CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1; INSERT INTO test_table_2 FORMAT JSONCompactStringsEachRow ["16", 
"[15, 16, 17]", "['first', 'second', 'third']"]; + SELECT * FROM test_table_2 FORMAT JSONCompactStringsEachRow; TRUNCATE TABLE test_table_2; SELECT 8; /* Check JSONCompactStringsEachRowWithNames and JSONCompactStringsEachRowWithNamesAndTypes Input */ SET input_format_null_as_default = 0; INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", "1", "2", "3"]["second", "2", "3", "6"]; + INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v1", "v2", "v3", "v4"]["first", "1", "2", "3"]["second", "2", "3", "6"]; + SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; TRUNCATE TABLE test_table; SELECT 9; /* Check input_format_null_as_default = 1 */ SET input_format_null_as_default = 1; INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", "1", "2", "null"] ["second", "2", "null", "6"]; + INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v1", "v2", "v3", "v4"]["first", "1", "2", "null"] ["second", "2", "null", "6"]; + SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; SELECT 10; /* Check Header */ TRUNCATE TABLE test_table; SET input_format_skip_unknown_fields = 1; INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "v2", "invalid_column"]["String", "UInt8", "UInt8"]["first", "1", "32"]["second", "2", "64"]; + INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v1", "v2", "invalid_column"]["first", "1", "32"]["second", "2", "64"]; + SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; SELECT 11; TRUNCATE TABLE test_table; INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v4", "v2", "v3"]["UInt8", "UInt8", "UInt16"]["1", "2", "3"] + INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v4", "v2", "v3"]["1", "2", "3"] + SELECT * FROM test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes; SELECT '---------'; SELECT * FROM test_table FORMAT JSONCompactStringsEachRowWithNames; SELECT 12; /* Check Nested */ INSERT INTO test_table_2 FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "n.id", "n.name"]["UInt8", "Array(UInt8)", "Array(String)"]["16", "[15, 16, 17]", "['first', 'second', 'third']"]; + INSERT INTO test_table_2 FORMAT JSONCompactStringsEachRowWithNames ["v1", "n.id", "n.name"]["16", "[15, 16, 17]", "['first', 'second', 'third']"]; + SELECT * FROM test_table_2 FORMAT JSONCompactStringsEachRowWithNamesAndTypes; SELECT '---------'; SELECT * FROM test_table_2 FORMAT JSONCompactStringsEachRowWithNames; diff --git a/tests/queries/0_stateless/01514_input_format_csv_enum_as_number_setting.sql b/tests/queries/0_stateless/01514_input_format_csv_enum_as_number_setting.sql index 526af60434f..9e1783b7ba0 100644 --- a/tests/queries/0_stateless/01514_input_format_csv_enum_as_number_setting.sql +++ b/tests/queries/0_stateless/01514_input_format_csv_enum_as_number_setting.sql @@ -8,6 +8,7 @@ CREATE TABLE table_with_enum_column_for_csv_insert ( SET input_format_csv_enum_as_number = 1; INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2 + SELECT * FROM table_with_enum_column_for_csv_insert; SET input_format_csv_enum_as_number = 0; diff --git a/tests/queries/0_stateless/01514_input_format_json_enum_as_number.sql b/tests/queries/0_stateless/01514_input_format_json_enum_as_number.sql index 0b2cb8c64e2..a5044cd6875 100644 --- 
a/tests/queries/0_stateless/01514_input_format_json_enum_as_number.sql +++ b/tests/queries/0_stateless/01514_input_format_json_enum_as_number.sql @@ -8,6 +8,7 @@ CREATE TABLE table_with_enum_column_for_json_insert ( ) ENGINE=Memory(); INSERT INTO table_with_enum_column_for_json_insert FORMAT JSONEachRow {"Id":102,"Value":2} + SELECT * FROM table_with_enum_column_for_json_insert; DROP TABLE IF EXISTS table_with_enum_column_for_json_insert; diff --git a/tests/queries/0_stateless/01514_input_format_tsv_enum_as_number_setting.sql b/tests/queries/0_stateless/01514_input_format_tsv_enum_as_number_setting.sql index 033d7d282f0..5ad94eeb20e 100644 --- a/tests/queries/0_stateless/01514_input_format_tsv_enum_as_number_setting.sql +++ b/tests/queries/0_stateless/01514_input_format_tsv_enum_as_number_setting.sql @@ -8,7 +8,9 @@ CREATE TABLE table_with_enum_column_for_tsv_insert ( SET input_format_tsv_enum_as_number = 1; INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2 + INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TabSeparatedRaw 103 1 + SELECT * FROM table_with_enum_column_for_tsv_insert ORDER BY Id; SET input_format_tsv_enum_as_number = 0; diff --git a/tests/queries/0_stateless/01558_enum_as_num_in_tsv_csv_input.sql b/tests/queries/0_stateless/01558_enum_as_num_in_tsv_csv_input.sql index 6a0f2a97b4f..4dccfda4bc5 100644 --- a/tests/queries/0_stateless/01558_enum_as_num_in_tsv_csv_input.sql +++ b/tests/queries/0_stateless/01558_enum_as_num_in_tsv_csv_input.sql @@ -6,15 +6,23 @@ CREATE TABLE enum_as_num ( ) ENGINE=Memory(); INSERT INTO enum_as_num FORMAT TSV 1 1 + INSERT INTO enum_as_num FORMAT TSV 2 2 + INSERT INTO enum_as_num FORMAT TSV 3 3 + INSERT INTO enum_as_num FORMAT TSV 4 a + INSERT INTO enum_as_num FORMAT TSV 5 b INSERT INTO enum_as_num FORMAT CSV 6,1 + INSERT INTO enum_as_num FORMAT CSV 7,2 + INSERT INTO enum_as_num FORMAT CSV 8,3 + INSERT INTO enum_as_num FORMAT CSV 9,a + INSERT INTO enum_as_num FORMAT CSV 10,b SELECT * FROM enum_as_num ORDER BY Id; diff --git a/tests/queries/0_stateless/01564_test_hint_woes.reference b/tests/queries/0_stateless/01564_test_hint_woes.reference index adb4cc61816..56c07922ccb 100644 --- a/tests/queries/0_stateless/01564_test_hint_woes.reference +++ b/tests/queries/0_stateless/01564_test_hint_woes.reference @@ -20,7 +20,7 @@ select 1; insert into values_01564 values (11); -- { serverError VIOLATED_CONSTRAINT } select nonexistent column; -- { serverError UNKNOWN_IDENTIFIER } -- query after values on the same line -insert into values_01564 values (1); select 1; +insert into values_01564 values (1); select 1; 1 -- a failing insert and then a normal insert (#https://github.com/ClickHouse/ClickHouse/issues/19353) diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index 2e47034e528..7215f270a4c 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: long CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -15,17 +16,26 @@ verify_sql="SELECT # In case of test failure, this code will do infinite loop and timeout. 
verify() { - while true + for i in {1..5000} do - result=$( $CLICKHOUSE_CLIENT -m --query="$verify_sql" ) - [ "$result" = "1" ] && break + result=$( $CLICKHOUSE_CLIENT --query="$verify_sql" ) + [ "$result" = "1" ] && echo "$result" && break sleep 0.1 + + if [[ $i -eq 5000 ]] + then + $CLICKHOUSE_CLIENT --multiquery " + SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; + SELECT sum(active), sum(NOT active) FROM system.parts; + SELECT sum(active), sum(NOT active) FROM system.projection_parts; + SELECT count() FROM system.dropped_tables_parts; + " + fi done - echo 1 } $CLICKHOUSE_CLIENT --database_atomic_wait_for_drop_and_detach_synchronously=1 --query="DROP TABLE IF EXISTS test_table" -$CLICKHOUSE_CLIENT --query="CREATE TABLE test_table(data Date) ENGINE = MergeTree PARTITION BY toYear(data) ORDER BY data;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE test_table (data Date) ENGINE = MergeTree PARTITION BY toYear(data) ORDER BY data;" $CLICKHOUSE_CLIENT --query="INSERT INTO test_table VALUES ('1992-01-01')" verify diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference b/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference deleted file mode 100644 index 5cc8909b6c8..00000000000 --- a/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference +++ /dev/null @@ -1,12 +0,0 @@ -10000000 -10000000 1274991808 -30000000 -30000000 3824991808 -10000000 -10000000 1274991808 -30000000 -30000000 3824991808 -10000000 -10000000 1274991808 -30000000 -30000000 3824991808 diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql b/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql deleted file mode 100644 index d11c9120c61..00000000000 --- a/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql +++ /dev/null @@ -1,48 +0,0 @@ -set allow_suspicious_low_cardinality_types=1; -drop table if exists perf_lc_num; - -CREATE TABLE perf_lc_num(  num UInt8,  arr Array(LowCardinality(Int64)) default [num]  ) ENGINE = TinyLog; - -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); - -select sum(length(arr)) from perf_lc_num; -select sum(length(arr)), sum(num) from perf_lc_num; - -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); - -select sum(length(arr)) from perf_lc_num; -select sum(length(arr)), sum(num) from perf_lc_num; - -drop table if exists perf_lc_num; - - -CREATE TABLE perf_lc_num(  num UInt8,  arr Array(LowCardinality(Int64)) default [num]  ) ENGINE = Log; - -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); - -select sum(length(arr)) from perf_lc_num; -select sum(length(arr)), sum(num) from perf_lc_num; - -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); - -select sum(length(arr)) from perf_lc_num; -select sum(length(arr)), sum(num) from perf_lc_num; - -drop table if exists perf_lc_num; - - -CREATE TABLE perf_lc_num(  num UInt8,  arr Array(LowCardinality(Int64)) default [num]  ) ENGINE = StripeLog; - -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); - -select sum(length(arr)) from perf_lc_num; -select sum(length(arr)), sum(num) from perf_lc_num; - -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); - -select sum(length(arr)) from perf_lc_num; -select sum(length(arr)), sum(num) from perf_lc_num; - -drop table if exists perf_lc_num; - - diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log_1.reference 
b/tests/queries/0_stateless/01651_lc_insert_tiny_log_1.reference new file mode 100644 index 00000000000..0715a4212ed --- /dev/null +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log_1.reference @@ -0,0 +1,4 @@ +10000000 +10000000 1274991808 +30000000 +30000000 3824991808 diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log_1.sql b/tests/queries/0_stateless/01651_lc_insert_tiny_log_1.sql new file mode 100644 index 00000000000..142d3b21b47 --- /dev/null +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log_1.sql @@ -0,0 +1,16 @@ +set allow_suspicious_low_cardinality_types = 1, max_rows_to_read = '31M'; +drop table if exists perf_lc_num; + +CREATE TABLE perf_lc_num( num UInt8, arr Array(LowCardinality(Int64)) default [num] ) ENGINE = TinyLog; + +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); + +select sum(length(arr)) from perf_lc_num; +select sum(length(arr)), sum(num) from perf_lc_num; + +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); + +select sum(length(arr)) from perf_lc_num; +select sum(length(arr)), sum(num) from perf_lc_num; + +drop table if exists perf_lc_num; diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log_2.reference b/tests/queries/0_stateless/01651_lc_insert_tiny_log_2.reference new file mode 100644 index 00000000000..0715a4212ed --- /dev/null +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log_2.reference @@ -0,0 +1,4 @@ +10000000 +10000000 1274991808 +30000000 +30000000 3824991808 diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log_2.sql b/tests/queries/0_stateless/01651_lc_insert_tiny_log_2.sql new file mode 100644 index 00000000000..e6b68e1d682 --- /dev/null +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log_2.sql @@ -0,0 +1,16 @@ +set allow_suspicious_low_cardinality_types = 1, max_rows_to_read = '31M'; +drop table if exists perf_lc_num; + +CREATE TABLE perf_lc_num( num UInt8, arr Array(LowCardinality(Int64)) default [num] ) ENGINE = Log; + +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); + +select sum(length(arr)) from perf_lc_num; +select sum(length(arr)), sum(num) from perf_lc_num; + +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); + +select sum(length(arr)) from perf_lc_num; +select sum(length(arr)), sum(num) from perf_lc_num; + +drop table if exists perf_lc_num; diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log_3.reference b/tests/queries/0_stateless/01651_lc_insert_tiny_log_3.reference new file mode 100644 index 00000000000..0715a4212ed --- /dev/null +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log_3.reference @@ -0,0 +1,4 @@ +10000000 +10000000 1274991808 +30000000 +30000000 3824991808 diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log_3.sql b/tests/queries/0_stateless/01651_lc_insert_tiny_log_3.sql new file mode 100644 index 00000000000..85a42163897 --- /dev/null +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log_3.sql @@ -0,0 +1,16 @@ +set allow_suspicious_low_cardinality_types = 1, max_rows_to_read = '31M'; +drop table if exists perf_lc_num; + +CREATE TABLE perf_lc_num( num UInt8, arr Array(LowCardinality(Int64)) default [num] ) ENGINE = StripeLog; + +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); + +select sum(length(arr)) from perf_lc_num; +select sum(length(arr)), sum(num) from perf_lc_num; + +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); + +select 
sum(length(arr)) from perf_lc_num; +select sum(length(arr)), sum(num) from perf_lc_num; + +drop table if exists perf_lc_num; diff --git a/tests/queries/0_stateless/01825_type_json_1.sql b/tests/queries/0_stateless/01825_type_json_1.sql index 6876349677e..dd7c1d2b41f 100644 --- a/tests/queries/0_stateless/01825_type_json_1.sql +++ b/tests/queries/0_stateless/01825_type_json_1.sql @@ -10,6 +10,7 @@ ENGINE = MergeTree ORDER BY tuple(); SYSTEM STOP MERGES t_json; INSERT INTO t_json FORMAT JSONEachRow {"id": 1, "data": {"k1": "aa", "k2": {"k3": "bb", "k4": "c"}}} {"id": 2, "data": {"k1": "ee", "k5": "ff"}}; + INSERT INTO t_json FORMAT JSONEachRow {"id": 3, "data": {"k5":"foo"}}; SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM t_json ORDER BY id; @@ -32,6 +33,7 @@ SELECT '============'; TRUNCATE TABLE t_json; INSERT INTO t_json FORMAT JSONEachRow {"id": 1, "data": {"k1":[{"k2":"aaa","k3":[{"k4":"bbb"},{"k4":"ccc"}]},{"k2":"ddd","k3":[{"k4":"eee"},{"k4":"fff"}]}]}}; + SELECT id, data.k1.k2, data.k1.k3.k4 FROM t_json ORDER BY id; SELECT name, column, type @@ -82,4 +84,5 @@ ORDER BY name; DROP TABLE IF EXISTS t_json; -CREATE TABLE t_json(id UInt64, data Object('JSON')) ENGINE = Log; -- { serverError ILLEGAL_COLUMN } +-- Restore it after it is fixed. +-- CREATE TABLE t_json(id UInt64, data Object('JSON')) ENGINE = Log; -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/01825_type_json_10.sql b/tests/queries/0_stateless/01825_type_json_10.sql index 98f1a766ed8..e13026770f6 100644 --- a/tests/queries/0_stateless/01825_type_json_10.sql +++ b/tests/queries/0_stateless/01825_type_json_10.sql @@ -7,6 +7,7 @@ DROP TABLE IF EXISTS t_json_10; CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 127]}]}} {"a": {"b": 2, "c": []}} + INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 3, "c": [{"f": 20, "e": [32]}, {"f": 30, "e": [64, 128]}]}} {"a": {"b": 4, "c": []}} SELECT DISTINCT toTypeName(o) FROM t_json_10; diff --git a/tests/queries/0_stateless/01825_type_json_17.sql b/tests/queries/0_stateless/01825_type_json_17.sql index ee5cf590407..dfbeae2b1fb 100644 --- a/tests/queries/0_stateless/01825_type_json_17.sql +++ b/tests/queries/0_stateless/01825_type_json_17.sql @@ -13,6 +13,7 @@ CREATE FUNCTION hasValidSizes17 AS (arr1, arr2) -> length(arr1) = length(arr2) A SYSTEM STOP MERGES t_json_17; INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} + INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} SELECT toTypeName(obj) FROM t_json_17 LIMIT 1; @@ -24,6 +25,7 @@ SELECT obj.arr.k1.k4 FROM t_json_17 ORDER BY obj.id; TRUNCATE TABLE t_json_17; INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 1, "arr": [{"k1": [{"k2": "aaa"}]}]} + INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 2, "arr": [{"k1": [{"k2": "bbb", "k3": [{"k4": 10}]}, {"k2": "ccc", "k3": [{"k4": 20}]}]}]} SELECT toTypeName(obj) FROM t_json_17 LIMIT 1; @@ -35,7 +37,9 @@ SELECT obj.arr.k1.k3.k4 FROM t_json_17 ORDER BY obj.id; TRUNCATE TABLE t_json_17; INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 1, "arr": [{"k3": "qqq"}, {"k3": "www"}]} + INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 2, "arr": [{"k1": [{"k2": "aaa"}], "k3": "eee"}]} + INSERT INTO t_json_17 FORMAT JSONAsObject {"id": 3, "arr": [{"k1": [{"k2": "bbb", "k4": [{"k5": 10}]}, {"k2": "ccc", "k4": [{"k5": 20}]}], "k3": 
"rrr"}]} SELECT toTypeName(obj) FROM t_json_17 LIMIT 1; diff --git a/tests/queries/0_stateless/01825_type_json_18.sql b/tests/queries/0_stateless/01825_type_json_18.sql index b493982a12c..26bab5ff9e5 100644 --- a/tests/queries/0_stateless/01825_type_json_18.sql +++ b/tests/queries/0_stateless/01825_type_json_18.sql @@ -8,9 +8,11 @@ CREATE TABLE t_json_2(id UInt64, data Object('JSON')) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data" : {"k1": 1}}; + SELECT id, data, toTypeName(data) FROM t_json_2 ORDER BY id; TRUNCATE TABLE t_json_2; INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data" : {"k1": [1, 2]}}; + SELECT id, data, toTypeName(data) FROM t_json_2 ORDER BY id; diff --git a/tests/queries/0_stateless/01825_type_json_9.sql b/tests/queries/0_stateless/01825_type_json_9.sql index 8fa4b335578..8f6bf2ff205 100644 --- a/tests/queries/0_stateless/01825_type_json_9.sql +++ b/tests/queries/0_stateless/01825_type_json_9.sql @@ -7,6 +7,7 @@ SET allow_experimental_object_type = 1; CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id; INSERT INTO t_json format JSONEachRow {"id": 1, "obj": {"foo": 1, "k1": 2}}; + INSERT INTO t_json format JSONEachRow {"id": 2, "obj": {"foo": 1, "k2": 2}}; OPTIMIZE TABLE t_json FINAL; diff --git a/tests/queries/0_stateless/01825_type_json_in_array.sql b/tests/queries/0_stateless/01825_type_json_in_array.sql index e5c20d7ba6b..fc89bad05d4 100644 --- a/tests/queries/0_stateless/01825_type_json_in_array.sql +++ b/tests/queries/0_stateless/01825_type_json_in_array.sql @@ -6,6 +6,7 @@ DROP TABLE IF EXISTS t_json_array; CREATE TABLE t_json_array (id UInt32, arr Array(JSON)) ENGINE = MergeTree ORDER BY id; INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": 1, "k2": {"k3": 2, "k4": 3}}, {"k1": 2, "k2": {"k5": "foo"}}]} + INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": 3, "k2": {"k3": 4, "k4": 5}}]} SET output_format_json_named_tuples_as_objects = 1; @@ -18,6 +19,7 @@ SELECT toTypeName(arr) FROM t_json_array LIMIT 1; TRUNCATE TABLE t_json_array; INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} + INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/01825_type_json_insert_select.sql b/tests/queries/0_stateless/01825_type_json_insert_select.sql index cd0b280c360..0c83a35cce3 100644 --- a/tests/queries/0_stateless/01825_type_json_insert_select.sql +++ b/tests/queries/0_stateless/01825_type_json_insert_select.sql @@ -9,12 +9,14 @@ CREATE TABLE type_json_src (id UInt32, data JSON) ENGINE = MergeTree ORDER BY id CREATE TABLE type_json_dst AS type_json_src; INSERT INTO type_json_src VALUES (1, '{"k1": 1, "k2": "foo"}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src; SELECT DISTINCT toTypeName(data) FROM type_json_dst; SELECT id, data FROM type_json_dst ORDER BY id; INSERT INTO type_json_src VALUES (2, '{"k1": 2, "k2": "bar"}') (3, '{"k1": 3, "k3": "aaa"}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id > 1; SELECT DISTINCT toTypeName(data) FROM type_json_dst; @@ -23,10 +25,12 @@ SELECT id, data FROM type_json_dst ORDER BY id; INSERT INTO type_json_dst VALUES (4, '{"arr": [{"k11": 5, "k22": 6}, {"k11": 7, "k33": 8}]}'); INSERT INTO type_json_src VALUES (5, '{"arr": "not array"}'); + INSERT INTO type_json_dst 
SELECT * FROM type_json_src WHERE id = 5; -- { serverError INCOMPATIBLE_COLUMNS } TRUNCATE TABLE type_json_src; -INSERT INTO type_json_src VALUES (5, '{"arr": [{"k22": "str1"}]}') +INSERT INTO type_json_src VALUES (5, '{"arr": [{"k22": "str1"}]}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; SELECT DISTINCT toTypeName(data) FROM type_json_dst; @@ -45,6 +49,7 @@ SET max_insert_threads = 1; SET output_format_json_named_tuples_as_objects = 1; INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; INSERT INTO type_json_dst SELECT data FROM type_json_src; @@ -57,6 +62,7 @@ TRUNCATE TABLE type_json_dst; -- Insert in another order. Order is important, because a way how defaults are filled differs. INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; + INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; INSERT INTO type_json_dst SELECT data FROM type_json_src; diff --git a/tests/queries/0_stateless/01925_json_as_string_data_in_square_brackets.sql b/tests/queries/0_stateless/01925_json_as_string_data_in_square_brackets.sql index 3b1e5176ff9..6e6f306c109 100644 --- a/tests/queries/0_stateless/01925_json_as_string_data_in_square_brackets.sql +++ b/tests/queries/0_stateless/01925_json_as_string_data_in_square_brackets.sql @@ -3,8 +3,11 @@ DROP TABLE IF EXISTS json_square_brackets; CREATE TABLE json_square_brackets (field String) ENGINE = Memory; INSERT INTO json_square_brackets FORMAT JSONAsString [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; + INSERT INTO json_square_brackets FORMAT JSONAsString[]; + INSERT INTO json_square_brackets FORMAT JSONAsString [ ] ; + INSERT INTO json_square_brackets FORMAT JSONEachRow ; SELECT * FROM json_square_brackets; diff --git a/tests/queries/0_stateless/01943_query_id_check.sql b/tests/queries/0_stateless/01943_query_id_check.sql index ad9e88e0478..74c2dad8897 100644 --- a/tests/queries/0_stateless/01943_query_id_check.sql +++ b/tests/queries/0_stateless/01943_query_id_check.sql @@ -16,7 +16,7 @@ SELECT query FROM system.query_log WHERE initial_query_id = (SELECT * FROM tmp) DROP TABLE tmp; CREATE TABLE tmp (str String) ENGINE = Log; -INSERT INTO tmp (*) VALUES ('a') +INSERT INTO tmp (*) VALUES ('a'); SELECT count() FROM (SELECT initialQueryID() FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); SELECT count() FROM (SELECT queryID() FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); SELECT count() FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID() HAVING t == initialQueryID()); diff --git a/tests/queries/0_stateless/02002_parse_map_int_key.sql b/tests/queries/0_stateless/02002_parse_map_int_key.sql index 8d009037dd7..c4b48e3d2b6 100644 --- a/tests/queries/0_stateless/02002_parse_map_int_key.sql +++ b/tests/queries/0_stateless/02002_parse_map_int_key.sql @@ -2,7 +2,6 @@ DROP TABLE IF EXISTS t_map_int_key; CREATE TABLE t_map_int_key (m1 Map(UInt32, UInt32), m2 Map(Date, UInt32)) ENGINE = Memory; INSERT INTO t_map_int_key FORMAT CSV "{1:2, 3: 4, 5 :6, 7 : 8}","{'2021-05-20':1, '2021-05-21': 2, '2021-05-22' :3, '2021-05-23' : 4}" -; SELECT m1, m2 FROM t_map_int_key; diff --git a/tests/queries/0_stateless/02050_client_profile_events.sh 
b/tests/queries/0_stateless/02050_client_profile_events.sh index 05e48de771d..e20590a6d45 100755 --- a/tests/queries/0_stateless/02050_client_profile_events.sh +++ b/tests/queries/0_stateless/02050_client_profile_events.sh @@ -21,11 +21,17 @@ echo 'regression test for overlap profile events snapshots between queries (clic $CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'print everything' -profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events -q 'select sleep(1) from numbers(2) format Null' |& grep -c 'SelectedRows')" +profile_events="$( + $CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events -q 'select sleep(1) from numbers(2) format Null' |& + $CLICKHOUSE_LOCAL --input-format LineAsString --query "SELECT sum(extract(line, 'SelectedRows: (\\d+)')::UInt64) FROM table WHERE line LIKE '%SelectedRows: %'" +)" test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" echo 'print each 100 ms' -profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events --profile-events-delay-ms=100 -q 'select sleep(0.2) from numbers(10) format Null' |& grep -c 'SelectedRows')" +profile_events="$( + $CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events --profile-events-delay-ms=100 -q 'select sleep(0.2) from numbers(10) format Null' |& + $CLICKHOUSE_LOCAL --input-format LineAsString --query "SELECT sum(extract(line, 'SelectedRows: (\\d+)')::UInt64) FROM table WHERE line LIKE '%SelectedRows: %'" +)" test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" echo 'check that ProfileEvents is new for each query' diff --git a/tests/queries/0_stateless/02105_backslash_letter_commands.expect b/tests/queries/0_stateless/02105_backslash_letter_commands.expect index 7e78b7e7280..f09e9613a87 100755 --- a/tests/queries/0_stateless/02105_backslash_letter_commands.expect +++ b/tests/queries/0_stateless/02105_backslash_letter_commands.expect @@ -33,7 +33,6 @@ expect "Syntax error: " expect ":) " send -- " \\l ; \\d; \r" -expect "Syntax error (Multi-statements are not allowed): " expect ":) " send -- " \\l ;\r" diff --git a/tests/queries/0_stateless/02114_bool_type.sql b/tests/queries/0_stateless/02114_bool_type.sql index d4ea4e54028..6ff9206bb59 100644 --- a/tests/queries/0_stateless/02114_bool_type.sql +++ b/tests/queries/0_stateless/02114_bool_type.sql @@ -15,22 +15,31 @@ SELECT value,f FROM bool_test where value > 0; set bool_true_representation='True'; set bool_false_representation='False'; + INSERT INTO bool_test (value,f) FORMAT CSV True,test + INSERT INTO bool_test (value,f) FORMAT TSV False test + SELECT value,f FROM bool_test order by value FORMAT CSV; SELECT value,f FROM bool_test order by value FORMAT TSV; set bool_true_representation='Yes'; set bool_false_representation='No'; + INSERT INTO bool_test (value,f) FORMAT CSV Yes,test + INSERT INTO bool_test (value,f) FORMAT TSV No test + SELECT value,f FROM bool_test order by value FORMAT CSV; SELECT value,f FROM bool_test order by value FORMAT TSV; set bool_true_representation='On'; set bool_false_representation='Off'; + INSERT INTO bool_test (value,f) FORMAT CSV On,test + INSERT INTO bool_test (value,f) FORMAT TSV Off test + SELECT value,f FROM bool_test order by value FORMAT CSV; SELECT value,f FROM bool_test order by value FORMAT TSV; diff --git a/tests/queries/0_stateless/02184_ipv6_select_parsing.sql b/tests/queries/0_stateless/02184_ipv6_select_parsing.sql index 
2892de309c4..ba5399555f8 100644 --- a/tests/queries/0_stateless/02184_ipv6_select_parsing.sql +++ b/tests/queries/0_stateless/02184_ipv6_select_parsing.sql @@ -2,8 +2,11 @@ drop table if exists ips_v6; create table ips_v6(i IPv6) Engine=Memory; INSERT INTO ips_v6 SELECT toIPv6('::ffff:127.0.0.1'); + INSERT INTO ips_v6 values ('::ffff:127.0.0.1'); + INSERT INTO ips_v6 FORMAT TSV ::ffff:127.0.0.1 + INSERT INTO ips_v6 SELECT ('::ffff:127.0.0.1'); SELECT * FROM ips_v6; diff --git a/tests/queries/0_stateless/02192_comment_error.reference b/tests/queries/0_stateless/02192_comment_error.reference index 21da4d2be9e..5f097eb8df6 100644 --- a/tests/queries/0_stateless/02192_comment_error.reference +++ b/tests/queries/0_stateless/02192_comment_error.reference @@ -1,5 +1,5 @@ -OK -OK +FAIL +FAIL OK OK OK diff --git a/tests/queries/0_stateless/02192_comment_error.sh b/tests/queries/0_stateless/02192_comment_error.sh index 78ff474ae84..d48c95a6419 100755 --- a/tests/queries/0_stateless/02192_comment_error.sh +++ b/tests/queries/0_stateless/02192_comment_error.sh @@ -4,8 +4,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh +# just stop, no exception ${CLICKHOUSE_CLIENT} --query="#" 2>&1 | grep -F -q 'Syntax error' && echo 'OK' || echo 'FAIL' ${CLICKHOUSE_CLIENT} --query="#not a comemnt" 2>&1 | grep -F -q 'Syntax error' && echo 'OK' || echo 'FAIL' +# syntax error ${CLICKHOUSE_CLIENT} --query="select 1 #not a comemnt" 2>&1 | grep -F -q 'Syntax error' && echo 'OK' || echo 'FAIL' ${CLICKHOUSE_CLIENT} --query="select 1 #" 2>&1 | grep -F -q 'Syntax error' && echo 'OK' || echo 'FAIL' ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "select 42 #" 2>&1 | grep -F -q 'Syntax error' && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql index 86db9d04aa0..a232a4196db 100644 --- a/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql @@ -6,9 +6,7 @@ DROP TABLE IF EXISTS t_async_insert_02193_1; CREATE TABLE t_async_insert_02193_1 (id UInt32, s String) ENGINE = Memory; -INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT CSV -1,aaa -; +INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT CSV 1,aaa INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT Values (2, 'bbb'); diff --git a/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql b/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql index 3d6b69fe161..7cc267b7263 100644 --- a/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql +++ b/tests/queries/0_stateless/02231_buffer_aggregate_states_leak.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-tsan drop table if exists buffer_02231; drop table if exists out_02231; @@ -11,8 +11,8 @@ create table buffer_02231 ( key Int, v1 AggregateFunction(groupArray, String) -) engine=Buffer(currentDatabase(), 'out_02231', - /* layers= */1, +) engine = Buffer(currentDatabase(), 'out_02231', + /* layers= */ 1, /* min/max time */ 86400, 86400, /* min/max rows */ 1e9, 1e9, /* min/max bytes */ 1e12, 1e12, @@ -29,7 +29,7 @@ from in_02231 group by key; set optimize_trivial_insert_select = 1; -insert into in_02231 select * from numbers(10e6) settings max_memory_usage='400Mi', max_threads=1; +insert into in_02231 select * from numbers(5e6) settings max_memory_usage='400Mi', max_threads=1; drop 
table buffer_02231; drop table out_02231; diff --git a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.reference b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.reference index d3605d15a3d..2c94e483710 100644 --- a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.reference +++ b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.reference @@ -1,2 +1,2 @@ OK -Missing columns +OK diff --git a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh index eb4a91bd850..a6c47d80fa9 100755 --- a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh +++ b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh @@ -5,5 +5,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS allow_experimental_analyzer = 1" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' ||: -$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS allow_experimental_analyzer = 0 -- { serverError 47 }" 2>&1 | grep -o 'Missing columns' +$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS allow_experimental_analyzer = 1" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' +$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS allow_experimental_analyzer = 0" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/02242_subcolumns_sizes.sql b/tests/queries/0_stateless/02242_subcolumns_sizes.sql index 8c3d8e69238..d29241131d3 100644 --- a/tests/queries/0_stateless/02242_subcolumns_sizes.sql +++ b/tests/queries/0_stateless/02242_subcolumns_sizes.sql @@ -9,6 +9,7 @@ ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0; INSERT INTO t_subcolumns_sizes FORMAT JSONEachRow {"id": 1, "arr": [1, 2, 3], "n": null, "d": {"k1": "v1", "k2": [{"k3": 1, "k4": "v2"}, {"k3": 3}]}} + INSERT INTO t_subcolumns_sizes FORMAT JSONEachRow {"id": 2, "arr": [0], "n": "foo", "d": {"k1": "v3", "k2": [{"k4": "v4"}, {"k3": "v5", "k5": 5}]}} OPTIMIZE TABLE t_subcolumns_sizes FINAL; diff --git a/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql b/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql index eb83309f117..a0c6701faae 100644 --- a/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql +++ b/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql @@ -3,5 +3,6 @@ set use_structure_from_insertion_table_in_table_functions = 1; drop table if exists test_02249; create table test_02249 (x UInt32, y String) engine=Memory(); insert into test_02249 select * from input() format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + select * from test_02249; drop table test_02249; diff --git a/tests/queries/0_stateless/02267_jsonlines_ndjson_format.sql b/tests/queries/0_stateless/02267_jsonlines_ndjson_format.sql index 5e32758b328..5dfb17da98c 100644 --- a/tests/queries/0_stateless/02267_jsonlines_ndjson_format.sql +++ b/tests/queries/0_stateless/02267_jsonlines_ndjson_format.sql @@ -6,6 +6,7 @@ DROP TABLE IF EXISTS 02267_t; CREATE TABLE 02267_t (n1 UInt32, n2 UInt32) ENGINE = Memory; INSERT INTO 02267_t FORMAT JSONLines {"n1": 1, "n2": 2} {"n1": 3, "n2": 4} {"n1": 5, "n2": 6}; + INSERT INTO 02267_t FORMAT NDJSON {"n1": 1, "n2": 2} {"n1": 3, "n2": 4} {"n1": 5, "n2": 6}; SELECT * FROM 02267_t ORDER BY n1, n2 FORMAT JSONLines; diff --git 
a/tests/queries/0_stateless/02366_kql_summarize.sql b/tests/queries/0_stateless/02366_kql_summarize.sql index ca16bc3a755..1480d10e14a 100644 --- a/tests/queries/0_stateless/02366_kql_summarize.sql +++ b/tests/queries/0_stateless/02366_kql_summarize.sql @@ -40,7 +40,7 @@ create table Dates EventTime DateTime, ) ENGINE = Memory; -Insert into Dates VALUES ('2015-10-12') , ('2016-10-12') +Insert into Dates VALUES ('2015-10-12') , ('2016-10-12'); Select '-- test summarize --' ; set dialect='kusto'; Customers | summarize count(), min(Age), max(Age), avg(Age), sum(Age); diff --git a/tests/queries/0_stateless/02387_parse_date_as_datetime.sql b/tests/queries/0_stateless/02387_parse_date_as_datetime.sql index 9727f677be2..24d367e56b6 100644 --- a/tests/queries/0_stateless/02387_parse_date_as_datetime.sql +++ b/tests/queries/0_stateless/02387_parse_date_as_datetime.sql @@ -1,13 +1,19 @@ CREATE TEMPORARY TABLE test (`i` Int64, `d` DateTime); + INSERT INTO test FORMAT JSONEachRow {"i": 123, "d": "2022-05-03"}; + INSERT INTO test FORMAT JSONEachRow {"i": 456, "d": "2022-05-03 01:02:03"}; + SELECT * FROM test ORDER BY i; DROP TABLE test; CREATE TEMPORARY TABLE test (`i` Int64, `d` DateTime64); + INSERT INTO test FORMAT JSONEachRow {"i": 123, "d": "2022-05-03"}; + INSERT INTO test FORMAT JSONEachRow {"i": 456, "d": "2022-05-03 01:02:03"}; + SELECT * FROM test ORDER BY i; DROP TABLE test; diff --git a/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh b/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh index 776d1f850b0..a0312b7a40c 100755 --- a/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh +++ b/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh @@ -25,7 +25,7 @@ function insert_data $CLICKHOUSE_CURL -sS -d 'begin transaction' "$CLICKHOUSE_URL&$TXN_SETTINGS" SETTINGS="$SETTINGS&session_check=1" BEGIN="begin transaction;" - COMMIT=$(echo -ne "\n\ncommit") + COMMIT=$(echo -ne "\n\n\ncommit") fi # max_block_size=10000, so external table will contain smaller blocks that will be squashed on insert-select (more chances to catch a bug on query cancellation) diff --git a/tests/queries/0_stateless/02444_async_broken_outdated_part_loading.sh b/tests/queries/0_stateless/02444_async_broken_outdated_part_loading.sh index d24c6afcef3..a1313ba16e5 100755 --- a/tests/queries/0_stateless/02444_async_broken_outdated_part_loading.sh +++ b/tests/queries/0_stateless/02444_async_broken_outdated_part_loading.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "drop table if exists rmt sync;" -$CLICKHOUSE_CLIENT -q "create table rmt (n int) engine=ReplicatedMergeTree('/test/02444/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') order by n" +$CLICKHOUSE_CLIENT -q "create table rmt (n int) engine=ReplicatedMergeTree('/test/02444/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt', '1') order by n settings old_parts_lifetime=600" $CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "insert into rmt values (1);" $CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "insert into rmt values (2);" diff --git a/tests/queries/0_stateless/02458_use_structure_from_insertion_table.sql b/tests/queries/0_stateless/02458_use_structure_from_insertion_table.sql index 71a2381d7b6..845f895b7b4 100644 --- a/tests/queries/0_stateless/02458_use_structure_from_insertion_table.sql +++ b/tests/queries/0_stateless/02458_use_structure_from_insertion_table.sql @@ -7,21 +7,22 @@ create table test (x Nullable(UInt32), y UInt32) engine=Memory(); set use_structure_from_insertion_table_in_table_functions=2; set input_format_json_infer_incomplete_types_as_strings=0; + insert into test select * from file(02458_data.jsonl); insert into test select x, 1 from file(02458_data.jsonl); insert into test select x, y from file(02458_data.jsonl); insert into test select x + 1, y from file(02458_data.jsonl); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} insert into test select x, z from file(02458_data.jsonl); - insert into test select * from file(02458_data.jsoncompacteachrow); insert into test select x, 1 from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} insert into test select x, y from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} insert into test select x + 1, y from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} insert into test select x, z from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} - insert into test select * from input() format CSV 1,2 + insert into test select x, y from input() format CSV 1,2 -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} -insert into test select x, y from input() format JSONEachRow {"x" : null, "y" : 42} + +insert into test select x, y from input() format JSONEachRow {"x" : null, "y" : 42}; select * from test order by y; @@ -31,9 +32,9 @@ insert into test select * from file(02458_data.jsonl); insert into test select x from file(02458_data.jsonl); insert into test select y from file(02458_data.jsonl); insert into test select y as x from file(02458_data.jsonl); - insert into test select c1 from input() format CSV 1,2; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} -insert into test select x from input() format JSONEachRow {"x" : null, "y" : 42} + +insert into test select x from input() format JSONEachRow {"x" : null, "y" : 42}; select * from test order by x; diff --git a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh index 3c046020773..f747b3156a5 100755 --- a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh +++ b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh @@ -5,4 +5,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh $CLICKHOUSE_BENCHMARK -q 'select throwIf(1)' |& grep '^An error occurred while processing the query.*Exception:' -c -$CLICKHOUSE_BENCHMARK --max-consecutive-errors 10 -q 'select throwIf(1)' |& grep '^An error occurred while processing the query.*Exception:' -c +RES=$($CLICKHOUSE_BENCHMARK --max-consecutive-errors 10 -q 'select throwIf(1)' |& tee "${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}.log" | grep '^An error occurred while processing the query.*Exception:' -c) + +if [ "$RES" -eq 10 ] +then + echo "$RES" +else + cat "${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}.log" +fi diff --git a/tests/queries/0_stateless/02495_analyzer_storage_join.sql b/tests/queries/0_stateless/02495_analyzer_storage_join.sql index aeab15862c3..7e6c03971f9 100644 --- a/tests/queries/0_stateless/02495_analyzer_storage_join.sql +++ b/tests/queries/0_stateless/02495_analyzer_storage_join.sql @@ -23,7 +23,7 @@ SELECT x FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 OR SELECT '--- name clashes ---'; CREATE TABLE t (key2 UInt64, key1 Int64, b UInt64, x UInt64, val UInt64) ENGINE = Memory; -INSERT INTO t VALUES (1, -1, 11, 111, 1111), (2, -2, 22, 222, 2222), (3, -3, 33, 333, 2222), (4, -4, 44, 444, 4444), (5, -5, 55, 555, 5555) +INSERT INTO t VALUES (1, -1, 11, 111, 1111), (2, -2, 22, 222, 2222), (3, -3, 33, 333, 2222), (4, -4, 44, 444, 4444), (5, -5, 55, 555, 5555); SELECT '-- using --'; diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_table_source.sql b/tests/queries/0_stateless/02504_regexp_dictionary_table_source.sql index 487b6e7f58e..604c5b179cf 100644 --- a/tests/queries/0_stateless/02504_regexp_dictionary_table_source.sql +++ b/tests/queries/0_stateless/02504_regexp_dictionary_table_source.sql @@ -14,12 +14,12 @@ CREATE TABLE regexp_dictionary_source_table -- test back reference. 
-INSERT INTO regexp_dictionary_source_table VALUES (1, 0, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']) -INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']) -INSERT INTO regexp_dictionary_source_table VALUES (3, 2, '33/tclwebkit', ['version'], ['13']) -INSERT INTO regexp_dictionary_source_table VALUES (4, 2, '3[12]/tclwebkit', ['version'], ['12']) -INSERT INTO regexp_dictionary_source_table VALUES (5, 2, '3[12]/tclwebkit', ['version'], ['11']) -INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']) +INSERT INTO regexp_dictionary_source_table VALUES (1, 0, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']); +INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 2, '33/tclwebkit', ['version'], ['13']); +INSERT INTO regexp_dictionary_source_table VALUES (4, 2, '3[12]/tclwebkit', ['version'], ['12']); +INSERT INTO regexp_dictionary_source_table VALUES (5, 2, '3[12]/tclwebkit', ['version'], ['11']); +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); create dictionary regexp_dict1 ( @@ -50,23 +50,24 @@ CREATE TABLE needle_table ENGINE=TinyLog; INSERT INTO needle_table select concat(toString(number + 30), '/tclwebkit', toString(number)) from system.numbers limit 15; + select * from needle_table; select dictGet(regexp_dict1, ('name', 'version'), key) from needle_table; -- test invalid -INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']) +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } truncate table regexp_dictionary_source_table; -INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']) +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } truncate table regexp_dictionary_source_table; -INSERT INTO regexp_dictionary_source_table VALUES (1, 2, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']) -INSERT INTO regexp_dictionary_source_table VALUES (2, 3, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']) -INSERT INTO regexp_dictionary_source_table VALUES (3, 1, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']) +INSERT INTO regexp_dictionary_source_table VALUES (1, 2, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']); +INSERT INTO regexp_dictionary_source_table VALUES (2, 3, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 1, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } -- test priority diff --git a/tests/queries/0_stateless/02536_replace_with_nonconst_needle_and_replacement.sql b/tests/queries/0_stateless/02536_replace_with_nonconst_needle_and_replacement.sql index 
926bde3a74b..b88224a89c1 100644 --- a/tests/queries/0_stateless/02536_replace_with_nonconst_needle_and_replacement.sql +++ b/tests/queries/0_stateless/02536_replace_with_nonconst_needle_and_replacement.sql @@ -7,7 +7,7 @@ CREATE TABLE test_tab engine = MergeTree() ORDER BY id; -INSERT INTO test_tab VALUES (1, 'Hello World', 'l', 'xx') (2, 'Hello World', 'll', 'x') (3, 'Hello World', 'not_found', 'x') (4, 'Hello World', '[eo]', 'x') (5, 'Hello World', '.', 'x') +INSERT INTO test_tab VALUES (1, 'Hello World', 'l', 'xx') (2, 'Hello World', 'll', 'x') (3, 'Hello World', 'not_found', 'x') (4, 'Hello World', '[eo]', 'x') (5, 'Hello World', '.', 'x'); SELECT '** replaceAll() **'; @@ -77,7 +77,7 @@ CREATE TABLE test_tab engine = MergeTree() ORDER BY id; -INSERT INTO test_tab VALUES (1, 'Hello World', 'l', 'x') (2, 'Hello World', '', 'y') +INSERT INTO test_tab VALUES (1, 'Hello World', 'l', 'x') (2, 'Hello World', '', 'y'); -- needle: non-const, replacement: const SELECT replaceAll(haystack, needle, 'x') FROM test_tab; -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/tests/queries/0_stateless/02553_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_type_json_attach_partition.sql index 9225106f767..e77f5885ec3 100644 --- a/tests/queries/0_stateless/02553_type_json_attach_partition.sql +++ b/tests/queries/0_stateless/02553_type_json_attach_partition.sql @@ -5,6 +5,7 @@ DROP TABLE IF EXISTS t_json_attach_partition; CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; + ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": [1, 2]}}; diff --git a/tests/queries/0_stateless/02563_analyzer_merge.reference b/tests/queries/0_stateless/02563_analyzer_merge.reference index 8be01c88d6f..2b3cc2d5dfb 100644 --- a/tests/queries/0_stateless/02563_analyzer_merge.reference +++ b/tests/queries/0_stateless/02563_analyzer_merge.reference @@ -1,2 +1,3 @@ 0 Value_0 02563_db test_merge_table_1 1 Value_1 02563_db test_merge_table_2 +91138316-5127-45ac-9c25-4ad8779777b4 160 diff --git a/tests/queries/0_stateless/02563_analyzer_merge.sql b/tests/queries/0_stateless/02563_analyzer_merge.sql index c90f7dcb2a5..217fb7019c4 100644 --- a/tests/queries/0_stateless/02563_analyzer_merge.sql +++ b/tests/queries/0_stateless/02563_analyzer_merge.sql @@ -35,4 +35,49 @@ SELECT id, value, _database, _table FROM 02563_db.test_merge_table ORDER BY id; DROP TABLE 02563_db.test_merge_table; DROP TABLE 02563_db.test_merge_table_1; DROP TABLE 02563_db.test_merge_table_2; + +CREATE TABLE 02563_db.t_1 +( + timestamp DateTime64(9), + a String, + b String +) +ENGINE = MergeTree +PARTITION BY formatDateTime(toStartOfMinute(timestamp), '%Y%m%d%H', 'UTC') +ORDER BY (timestamp, a, b); + +CREATE TABLE 02563_db.dist_t_1 (timestamp DateTime64(9), a String, b String) ENGINE = Distributed('test_shard_localhost', '02563_db', 't_1'); + +CREATE TABLE 02563_db.m ENGINE = Merge('02563_db', '^dist_'); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-13 22:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(30); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-13 23:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(30); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-14 
00:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(100); + + +SELECT '91138316-5127-45ac-9c25-4ad8779777b4', + count() +FROM 02563_db.m; + +DROP TABLE 02563_db.t_1; +DROP TABLE 02563_db.dist_t_1; +DROP TABLE 02563_db.m; + DROP DATABASE 02563_db; diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.reference b/tests/queries/0_stateless/02572_query_views_log_background_thread.reference index 22dfaf93781..d7f2272f5b4 100644 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.reference +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.reference @@ -1,20 +1,4 @@ --- { echoOn } -insert into buffer_02572 values (1); --- ensure that the flush was not direct -select * from data_02572; -select * from copy_02572; --- we cannot use OPTIMIZE, this will attach query context, so let's wait -SET function_sleep_max_microseconds_per_block = 6000000; -select sleepEachRow(1) from numbers(3*2) format Null; -select * from data_02572; +OK 1 -select * from copy_02572; 1 -system flush logs; -select count() > 0, lower(status::String), errorCodeToName(exception_code) - from system.query_views_log where - view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and - view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') - group by 2, 3 -; 1 queryfinish OK diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh new file mode 100755 index 00000000000..a3e428e75c8 --- /dev/null +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# INSERT buffer_02572 -> data_02572 -> copy_02572 +# ^^ +# push to system.query_views_log + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "drop table if exists buffer_02572; + drop table if exists data_02572; drop table if exists copy_02572; drop table if exists mv_02572;" + +${CLICKHOUSE_CLIENT} --query="create table copy_02572 (key Int) engine=Memory();" +${CLICKHOUSE_CLIENT} --query="create table data_02572 (key Int) engine=Memory();" +${CLICKHOUSE_CLIENT} --query="create table buffer_02572 (key Int) engine=Buffer(currentDatabase(), data_02572, 1, 3, 3, 1, 1e9, 1, 1e9);" +${CLICKHOUSE_CLIENT} --query="create materialized view mv_02572 to copy_02572 as select * from data_02572;" + +${CLICKHOUSE_CLIENT} --query="insert into buffer_02572 values (1);" + +# ensure that the flush was not direct +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" + +# we cannot use OPTIMIZE, this will attach query context, so let's wait +for _ in {1..100}; do + $CLICKHOUSE_CLIENT -q "select * from data_02572;" | grep -q "1" && echo 'OK' && break + sleep 0.5 +done + + +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" + +${CLICKHOUSE_CLIENT} --query="system flush logs;" +${CLICKHOUSE_CLIENT} --query="select count() > 0, lower(status::String), errorCodeToName(exception_code) + from system.query_views_log where + view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and + view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') + group by 2, 3;" \ No newline at end of file diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sql b/tests/queries/0_stateless/02572_query_views_log_background_thread.sql deleted file mode 100644 index 939c189c5fe..00000000000 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.sql +++ /dev/null @@ -1,36 +0,0 @@ --- INSERT buffer_02572 -> data_02572 -> copy_02572 --- ^^ --- push to system.query_views_log - -drop table if exists buffer_02572; -drop table if exists data_02572; -drop table if exists copy_02572; -drop table if exists mv_02572; - -create table copy_02572 (key Int) engine=Memory(); -create table data_02572 (key Int) engine=Memory(); -create table buffer_02572 (key Int) engine=Buffer(currentDatabase(), data_02572, 1, - /* never direct flush for flush from background thread */ - /* min_time= */ 3, 3, - 1, 1e9, - 1, 1e9); -create materialized view mv_02572 to copy_02572 as select * from data_02572; - --- { echoOn } -insert into buffer_02572 values (1); --- ensure that the flush was not direct -select * from data_02572; -select * from copy_02572; --- we cannot use OPTIMIZE, this will attach query context, so let's wait -SET function_sleep_max_microseconds_per_block = 6000000; -select sleepEachRow(1) from numbers(3*2) format Null; -select * from data_02572; -select * from copy_02572; - -system flush logs; -select count() > 0, lower(status::String), errorCodeToName(exception_code) - from system.query_views_log where - view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and - view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') - group by 2, 3 -; diff --git a/tests/queries/0_stateless/02726_async_insert_flush_queue.sql b/tests/queries/0_stateless/02726_async_insert_flush_queue.sql index e393d96fc40..97d644fa4d6 100644 --- a/tests/queries/0_stateless/02726_async_insert_flush_queue.sql +++ b/tests/queries/0_stateless/02726_async_insert_flush_queue.sql @@ -11,9 +11,13 @@ SET 
async_insert_use_adaptive_busy_timeout=0; SET async_insert_busy_timeout_max_ms = 10000000; INSERT INTO t_async_inserts_flush VALUES (1) (2); + INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": 10} {"a": 20}; -INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": "str"} -INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": 100} {"a": 200} + +INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": "str"}; + +INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": 100} {"a": 200}; + INSERT INTO t_async_inserts_flush VALUES (3) (4) (5); SELECT sleep(1) FORMAT Null; diff --git a/tests/queries/0_stateless/02751_multiquery_with_argument.reference b/tests/queries/0_stateless/02751_multiquery_with_argument.reference index 4b0c199ed35..2e55712e49c 100644 --- a/tests/queries/0_stateless/02751_multiquery_with_argument.reference +++ b/tests/queries/0_stateless/02751_multiquery_with_argument.reference @@ -17,8 +17,6 @@ Bad arguments Bad arguments Bad arguments Bad arguments -Bad arguments -Bad arguments 320 317 Bad arguments diff --git a/tests/queries/0_stateless/02751_multiquery_with_argument.sh b/tests/queries/0_stateless/02751_multiquery_with_argument.sh index fea2978c116..7b959a3c3dc 100755 --- a/tests/queries/0_stateless/02751_multiquery_with_argument.sh +++ b/tests/queries/0_stateless/02751_multiquery_with_argument.sh @@ -22,14 +22,17 @@ $CLICKHOUSE_CLIENT --queries-file "queries.csv" --multiquery "SELECT 251;" 2>&1 $CLICKHOUSE_LOCAL -n "SELECT 301" $CLICKHOUSE_LOCAL -n "SELECT 302;" $CLICKHOUSE_LOCAL -n "SELECT 304;SELECT 305;" +# --multiquery and -n are obsolete by now and no-ops. +# The only exception is a single --multiquery "" $CLICKHOUSE_LOCAL --multiquery --multiquery 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL -n --multiquery 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL --multiquery -n 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL --multiquery --multiquery "SELECT 306; SELECT 307;" 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL -n --multiquery "SELECT 307; SELECT 308;" 2>&1 | grep -o 'Bad arguments' -$CLICKHOUSE_LOCAL --multiquery "SELECT 309; SELECT 310;" --multiquery 2>&1 | grep -o 'Bad arguments' +$CLICKHOUSE_LOCAL --multiquery "SELECT 309; SELECT 310;" --multiquery 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL --multiquery "SELECT 311;" --multiquery "SELECT 312;" 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL --multiquery "SELECT 313;" -n "SELECT 314;" 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL -n "SELECT 320" --query "SELECT 317;" +# --query should be followed by SQL $CLICKHOUSE_LOCAL --query -n "SELECT 400;" 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL --query -n --multiquery "SELECT 401;" 2>&1 | grep -o 'Bad arguments' diff --git a/tests/queries/0_stateless/02771_multiple_query_arguments.reference b/tests/queries/0_stateless/02771_multiple_query_arguments.reference index 0c008e96965..b98ad7086a9 100644 --- a/tests/queries/0_stateless/02771_multiple_query_arguments.reference +++ b/tests/queries/0_stateless/02771_multiple_query_arguments.reference @@ -2,15 +2,17 @@ 101 202 202 -Multi-statements are not allowed -Empty query +303 +303 +303 Bad arguments Syntax error 101 101 202 202 -Multi-statements are not allowed -Empty query +303 +303 +303 Bad arguments Syntax error diff --git a/tests/queries/0_stateless/02771_multiple_query_arguments.sh b/tests/queries/0_stateless/02771_multiple_query_arguments.sh index 2958c9e9a72..ae6e23eb61a 100755 --- a/tests/queries/0_stateless/02771_multiple_query_arguments.sh +++ 
b/tests/queries/0_stateless/02771_multiple_query_arguments.sh @@ -7,15 +7,15 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # clickhouse-client $CLICKHOUSE_CLIENT --query "SELECT 101" --query "SELECT 101" $CLICKHOUSE_CLIENT --query "SELECT 202;" --query "SELECT 202;" -$CLICKHOUSE_CLIENT --query "SELECT 303" --query "SELECT 303; SELECT 303" 2>&1 | grep -o 'Multi-statements are not allowed' -$CLICKHOUSE_CLIENT --query "" --query "" 2>&1 | grep -o 'Empty query' +$CLICKHOUSE_CLIENT --query "SELECT 303" --query "SELECT 303; SELECT 303" +$CLICKHOUSE_CLIENT --query "" --query "" 2>&1 $CLICKHOUSE_CLIENT --query "SELECT 303" --query 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_CLIENT --query "SELECT 303" --query "SELE" 2>&1 | grep -o 'Syntax error' # clickhouse-local $CLICKHOUSE_LOCAL --query "SELECT 101" --query "SELECT 101" $CLICKHOUSE_LOCAL --query "SELECT 202;" --query "SELECT 202;" -$CLICKHOUSE_LOCAL --query "SELECT 303" --query "SELECT 303; SELECT 303" 2>&1 | grep -o 'Multi-statements are not allowed' # behaves differently than clickhouse-client, TODO make it consistent -$CLICKHOUSE_LOCAL --query "" --query "" 2>&1 | grep -o 'Empty query' # behaves equally different than clickhouse-client TODO +$CLICKHOUSE_LOCAL --query "SELECT 303" --query "SELECT 303; SELECT 303" +$CLICKHOUSE_LOCAL --query "" --query "" $CLICKHOUSE_LOCAL --query "SELECT 303" --query 2>&1 | grep -o 'Bad arguments' $CLICKHOUSE_LOCAL --query "SELECT 303" --query "SELE" 2>&1 | grep -o 'Syntax error' diff --git a/tests/queries/0_stateless/02833_concurrrent_sessions.reference b/tests/queries/0_stateless/02833_concurrent_sessions.reference similarity index 84% rename from tests/queries/0_stateless/02833_concurrrent_sessions.reference rename to tests/queries/0_stateless/02833_concurrent_sessions.reference index bfe507e8eac..904e8cefc6e 100644 --- a/tests/queries/0_stateless/02833_concurrrent_sessions.reference +++ b/tests/queries/0_stateless/02833_concurrent_sessions.reference @@ -1,34 +1,34 @@ sessions: -150 +45 port_0_sessions: 0 address_0_sessions: 0 tcp_sessions -60 +18 http_sessions -30 +9 http_with_session_id_sessions -30 -my_sql_sessions -30 +9 +mysql_sessions +9 Corresponding LoginSuccess/Logout -10 +3 LoginFailure -10 +3 Corresponding LoginSuccess/Logout -10 +3 LoginFailure -10 +3 Corresponding LoginSuccess/Logout -10 +3 LoginFailure -10 +3 Corresponding LoginSuccess/Logout -10 +3 LoginFailure -10 +3 Corresponding LoginSuccess/Logout -10 +3 LoginFailure -10 +3 diff --git a/tests/queries/0_stateless/02833_concurrrent_sessions.sh b/tests/queries/0_stateless/02833_concurrent_sessions.sh similarity index 88% rename from tests/queries/0_stateless/02833_concurrrent_sessions.sh rename to tests/queries/0_stateless/02833_concurrent_sessions.sh index c5b6204529b..846661cfeed 100755 --- a/tests/queries/0_stateless/02833_concurrrent_sessions.sh +++ b/tests/queries/0_stateless/02833_concurrent_sessions.sh @@ -37,11 +37,11 @@ done # These functions try to create a session with successful login and logout. # Sleep a small, random amount of time to make concurrency more intense. # and try to login with an invalid password. 
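# Editor's note: a hedged, self-contained sketch of the session loop described by the comment above (the name demo_tcp_session, the iteration count, and the "wrong" password are illustrative, not part of this patch; the real per-protocol functions follow below). Each iteration performs one successful login/logout and one login that is expected to fail because of an invalid password.
demo_tcp_session() {
    local user=$1
    local i
    for ((i = 0; i < 3; i++)); do
        # successful login + logout, with a small random sleep to increase overlap between concurrent sessions
        ${CLICKHOUSE_CLIENT} -q "SELECT 1, sleep(0.01${RANDOM})" --user="${user}" --password="pass"
        # login failure with an invalid password; the error is expected and ignored
        ${CLICKHOUSE_CLIENT} -q "SELECT 1" --user="${user}" --password="wrong" 2>/dev/null || true
    done
}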
-function tcp_session() +function tcp_session() { local user=$1 local i=0 - while (( (i++) < 10 )); do + while (( (i++) < 3 )); do # login logout ${CLICKHOUSE_CLIENT} -q "SELECT 1, sleep(0.01${RANDOM})" --user="${user}" --password="pass" # login failure @@ -49,11 +49,11 @@ function tcp_session() done } -function http_session() +function http_session() { local user=$1 local i=0 - while (( (i++) < 10 )); do + while (( (i++) < 3 )); do # login logout ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=pass" -d "SELECT 3, sleep(0.01${RANDOM})" @@ -62,11 +62,11 @@ function http_session() done } -function http_with_session_id_session() +function http_with_session_id_session() { local user=$1 local i=0 - while (( (i++) < 10 )); do + while (( (i++) < 3 )); do # login logout ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${user}&user=${user}&password=pass" -d "SELECT 5, sleep 0.01${RANDOM}" @@ -75,11 +75,11 @@ function http_with_session_id_session() done } -function mysql_session() +function mysql_session() { local user=$1 local i=0 - while (( (i++) < 10 )); do + while (( (i++) < 3 )); do # login logout ${CLICKHOUSE_CLIENT} -q "SELECT 1, sleep(0.01${RANDOM}) FROM mysql('127.0.0.1:9004', 'system', 'one', '${user}', 'pass')" @@ -97,29 +97,29 @@ export -f http_with_session_id_session; export -f mysql_session; for user in "${TCP_USERS[@]}"; do - timeout 60s bash -c "tcp_session ${user}" >/dev/null 2>&1 & + tcp_session ${user} >/dev/null 2>&1 & done for user in "${HTTP_USERS[@]}"; do - timeout 60s bash -c "http_session ${user}" >/dev/null 2>&1 & + http_session ${user} >/dev/null 2>&1 & done for user in "${HTTP_WITH_SESSION_ID_SESSION_USERS[@]}"; do - timeout 60s bash -c "http_with_session_id_session ${user}" >/dev/null 2>&1 & + http_with_session_id_session ${user} >/dev/null 2>&1 & done for user in "${MYSQL_USERS[@]}"; do - timeout 60s bash -c "mysql_session ${user}" >/dev/null 2>&1 & + mysql_session ${user} >/dev/null 2>&1 & done wait ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS" -echo "sessions:" +echo "sessions:" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${ALL_USERS_SQL_COLLECTION_STRING})" -echo "port_0_sessions:" +echo "port_0_sessions:" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${ALL_USERS_SQL_COLLECTION_STRING}) AND client_port = 0" echo "address_0_sessions:" @@ -131,13 +131,13 @@ echo "http_sessions" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${HTTP_USERS_SQL_COLLECTION_STRING}) AND interface = 'HTTP'" echo "http_with_session_id_sessions" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${HTTP_WITH_SESSION_ID_USERS_SQL_COLLECTION_STRING}) AND interface = 'HTTP'" -echo "my_sql_sessions" +echo "mysql_sessions" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN (${MYSQL_USERS_SQL_COLLECTION_STRING}) AND interface = 'MySQL'" for user in "${ALL_USERS[@]}"; do ${CLICKHOUSE_CLIENT} -q "DROP USER ${user}" - echo "Corresponding LoginSuccess/Logout" + echo "Corresponding LoginSuccess/Logout" ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'LoginSuccess' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'Logout')" echo "LoginFailure" - ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.session_log WHERE user = '${user}' AND type = 'LoginFailure'" + ${CLICKHOUSE_CLIENT} -q 
"SELECT COUNT(*) FROM system.session_log WHERE user = '${user}' AND type = 'LoginFailure'" done diff --git a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.reference b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.reference index d99881dfa6b..1cc8f10a4cf 100644 --- a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.reference +++ b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.reference @@ -1,5 +1,7 @@ -Multi-statements are not allowed -Multi-statements are not allowed +1 +2 +1 +2 1 2 1 diff --git a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh index e2dab337dc2..3f2b732e71b 100755 --- a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh +++ b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh @@ -4,10 +4,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -# throw exception -$CLICKHOUSE_CLIENT -q "select 1; select 2;" 2>&1 | grep -o 'Multi-statements are not allowed' -$CLICKHOUSE_LOCAL -q "select 1; select 2;" 2>&1 | grep -o 'Multi-statements are not allowed' -# execute correctly +# clickhouse-local and clickhouse-client behave the same +$CLICKHOUSE_CLIENT -q "select 1; select 2;" +$CLICKHOUSE_LOCAL -q "select 1; select 2;" + +# -n is a no-op $CLICKHOUSE_CLIENT -n -q "select 1; select 2;" $CLICKHOUSE_LOCAL -n -q "select 1; select 2;" diff --git a/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.reference b/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.reference index 250a673a26b..e69de29bb2d 100644 --- a/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.reference +++ b/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.reference @@ -1,6 +0,0 @@ -client no multiquery -Cannot read data after semicolon (and input_format_values_allow_data_after_semicolon=0) -client multiquery -local no multiquery -Cannot read data after semicolon (and input_format_values_allow_data_after_semicolon=0) -local multiquery diff --git a/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.sh b/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.sh index 8164c91b2ae..2b9ac6bfd54 100755 --- a/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.sh +++ b/tests/queries/0_stateless/02898_input_format_values_allow_data_after_semicolon.sh @@ -4,12 +4,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -echo "client no multiquery" -$CLICKHOUSE_CLIENT -q "insert into function null() values (1); -- { foo }" |& grep -F -o "Cannot read data after semicolon (and input_format_values_allow_data_after_semicolon=0)" -echo "client multiquery" -$CLICKHOUSE_CLIENT -n -q "insert into function null() values (1); -- { foo }" - -echo "local no multiquery" -$CLICKHOUSE_LOCAL -q "insert into function null() values (1); -- { foo }" |& grep -F -o "Cannot read data after semicolon (and input_format_values_allow_data_after_semicolon=0)" -echo "local multiquery" -$CLICKHOUSE_LOCAL -n -q "insert into function null() values (1); -- { foo }" +$CLICKHOUSE_CLIENT -q "insert into function null() values (1); -- { foo }" +$CLICKHOUSE_LOCAL -q "insert into function null() values (1); -- { foo }" diff --git a/tests/queries/0_stateless/02900_window_function_with_sparse_column.sql b/tests/queries/0_stateless/02900_window_function_with_sparse_column.sql index 0b053d3e870..6919e23ad1f 100644 --- a/tests/queries/0_stateless/02900_window_function_with_sparse_column.sql +++ b/tests/queries/0_stateless/02900_window_function_with_sparse_column.sql @@ -18,7 +18,7 @@ ENGINE = MergeTree PARTITION BY toYYYYMM(time) ORDER BY (key, id, time); -INSERT INTO test1 VALUES ('id0', now(), 3, false) +INSERT INTO test1 VALUES ('id0', now(), 3, false); SELECT last_value(value) OVER (PARTITION BY id ORDER BY time ASC) as last_value FROM test1 diff --git a/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh b/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh index ccdf52a6e23..ee070b40f6f 100755 --- a/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh +++ b/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh @@ -11,11 +11,23 @@ $CLICKHOUSE_CLIENT -nm -q " CREATE TABLE $database_name.02911_backup_restore_keeper_map1 (key UInt64, value String) Engine=KeeperMap('/' || currentDatabase() || '/test02911') PRIMARY KEY key; CREATE TABLE $database_name.02911_backup_restore_keeper_map2 (key UInt64, value String) Engine=KeeperMap('/' || currentDatabase() || '/test02911') PRIMARY KEY key; -- table using same Keeper path as 02911_backup_restore_keeper_map1 CREATE TABLE $database_name.02911_backup_restore_keeper_map3 (key UInt64, value String) Engine=KeeperMap('/' || currentDatabase() || '/test02911_different') PRIMARY KEY key; - - INSERT INTO $database_name.02911_backup_restore_keeper_map2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5000; - INSERT INTO $database_name.02911_backup_restore_keeper_map3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 3000; " +# KeeperMap table engine doesn't have internal retries for interaction with Keeper. Do it on our own, otherwise tests with overloaded server can be flaky. 
+while true +do + $CLICKHOUSE_CLIENT -nm -q "INSERT INTO $database_name.02911_backup_restore_keeper_map2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5000; + " 2>&1 | grep -q "KEEPER_EXCEPTION" && sleep 1 && continue + break +done + +while true +do + $CLICKHOUSE_CLIENT -nm -q "INSERT INTO $database_name.02911_backup_restore_keeper_map3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 3000; + " 2>&1 | grep -q "KEEPER_EXCEPTION" && sleep 1 && continue + break +done + backup_path="$database_name" for i in $(seq 1 3); do $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" @@ -45,4 +57,4 @@ for i in $(seq 1 3); do $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" done -$CLICKHOUSE_CLIENT -q "DROP DATABASE $database_name SYNC;" \ No newline at end of file +$CLICKHOUSE_CLIENT -q "DROP DATABASE $database_name SYNC;" diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.sh b/tests/queries/0_stateless/02930_client_file_log_comment.sh index 3d0df6de9b4..50cd587e4b5 100755 --- a/tests/queries/0_stateless/02930_client_file_log_comment.sh +++ b/tests/queries/0_stateless/02930_client_file_log_comment.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment +# reset --log_comment, because the test has to set its own CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 9081035579d..6ffc4064413 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -195,7 +195,7 @@ $CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -nq " drop table d; truncate src; - insert into src values (1) + insert into src values (1); create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do @@ -213,6 +213,7 @@ done # we wait for a slow refresh, not a previous fast one.) 
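The refresh test above waits by polling a single-value query in a `while` loop until it reaches an expected state ('Finished', 'Running'). A hedged sketch of how such a poll could be wrapped with an attempt cap, so a broken test times out instead of hanging (hypothetical helper, not part of this patch; assumes $CLICKHOUSE_CLIENT from shell_config.sh):

    # Hypothetical helper: poll a query until it returns the expected value,
    # giving up after a fixed number of attempts.
    function wait_for_value()
    {
        local query=$1
        local expected=$2
        local attempts=${3:-300}
        for _ in $(seq 1 "$attempts")
        do
            # xargs trims whitespace around the returned value, as in the loops above.
            if [ "$($CLICKHOUSE_CLIENT -nq "$query" | xargs)" = "$expected" ]; then
                return 0
            fi
            sleep 0.5
        done
        return 1
    }
    # Usage sketch: wait_for_value "select last_refresh_result from refreshes" "Finished" || exit 1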
$CLICKHOUSE_CLIENT -nq " insert into src select * from numbers(1000) settings max_block_size=1; + system start view e;" while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] do diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference index 6ed281c757a..98fb6a68656 100644 --- a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference +++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference @@ -1,2 +1,4 @@ 1 1 +1 +1 diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh index 15f169d880f..b587549cb60 100755 --- a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh +++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh @@ -18,8 +18,12 @@ ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIAL ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" -# But it is allowed with the special setting -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --database_replicated_allow_heavy_create=1 +# POPULATE is allowed with the special setting ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --database_replicated_allow_heavy_create=1 +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv3 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --compatibility='24.6' + +# AS SELECT is forbidden even with the setting +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --database_replicated_allow_heavy_create=1 |& grep -cm1 "SUPPORT_IS_DISABLED" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --compatibility='24.6' |& grep -cm1 "SUPPORT_IS_DISABLED" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" diff --git a/tests/queries/0_stateless/02941_variant_type_1.sh b/tests/queries/0_stateless/02941_variant_type_1.sh index 723de45eaad..c12bece6d54 100755 --- a/tests/queries/0_stateless/02941_variant_type_1.sh +++ b/tests/queries/0_stateless/02941_variant_type_1.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02941_variant_type_2.sh b/tests/queries/0_stateless/02941_variant_type_2.sh index 8453bce98dc..e93dfac8510 100755 --- a/tests/queries/0_stateless/02941_variant_type_2.sh +++ b/tests/queries/0_stateless/02941_variant_type_2.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02941_variant_type_3.sh b/tests/queries/0_stateless/02941_variant_type_3.sh index 990eb25b5be..cc0fde5b689 100755 --- a/tests/queries/0_stateless/02941_variant_type_3.sh +++ b/tests/queries/0_stateless/02941_variant_type_3.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02941_variant_type_4.sh b/tests/queries/0_stateless/02941_variant_type_4.sh index b8f619694b0..93a1770d05e 100755 --- a/tests/queries/0_stateless/02941_variant_type_4.sh +++ b/tests/queries/0_stateless/02941_variant_type_4.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02941_variant_type_alters.sh b/tests/queries/0_stateless/02941_variant_type_alters.sh index 7c151d1fe9e..0b3ab8e925b 100755 --- a/tests/queries/0_stateless/02941_variant_type_alters.sh +++ b/tests/queries/0_stateless/02941_variant_type_alters.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02943_variant_read_subcolumns.sh b/tests/queries/0_stateless/02943_variant_read_subcolumns.sh index 5ca8dd5f36f..21ab877eaf3 100755 --- a/tests/queries/0_stateless/02943_variant_read_subcolumns.sh +++ b/tests/queries/0_stateless/02943_variant_read_subcolumns.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh @@ -34,4 +32,3 @@ echo "MergeTree wide" $CH_CLIENT -q "create table test (id UInt64, v Variant(UInt64, Array(Variant(String, UInt64)))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" test $CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh index 1d88757a5d6..4aa4b11cecb 100755 --- a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh +++ b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh @@ -2,8 +2,6 @@ # Tags: long, no-debug CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02950_dictionary_short_circuit.sql b/tests/queries/0_stateless/02950_dictionary_short_circuit.sql index 12c934a8d2d..c613709a975 100644 --- a/tests/queries/0_stateless/02950_dictionary_short_circuit.sql +++ b/tests/queries/0_stateless/02950_dictionary_short_circuit.sql @@ -241,12 +241,12 @@ CREATE TABLE regexp_dictionary_source_table values Array(String), ) ENGINE=TinyLog; -INSERT INTO regexp_dictionary_source_table VALUES (1, 0, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']) -INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']) -INSERT INTO regexp_dictionary_source_table VALUES (3, 2, '33/tclwebkit', ['version'], ['13']) -INSERT INTO regexp_dictionary_source_table VALUES (4, 2, '3[12]/tclwebkit', ['version'], ['12']) -INSERT INTO regexp_dictionary_source_table VALUES (5, 2, '3[12]/tclwebkit', ['version'], ['11']) -INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']) +INSERT INTO regexp_dictionary_source_table VALUES (1, 0, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']); +INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 2, '33/tclwebkit', ['version'], ['13']); +INSERT INTO regexp_dictionary_source_table VALUES (4, 2, '3[12]/tclwebkit', ['version'], ['12']); +INSERT INTO regexp_dictionary_source_table VALUES (5, 2, '3[12]/tclwebkit', ['version'], ['11']); +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); DROP DICTIONARY IF EXISTS regexp_dict; create dictionary regexp_dict diff --git a/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql b/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql index 14823644b96..30fb3c76c1f 100644 --- a/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql +++ b/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS t3; CREATE TABLE t2 (id Int32, pe Map(String, Tuple(a UInt64, b UInt64))) ENGINE = MergeTree ORDER BY id; -INSERT INTO t2 VALUES (1, {'a': (1, 2), 'b': (2, 3)}), +INSERT INTO t2 VALUES (1, {'a': (1, 2), 'b': (2, 3)}),; CREATE TABLE t3 (id Int32, c Tuple(v String, pe Map(String, Tuple(a UInt64, b UInt64)))) ENGINE = MergeTree ORDER BY id; INSERT INTO t3 VALUES (1, ('A', {'a':(1, 2),'b':(2, 3)})); diff --git a/tests/queries/0_stateless/03005_input_function_in_join.sql b/tests/queries/0_stateless/03005_input_function_in_join.sql index 8a6b4a48a8d..a6fc27cd8d0 100644 --- a/tests/queries/0_stateless/03005_input_function_in_join.sql +++ b/tests/queries/0_stateless/03005_input_function_in_join.sql @@ -8,7 +8,7 @@ SELECT x.number FROM ( ) AS x INNER JOIN input('a UInt64') AS y ON x.number = y.a Format CSV 2 -; + select * from test; drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh index aabba731816..ed548ae74e9 100755 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment 
-CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh index 872f4c20a98..95dafcf5832 100755 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh index 96276c96add..a3c2d93e568 100755 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh index b8760ec0e1d..9ea86105a3a 100755 --- a/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_aggregating_merge_tree.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh index 881c9ec64cc..9a2a6dd957c 100755 --- a/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_collapsing_merge_tree.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh index fc9039ac98c..0199035a3df 100755 --- a/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_replacing_merge_tree.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh index f9da70e95ca..e2ea5bc3466 100755 --- a/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_summing_merge_tree.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh index 60248f4453a..43607cf95a8 100755 --- a/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh +++ b/tests/queries/0_stateless/03039_dynamic_versioned_collapsing_merge_tree.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03041_dynamic_type_check_table.sh b/tests/queries/0_stateless/03041_dynamic_type_check_table.sh index 3d802485be3..c8bd533e253 100755 --- a/tests/queries/0_stateless/03041_dynamic_type_check_table.sh +++ b/tests/queries/0_stateless/03041_dynamic_type_check_table.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh @@ -42,4 +40,3 @@ echo "MergeTree wide" $CH_CLIENT -q "create table test (x UInt64, y UInt64 ) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" run $CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03156_default_multiquery_split.reference b/tests/queries/0_stateless/03156_default_multiquery_split.reference new file mode 100644 index 00000000000..0f3a1baff45 --- /dev/null +++ b/tests/queries/0_stateless/03156_default_multiquery_split.reference @@ -0,0 +1,10 @@ +Syntax error +101 +102 +1 +2; +3 +4 +7 +8 +9 diff --git a/tests/queries/0_stateless/03156_default_multiquery_split.sh b/tests/queries/0_stateless/03156_default_multiquery_split.sh new file mode 100755 index 00000000000..ac64c2d093d --- /dev/null +++ b/tests/queries/0_stateless/03156_default_multiquery_split.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-ordinary-database + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +SQL_FILE_NAME=$"03156_default_multiquery_split_$$.sql" + +# The old multiquery implementation uses '\n' to split INSERT query segmentation +# this case is mainly to test the following situations +# 1. INSERT(format=Values) query: split by ';' +# 2. INSERT(format is not Values) query: split by '\n\n' instead of ';', then discard the ramaining data +# 3. INSERT(format is not Values) query: split by '\n\n', causing the trailing ';'' to be treated as the part of last value +# 4. The client uses multiquery by default, regardless of whether multiquery option is used. + +# create table test1, test2, then +# 1. insert 101, 102 into test1 +# 2. insert 1, 2; into test2, ';' will be treated as a part of a value +# 3. 
insert 3, 4; '6' will be treated as the next query because of the empty line, we use empty line to determine the end of insert query(format IS NOT VALUES) +# '6' will cause Syntax error +cat << EOF > "$SQL_FILE_NAME" +DROP TABLE IF EXISTS TEST1; +DROP TABLE IF EXISTS TEST2; +CREATE TABLE TEST1 (value Float64) ENGINE=MergeTree ORDER BY tuple(); +CREATE TABLE TEST2 (value String) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO TEST1 VALUES +(101), +(102); +INSERT INTO TEST2 FORMAT CSV +1 +2; + +INSERT INTO TEST2 FORMAT CSV +3 +4 + +6 +EOF + +$CLICKHOUSE_CLIENT -m < "$SQL_FILE_NAME" 2>&1 | grep -o 'Syntax error' + +# insert 7, 8, 9 into test2, because we use semicolon to determine the end of insert query(format is VALUES) +# then select all data from test1 and test2 +cat << EOF > "$SQL_FILE_NAME" +INSERT INTO TEST2 VALUES +('7'), +('8'), + +('9'); + +SELECT * FROM TEST1 ORDER BY value; +SELECT * FROM TEST2 ORDER BY value; +DROP TABLE TEST1; DROP TABLE TEST2; +EOF + +$CLICKHOUSE_CLIENT -m -n < "$SQL_FILE_NAME" + +rm "$SQL_FILE_NAME" diff --git a/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh b/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh index 57dc36d8a8f..c9dca46f41e 100755 --- a/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh +++ b/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03202_dynamic_null_map_subcolumn.sh b/tests/queries/0_stateless/03202_dynamic_null_map_subcolumn.sh index 968c9e5271f..bbe25ea72bb 100755 --- a/tests/queries/0_stateless/03202_dynamic_null_map_subcolumn.sh +++ b/tests/queries/0_stateless/03202_dynamic_null_map_subcolumn.sh @@ -2,8 +2,6 @@ # Tags: long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference new file mode 100644 index 00000000000..fd453d088a6 --- /dev/null +++ b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference @@ -0,0 +1,12 @@ +-- { echoOn } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=0; +B 3 10 +D 1 20 +A 2 30 +C \N 40 +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=1; +B 3 10 +D 1 20 +A 2 30 +C \N 40 +DROP TABLE order_by_all SYNC; diff --git a/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql new file mode 100644 index 00000000000..eb3cddbbe07 --- /dev/null +++ b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS order_by_all SYNC; +CREATE TABLE order_by_all +( + a String, + b Nullable(Int32), + all UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_03210', 'r1') ORDER BY tuple(); + +INSERT INTO order_by_all VALUES ('B', 3, 10), ('C', NULL, 40), ('D', 1, 20), ('A', 2, 30); + +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='parallel_replicas'; +SET allow_experimental_analyzer=1; -- fix has been done only for the analyzer +SET enable_order_by_all = 0; + +-- { echoOn } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=0; +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=1; + +DROP TABLE order_by_all SYNC; diff --git a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference new file mode 100644 index 00000000000..836b526905a --- /dev/null +++ b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference @@ -0,0 +1,7 @@ +ALTER TABLE columns_with_multiple_streams MODIFY COLUMN `field1` Nullable(tupleElement(x, 2), UInt8) +ALTER TABLE t_update_empty_nested ADD COLUMN `nested.arr2` Array(tuple('- ON NULL -', toLowCardinality(11), 11, 11, toLowCardinality(11), 11), UInt64) +ALTER TABLE t ADD COLUMN `x` Array(tuple(1), UInt8) +ALTER TABLE enum_alter_issue MODIFY COLUMN `a` Enum8(equals('one', timeSlots(timeSlots(arrayEnumerateDense(tuple('0.2147483646', toLowCardinality(toUInt128)), NULL), 4, 12.34, materialize(73), 2)), 1)) +ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN `s` Tuple(Nullable(tupleElement(s, 1), UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(String)) +Syntax error +Syntax error diff --git a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh new file mode 100755 index 00000000000..86c7a5469ca --- /dev/null +++ b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# Ensure that these (possibly incorrect) queries can at least be parsed back after formatting. 
+$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE columns_with_multiple_streams MODIFY COLUMN field1 Nullable(tupleElement(x, 2), UInt8)" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t_update_empty_nested ADD COLUMN \`nested.arr2\` Array(tuple('- ON NULL -', toLowCardinality(11), 11, 11, toLowCardinality(11), 11), UInt64)" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t ADD COLUMN x Array((1), UInt8)" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE enum_alter_issue (MODIFY COLUMN a Enum8(equals('one', timeSlots(timeSlots(arrayEnumerateDense(tuple('0.2147483646', toLowCardinality(toUInt128(12))), NULL), 4, 12.34, materialize(73), 2)), 1)))" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN s Tuple(Nullable(tupleElement(s, 1), UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(String))" | $CLICKHOUSE_FORMAT --oneline + +# These invalid queries don't parse and this is normal. +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE alter_compression_codec1 MODIFY COLUMN alter_column CODEC((2 + ignore(1, toUInt128(materialize(2)), 2 + toNullable(toNullable(3))), 3), NONE)" 2>&1 | grep -o -F 'Syntax error' +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE test_table ADD COLUMN \`array\` Array(('110', 3, toLowCardinality(3), 3, toNullable(3), toLowCardinality(toNullable(3)), 3), UInt8) DEFAULT [1, 2, 3]" 2>&1 | grep -o -F 'Syntax error' diff --git a/tests/queries/0_stateless/03212_thousand_exceptions.reference b/tests/queries/0_stateless/03212_thousand_exceptions.reference new file mode 100644 index 00000000000..2409ba16fcb --- /dev/null +++ b/tests/queries/0_stateless/03212_thousand_exceptions.reference @@ -0,0 +1 @@ 
+('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.'
)('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.')('.') \ No newline at end of file diff --git a/tests/queries/0_stateless/03212_thousand_exceptions.sh b/tests/queries/0_stateless/03212_thousand_exceptions.sh new file mode 100755 index 00000000000..0a6abf35c10 --- /dev/null +++ b/tests/queries/0_stateless/03212_thousand_exceptions.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: long + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# This should not be too slow, even under sanitizers. +yes "SELECT throwIf(1); SELECT '.' 
FORMAT Values;" | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery --ignore-error 2>/dev/null diff --git a/tests/queries/0_stateless/03213_array_element_msan.reference b/tests/queries/0_stateless/03213_array_element_msan.reference new file mode 100644 index 00000000000..6844e39d0aa --- /dev/null +++ b/tests/queries/0_stateless/03213_array_element_msan.reference @@ -0,0 +1,5 @@ +10 +-- system.settings_profiles +-- system.settings_profiles +-- system.settings_profiles +-- system.settings_profiles diff --git a/tests/queries/0_stateless/03213_array_element_msan.sql b/tests/queries/0_stateless/03213_array_element_msan.sql new file mode 100644 index 00000000000..68955856792 --- /dev/null +++ b/tests/queries/0_stateless/03213_array_element_msan.sql @@ -0,0 +1,2 @@ +SELECT [[10, 2, 13, 15][toNullable(toLowCardinality(1))]][materialize(toLowCardinality(1))]; +SELECT '-- system.settings_profiles' GROUP BY [[[[[[[[[[10, toNullable(10)][1], [materialize(toLowCardinality(10)), 2][materialize(toLowCardinality(1))]][1]][materialize(materialize(1))], [10, 2, 1][1]][1]][1], 1][toLowCardinality(1)]][1], 1][1], 10][1], [[10, toLowCardinality(2)][toNullable(toLowCardinality(1))]][materialize(toLowCardinality(1))]][1], [[[[10, 2][1]][1]][1], [10, 2][materialize(1)], [[[2][1]][materialize(1)], 2, 1][1], [2, 10, toNullable(1)][1]] WITH CUBE; diff --git a/tests/queries/0_stateless/03213_distributed_analyzer.reference b/tests/queries/0_stateless/03213_distributed_analyzer.reference new file mode 100644 index 00000000000..9d63c0a7a5e --- /dev/null +++ b/tests/queries/0_stateless/03213_distributed_analyzer.reference @@ -0,0 +1 @@ +['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}'] diff --git a/tests/queries/0_stateless/03213_distributed_analyzer.sql b/tests/queries/0_stateless/03213_distributed_analyzer.sql new file mode 100644 index 00000000000..ae0f13f08f0 --- /dev/null +++ b/tests/queries/0_stateless/03213_distributed_analyzer.sql @@ -0,0 +1,3 @@ +-- This triggered a nullptr dereference due to the confusion between old and new analyzers: +SELECT sum(*) FROM remote('127.0.0.4', currentDatabase(), viewExplain('EXPLAIN PIPELINE', 'graph = 1', (SELECT * FROM remote('127.0.0.4', system, one)))); -- { serverError UNKNOWN_FUNCTION } +SELECT groupArray(*) FROM cluster(test_cluster_two_shards, viewExplain('EXPLAIN PIPELINE', 'graph = 1', (SELECT * FROM remote('127.0.0.4', system, one)))); diff --git a/tests/queries/0_stateless/03213_rand_dos.reference b/tests/queries/0_stateless/03213_rand_dos.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03213_rand_dos.sql b/tests/queries/0_stateless/03213_rand_dos.sql new file mode 100644 index 00000000000..1250995bfe5 --- /dev/null +++ b/tests/queries/0_stateless/03213_rand_dos.sql @@ -0,0 +1,5 @@ +SELECT randChiSquared(-0.0000001); -- { serverError BAD_ARGUMENTS } +SELECT randChiSquared(-0.0); -- { serverError BAD_ARGUMENTS } +SELECT randStudentT(-0.); -- { serverError BAD_ARGUMENTS } +SELECT randFisherF(-0., 1); -- { serverError BAD_ARGUMENTS } +SELECT randFisherF(1, -0.); -- { serverError BAD_ARGUMENTS } diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index e116dcd9fe4..b21ae0764c6 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1870,6 +1870,7 @@ joinGetOrNull 
json jsonMergePatch jsonasstring +jsonasobject jsoncolumns jsoncolumnsmonoblock jsoncompact