Merge remote-tracking branch 'origin/master' into pr-local-plan

Igor Nikonov 2024-08-15 11:12:56 +00:00
commit b4e8c14729
237 changed files with 11246 additions and 4059 deletions


@ -101,6 +101,7 @@ jobs:
--volume=".:/wd" --workdir="/wd" \
clickhouse/style-test \
./tests/ci/changelog.py -v --debug-helpers \
--gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \
--jobs=5 \
--output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md
@ -129,9 +130,9 @@ jobs:
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-completed
git reset --hard HEAD
git checkout "$GITHUB_REF_NAME"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Create GH Release
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
shell: bash

.gitmodules vendored

@ -345,9 +345,6 @@
[submodule "contrib/FP16"]
path = contrib/FP16
url = https://github.com/Maratyszcza/FP16.git
[submodule "contrib/robin-map"]
path = contrib/robin-map
url = https://github.com/Tessil/robin-map.git
[submodule "contrib/aklomp-base64"]
path = contrib/aklomp-base64
url = https://github.com/aklomp/base64.git


@ -322,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER)
set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
endif()
# Before you start hating your debugger because it refuses to show variables ('<optimized out>'), try building with -DDEBUG_O_LEVEL="0"
# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263
set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds")
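# For example, a debug configure that keeps variables visible to the debugger:
#   cmake -S . -B build -DCMAKE_BUILD_TYPE=Debug -DDEBUG_O_LEVEL="0"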
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")


@ -27,27 +27,6 @@ bool cgroupsV2Enabled()
#endif
}
bool cgroupsV2MemoryControllerEnabled()
{
#if defined(OS_LINUX)
chassert(cgroupsV2Enabled());
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
fs::path cgroup_dir = cgroupV2PathOfProcess();
if (cgroup_dir.empty())
return false;
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
if (!controllers_file.is_open())
return false;
std::string controllers;
std::getline(controllers_file, controllers);
return controllers.find("memory") != std::string::npos;
#else
return false;
#endif
}
fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess()
return {};
#endif
}
std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name)
{
#if defined(OS_LINUX)
if (!cgroupsV2Enabled())
return {};
fs::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Return the bottom-most nested cgroup containing the file. If there is no such file at the current
/// level, try again at the parent level, as settings are inherited.
while (current_cgroup != default_cgroups_mount.parent_path())
{
const auto path = current_cgroup / file_name;
if (fs::exists(path))
return {current_cgroup};
current_cgroup = current_cgroup.parent_path();
}
return {};
#else
return {};
#endif
}
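To illustrate the new helper, here is a minimal usage sketch (a hypothetical caller, not part of this diff): it locates the bottom-most cgroup that defines `memory.max` and reads the limit. It assumes the returned directory string has no trailing slash and relies on cgroups v2 writing the literal "max" for an unlimited value.

```cpp
#include <cstdint>
#include <fstream>
#include <optional>
#include <string>
#include <string_view>

/// Declared in the header shown below in this diff.
std::optional<std::string> getCgroupsV2PathContainingFile(std::string_view file_name);

/// Hypothetical helper: find the bottom-most cgroup defining "memory.max" and read it.
std::optional<uint64_t> readCgroupsV2MemoryMax()
{
    std::optional<std::string> dir = getCgroupsV2PathContainingFile("memory.max");
    if (!dir)
        return {};
    std::ifstream file(*dir + "/memory.max");
    if (!file.is_open())
        return {};
    std::string value;
    std::getline(file, value);
    if (value == "max") /// cgroups v2 uses the literal "max" for "no limit"
        return {};
    return std::stoull(value);
}
```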


@ -1,6 +1,7 @@
#pragma once
#include <filesystem>
#include <string_view>
#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou
/// Is cgroups v2 enabled on the system?
bool cgroupsV2Enabled();
/// Is the memory controller of cgroups v2 enabled on the system?
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();
/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
std::filesystem::path cgroupV2PathOfProcess();
/// Returns the most nested cgroup dir containing the specified file.
/// If cgroups v2 is not enabled - returns an empty optional.
std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name);


@ -19,9 +19,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
if (!cgroupsV2Enabled())
return {};
if (!cgroupsV2MemoryControllerEnabled())
return {};
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};


@ -2,11 +2,11 @@
# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54489)
SET(VERSION_REVISION 54490)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 8)
SET(VERSION_MINOR 9)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
SET(VERSION_DESCRIBE v24.8.1.1-testing)
SET(VERSION_STRING 24.8.1.1)
SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608)
SET(VERSION_DESCRIBE v24.9.1.1-testing)
SET(VERSION_STRING 24.9.1.1)
# end of autochange


@ -209,9 +209,8 @@ endif()
option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
add_contrib (FP16-cmake FP16)
add_contrib (robin-map-cmake robin-map)
add_contrib (SimSIMD-cmake SimSIMD)
add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD
add_contrib (usearch-cmake usearch) # requires: FP16, SimSIMD
else ()
message(STATUS "Not using USearch")
endif ()

contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf
Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26

contrib/libunwind vendored

@ -1 +1 @@
Subproject commit a89d904befea07814628c6ce0b44083c4e149c62
Subproject commit 601db0b0e03018c01710470a37703b618f9cf08b

contrib/robin-map vendored

@ -1 +0,0 @@
Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d


@ -1 +0,0 @@
# See contrib/usearch-cmake/CMakeLists.txt

contrib/usearch vendored

@ -1 +1 @@
Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356
Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4


@ -1,5 +1,4 @@
set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
@ -7,7 +6,6 @@ add_library(_usearch INTERFACE)
target_include_directories(_usearch SYSTEM INTERFACE
${FP16_PROJECT_DIR}/include
${ROBIN_MAP_PROJECT_DIR}/include
${SIMSIMD_PROJECT_DIR}/include
${USEARCH_PROJECT_DIR}/include)


@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.7.30-lts (c8a28cf4331) FIXME as compared to v24.3.6.48-lts (b2d33c3c45d)
#### Improvement
* Backported in [#68103](https://github.com/ClickHouse/ClickHouse/issues/68103): Distinguish booleans and integers while parsing values for custom settings: ``` SET custom_a = true; SET custom_b = 1; ```. [#62206](https://github.com/ClickHouse/ClickHouse/pull/62206) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67931](https://github.com/ClickHouse/ClickHouse/issues/67931): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68062](https://github.com/ClickHouse/ClickHouse/issues/68062): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#67812](https://github.com/ClickHouse/ClickHouse/issues/67812): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67848](https://github.com/ClickHouse/ClickHouse/issues/67848): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68271](https://github.com/ClickHouse/ClickHouse/issues/68271): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67806](https://github.com/ClickHouse/ClickHouse/issues/67806): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67834](https://github.com/ClickHouse/ClickHouse/issues/67834): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#68206](https://github.com/ClickHouse/ClickHouse/issues/68206): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68089](https://github.com/ClickHouse/ClickHouse/issues/68089): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68120](https://github.com/ClickHouse/ClickHouse/issues/68120): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Update version after release. [#67676](https://github.com/ClickHouse/ClickHouse/pull/67676) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68074](https://github.com/ClickHouse/ClickHouse/issues/68074): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).


@ -59,6 +59,8 @@ Parameters:
- `ef_construction`: (optional, default: 128)
- `ef_search`: (optional, default: 64)
A value of `0` for the parameters `m`, `ef_construction`, and `ef_search` means that the default value is used.
Example:
```sql


@ -1041,3 +1041,14 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%.
This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only few distinct primary key values.
High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting.
### deduplicate_merge_projection_mode
Whether to allow creating projections for tables with non-classic MergeTree engines, that is, engines other than (Replicated, Shared) MergeTree, and, if allowed, which action to take when merging projections: either drop or rebuild. Classic MergeTree ignores this setting.
It also controls `OPTIMIZE DEDUPLICATE`, in which case it has effect on all MergeTree family members.
Possible values:
- `throw`, `drop`, `rebuild`
Default value: `throw`


@ -14,7 +14,7 @@ To declare a column of `Dynamic` type, use the following syntax:
<column_name> Dynamic(max_types=N)
```
Where `N` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_types` is `32`.
Where `N` is an optional parameter between `0` and `254` indicating how many different data types can be stored as separate subcolumns inside a column with type `Dynamic` within a single block of data that is stored separately (for example, within a single data part for a MergeTree table). If this limit is exceeded, all values with new types will be stored together in a special shared data structure in binary form. The default value of `max_types` is `32`.
:::note
The Dynamic data type is an experimental feature. To use it, set `allow_experimental_dynamic_type = 1`.
@ -224,41 +224,43 @@ SELECT d::Dynamic(max_types=5) as d2, dynamicType(d2) FROM test;
└───────┴────────────────┘
```
If `K < N`, then the values with the rarest types are converted to `String`:
If `K < N`, then the values with the rarest types will be inserted into a single special subcolumn, but will still be accessible:
```sql
CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]);
SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2) FROM test;
SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test;
```
```text
┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┐
│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │
│ 42 │ Int64 │ 42 │ Int64 │
│ 43 │ Int64 │ 43 │ Int64 │
│ 42.42 │ String │ 42.42 │ String │
│ true │ Bool │ true │ String
│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String
└─────────┴────────────────┴─────────┴─────────────────┘
┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─┐
│ ᴺᵁᴸᴸ    │ None           │ ᴺᵁᴸᴸ    │ None            │ false                            │
│ 42      │ Int64          │ 42      │ Int64           │ false                            │
│ 43      │ Int64          │ 43      │ Int64           │ false                            │
│ 42.42   │ String         │ 42.42   │ String          │ false                            │
│ true    │ Bool           │ true    │ Bool            │ true                             │
│ [1,2,3] │ Array(Int64)   │ [1,2,3] │ Array(Int64)    │ true                             │
└─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────┘
```
If `K=1`, all types are converted to `String`:
The function `isDynamicElementInSharedData` returns `true` for rows that are stored in the special shared data structure inside `Dynamic`, and as we can see, the resulting column contains only 2 types that are not stored in the shared data structure.
If `K=0`, all types will be inserted into a single special subcolumn:
```sql
CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]);
SELECT d, dynamicType(d), d::Dynamic(max_types=1) as d2, dynamicType(d2) FROM test;
SELECT d, dynamicType(d), d::Dynamic(max_types=0) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test;
```
```text
┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┐
│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │
│ 42 │ Int64 │ 42 │ String
│ 43 │ Int64 │ 43 │ String
│ 42.42 │ String │ 42.42 │ String │
│ true │ Bool │ true │ String
│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String
└─────────┴────────────────┴─────────┴─────────────────┘
┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─┐
│ ᴺᵁᴸᴸ    │ None           │ ᴺᵁᴸᴸ    │ None            │ false                            │
│ 42      │ Int64          │ 42      │ Int64           │ true                             │
│ 43      │ Int64          │ 43      │ Int64           │ true                             │
│ 42.42   │ String         │ 42.42   │ String          │ true                             │
│ true    │ Bool           │ true    │ Bool            │ true                             │
│ [1,2,3] │ Array(Int64)   │ [1,2,3] │ Array(Int64)    │ true                             │
└─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────┘
```
## Reading Dynamic type from the data
@ -411,17 +413,17 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
## Reaching the limit in number of different data types stored inside Dynamic
`Dynamic` data type can store only limited number of different data types inside. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 1 and 255 (due to implementation details, it's impossible to have more than 255 different data types inside Dynamic).
When the limit is reached, all new data types inserted to `Dynamic` column will be casted to `String` and stored as `String` values.
The `Dynamic` data type can store only a limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in the type declaration using the syntax `Dynamic(max_types=N)` where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types stored as separate subcolumns inside Dynamic).
When the limit is reached, all values with new data types inserted into a `Dynamic` column are placed into a single shared data structure that stores values of different data types in binary form.
Let's see what happens when the limit is reached in different scenarios.
### Reaching the limit during data parsing
During parsing of `Dynamic` values from the data, when the limit is reached for current block of data, all new values will be inserted as `String` values:
During parsing of `Dynamic` values from the data, when the limit is reached for the current block of data, all new values will be inserted into the shared data structure:
```sql
SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
SELECT d, dynamicType(d), isDynamicElementInSharedData(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
{"d" : 42}
{"d" : [1, 2, 3]}
{"d" : "Hello, World!"}
@ -432,22 +434,22 @@ SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
```
```text
┌─d──────────────────────────┬─dynamicType(d)─┐
│ 42 │ Int64 │
│ [1,2,3] │ Array(Int64) │
│ Hello, World! │ String │
│ 2020-01-01 │ String
│ ["str1", "str2", "str3"] │ String
{"a" : 1, "b" : [1, 2, 3]} │ String
└────────────────────────────┴────────────────┘
┌─d──────────────────────┬─dynamicType(d)─────────────────┬─isDynamicElementInSharedData(d)─┐
│ 42 │ Int64 │ false
│ [1,2,3] │ Array(Int64) │ false
│ Hello, World! │ String │ false
│ 2020-01-01 │ Date │ true
│ ['str1','str2','str3'] │ Array(String) │ true
│ (1,[1,2,3])            │ Tuple(a Int64, b Array(Int64)) │ true                            │
└────────────────────────┴────────────────────────────────┴─────────────────────────────────┘
```
As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String` all new types were converted to `String`.
As we can see, after inserting 3 different data types (`Int64`, `Array(Int64)` and `String`), all values with new types were inserted into the special shared data structure.
### During merges of data parts in MergeTree table engines
During merge of several data parts in MergeTree table the `Dynamic` column in the resulting data part can reach the limit of different data types inside and won't be able to store all types from source parts.
In this case ClickHouse chooses what types will remain after merge and what types will be casted to `String`. In most cases ClickHouse tries to keep the most frequent types and cast the rarest types to `String`, but it depends on the implementation.
During a merge of several data parts in a MergeTree table, the `Dynamic` column in the resulting data part can reach the limit of different data types that can be stored in separate subcolumns and won't be able to store all types from the source parts as subcolumns.
In this case, ClickHouse chooses which types will remain as separate subcolumns after the merge and which types will be inserted into the shared data structure. In most cases, ClickHouse tries to keep the most frequent types and store the rarest types in the shared data structure, but this depends on the implementation.
Let's see an example of such a merge. First, let's create a table with a `Dynamic` column, set the limit of different data types to `3` and insert values with `5` different types:
@ -463,17 +465,17 @@ INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(1);
Each insert will create a separate data part with a `Dynamic` column containing a single type:
```sql
SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part;
SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count();
```
```text
┌─count()─┬─dynamicType(d)──────┬─_part─────┐
│ 5 │ UInt64 │ all_1_1_0 │
│ 4 │ Array(UInt64) │ all_2_2_0 │
│ 3 │ Date │ all_3_3_0 │
│ 2 │ Map(UInt64, UInt64) │ all_4_4_0 │
│ 1 │ String │ all_5_5_0 │
└─────────┴─────────────────────┴───────────┘
┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐
│ 5 │ UInt64 │ false │ all_1_1_0 │
│ 4 │ Array(UInt64) │ false │ all_2_2_0 │
│ 3 │ Date │ false │ all_3_3_0 │
│ 2 │ Map(UInt64, UInt64) │ false │ all_4_4_0 │
│ 1 │ String │ false │ all_5_5_0 │
└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘
```
Now, let's merge all parts into one and see what will happen:
@ -481,18 +483,20 @@ Now, let's merge all parts into one and see what will happen:
```sql
SYSTEM START MERGES test;
OPTIMIZE TABLE test FINAL;
SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part;
SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count() desc;
```
```text
┌─count()─┬─dynamicType(d)─┬─_part─────┐
│ 5 │ UInt64 │ all_1_5_2 │
│ 6 │ String │ all_1_5_2 │
│ 4 │ Array(UInt64) │ all_1_5_2 │
└─────────┴────────────────┴───────────┘
┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐
│ 5 │ UInt64 │ false │ all_1_5_2 │
│ 4 │ Array(UInt64) │ false │ all_1_5_2 │
│ 3 │ Date │ false │ all_1_5_2 │
│ 2 │ Map(UInt64, UInt64) │ true │ all_1_5_2 │
│ 1 │ String │ true │ all_1_5_2 │
└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘
```
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` as subcolumns and inserted all other types into shared data.
## JSONExtract functions with Dynamic
@ -509,22 +513,23 @@ SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType(
```
```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types```
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Dynamic)') AS map_of_dynamics, mapApply((k, v) -> (k, dynamicType(v)), map_of_dynamics) AS map_of_dynamic_types
```
```text
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘
┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'Int64','b':'String','c':'Array(Nullable(Int64))'} │
└──────────────────────────────────┴──────────────────────────────────────────────────────────┘
```
```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types```
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Dynamic') AS dynamics, arrayMap(x -> (x.1, dynamicType(x.2)), dynamics) AS dynamic_types
```
```text
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','Int64'),('b','String'),('c','Array(Nullable(Int64))')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────────────┘
```
### Binary output format


@ -103,7 +103,7 @@ LIMIT 2;
└─────────┴─────────┴─────────┘
```
### Inserting data from a file into a table:
### Inserting data from a file into a table
``` sql
INSERT INTO FUNCTION


@ -75,6 +75,8 @@ public:
const String & default_database_,
const String & user_,
const String & password_,
const String & proto_send_chunked_,
const String & proto_recv_chunked_,
const String & quota_key_,
const String & stage,
bool randomize_,
@ -128,7 +130,9 @@ public:
connections.emplace_back(std::make_unique<ConnectionPool>(
concurrency,
cur_host, cur_port,
default_database_, user_, password_, quota_key_,
default_database_, user_, password_,
proto_send_chunked_, proto_recv_chunked_,
quota_key_,
/* cluster_= */ "",
/* cluster_secret_= */ "",
/* client_name_= */ std::string(DEFAULT_CLIENT_NAME),
@ -662,6 +666,50 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
Strings hosts = options.count("host") ? options["host"].as<Strings>() : Strings({"localhost"});
String proto_send_chunked {"notchunked"};
String proto_recv_chunked {"notchunked"};
if (options.count("proto_caps"))
{
std::string proto_caps_str = options["proto_caps"].as<std::string>();
std::vector<std::string_view> proto_caps;
splitInto<','>(proto_caps, proto_caps_str);
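/// Each comma-separated capability may be prefixed with "send_" or "recv_" to
/// configure one direction only; an unprefixed capability applies to both.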
for (auto cap_str : proto_caps)
{
std::string direction;
if (cap_str.starts_with("send_"))
{
direction = "send";
cap_str = cap_str.substr(std::string_view("send_").size());
}
else if (cap_str.starts_with("recv_"))
{
direction = "recv";
cap_str = cap_str.substr(std::string_view("recv_").size());
}
if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str);
if (direction.empty())
{
proto_send_chunked = cap_str;
proto_recv_chunked = cap_str;
}
else
{
if (direction == "send")
proto_send_chunked = cap_str;
else
proto_recv_chunked = cap_str;
}
}
}
Benchmark benchmark(
options["concurrency"].as<unsigned>(),
options["delay"].as<double>(),
@ -673,6 +721,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options["database"].as<std::string>(),
options["user"].as<std::string>(),
options["password"].as<std::string>(),
proto_send_chunked,
proto_recv_chunked,
options["quota_key"].as<std::string>(),
options["stage"].as<std::string>(),
options.count("randomize"),


@ -38,6 +38,21 @@
<production>{display_name} \e[1;31m:)\e[0m </production> <!-- if it matched to the substring "production" in the server display name -->
</prompt_by_server_display_name>
<!-- Chunked capabilities for native protocol by client.
Can be enabled separately for send and receive channels.
Supported modes:
- chunked - the client will only work with a server supporting the chunked protocol;
- chunked_optional - the client prefers the server to enable the chunked protocol, but can switch to notchunked if the server does not support it;
- notchunked - the client will only work with a server supporting the notchunked protocol (current default);
- notchunked_optional - the client prefers the notchunked protocol, but can switch to chunked if the server does not support it.
-->
<!--
<proto_caps>
<send>chunked_optional</send>
<recv>chunked_optional</recv>
</proto_caps>
-->
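<!-- The same capabilities can also be set per invocation via the command-line option,
     e.g. clickhouse-client --proto_caps send_chunked_optional,recv_chunked_optional
-->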
<!--
Settings adjustable via command-line parameters
can take their defaults from that config file, see examples:


@ -150,6 +150,21 @@
-->
<tcp_port>9000</tcp_port>
<!-- Chunked capabilities for native protocol by server.
Can be enabled separately for send and receive channels.
Supported modes:
- chunked - the server requires the client to have chunked enabled;
- chunked_optional - the server supports both the chunked and notchunked protocol;
- notchunked - the server requires the notchunked protocol from the client (current default);
- notchunked_optional - the server supports both the chunked and notchunked protocol.
-->
<!--
<proto_caps>
<send>notchunked_optional</send>
<recv>notchunked_optional</recv>
</proto_caps>
-->
<!-- Compatibility with MySQL protocol.
ClickHouse will pretend to be MySQL for applications connecting to this port.
-->


@ -158,6 +158,8 @@ void ClientApplicationBase::init(int argc, char ** argv)
("config-file,C", po::value<std::string>(), "config-file path")
("proto_caps", po::value<std::string>(), "enable/disable chunked protocol: chunked_optional, notchunked, notchunked_optional, send_chunked, send_chunked_optional, send_notchunked, send_notchunked_optional, recv_chunked, recv_chunked_optional, recv_notchunked, recv_notchunked_optional")
("query,q", po::value<std::vector<std::string>>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)")
("queries-file", po::value<std::vector<std::string>>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)")
("multiquery,n", "Obsolete, does nothing")
@ -337,6 +339,41 @@ void ClientApplicationBase::init(int argc, char ** argv)
if (options.count("server_logs_file"))
server_logs_file = options["server_logs_file"].as<std::string>();
if (options.count("proto_caps"))
{
std::string proto_caps_str = options["proto_caps"].as<std::string>();
std::vector<std::string_view> proto_caps;
splitInto<','>(proto_caps, proto_caps_str);
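/// The parsed capabilities end up in the "proto_caps.send"/"proto_caps.recv" config keys,
/// which ConnectionParameters reads later when establishing a connection.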
for (auto cap_str : proto_caps)
{
std::string direction;
if (cap_str.starts_with("send_"))
{
direction = "send";
cap_str = cap_str.substr(std::string_view("send_").size());
}
else if (cap_str.starts_with("recv_"))
{
direction = "recv";
cap_str = cap_str.substr(std::string_view("recv_").size());
}
if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str);
if (direction.empty())
{
config().setString("proto_caps.send", std::string(cap_str));
config().setString("proto_caps.recv", std::string(cap_str));
}
else
config().setString("proto_caps." + direction, std::string(cap_str));
}
}
query_processing_stage = QueryProcessingStage::fromString(options["stage"].as<std::string>());
query_kind = parseQueryKind(options["query_kind"].as<std::string>());
profile_events.print = options.count("print-profile-events");


@ -73,9 +73,11 @@
#include <limits>
#include <map>
#include <memory>
#include <string_view>
#include <unordered_map>
#include <Common/config_version.h>
#include <base/find_symbols.h>
#include "config.h"
#include <IO/ReadHelpers.h>
#include <Processors/Formats/Impl/ValuesBlockInputFormat.h>
@ -914,6 +916,8 @@ void ClientBase::processTextAsSingleQuery(const String & full_query)
}
catch (Exception & e)
{
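/// If the server already sent back an exception for this query, rethrow it instead of the local one.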
if (server_exception)
server_exception->rethrow();
if (!is_interactive)
e.addMessage("(in query: {})", full_query);
throw;
@ -1032,19 +1036,28 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
query_interrupt_handler.start(signals_before_stop);
SCOPE_EXIT({ query_interrupt_handler.stop(); });
connection->sendQuery(
connection_parameters.timeouts,
query,
query_parameters,
client_context->getCurrentQueryId(),
query_processing_stage,
&client_context->getSettingsRef(),
&client_context->getClientInfo(),
true,
[&](const Progress & progress) { onProgress(progress); });
try {
connection->sendQuery(
connection_parameters.timeouts,
query,
query_parameters,
client_context->getCurrentQueryId(),
query_processing_stage,
&client_context->getSettingsRef(),
&client_context->getClientInfo(),
true,
[&](const Progress & progress) { onProgress(progress); });
if (send_external_tables)
sendExternalTables(parsed_query);
}
catch (const NetException &)
{
// We still want to attempt to process whatever we already received or can still receive (the socket receive buffer may not be empty)
receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
throw;
}
if (send_external_tables)
sendExternalTables(parsed_query);
receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
break;


@ -5,8 +5,6 @@
#include <Core/Settings.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedWriteBuffer.h>
#include <IO/ReadBufferFromPocoSocket.h>
#include <IO/WriteBufferFromPocoSocket.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/copyData.h>
@ -85,6 +83,7 @@ Connection::~Connection()
Connection::Connection(const String & host_, UInt16 port_,
const String & default_database_,
const String & user_, const String & password_,
const String & proto_send_chunked_, const String & proto_recv_chunked_,
[[maybe_unused]] const SSHKey & ssh_private_key_,
const String & jwt_,
const String & quota_key_,
@ -95,6 +94,7 @@ Connection::Connection(const String & host_, UInt16 port_,
Protocol::Secure secure_)
: host(host_), port(port_), default_database(default_database_)
, user(user_), password(password_)
, proto_send_chunked(proto_send_chunked_), proto_recv_chunked(proto_recv_chunked_)
#if USE_SSH
, ssh_private_key(ssh_private_key_)
#endif
@ -211,10 +211,10 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
, tcp_keep_alive_timeout_in_sec);
}
in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
in = std::make_shared<ReadBufferFromPocoSocketChunked>(*socket);
in->setAsyncCallback(async_callback);
out = std::make_shared<WriteBufferFromPocoSocket>(*socket);
out = std::make_shared<WriteBufferFromPocoSocketChunked>(*socket);
out->setAsyncCallback(async_callback);
connected = true;
setDescription();
@ -222,9 +222,61 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
sendHello();
receiveHello(timeouts.handshake_timeout);
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
{
/// Client side of chunked protocol negotiation.
/// Server advertises its protocol capabilities (separate for send and receive channels) by sending
/// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional.
/// Non-optional types are strict, meaning that the server supports only this type; optional means that
/// the server prefers this type but is capable of working with the opposite one.
/// The client selects which type it is going to use to communicate, based on the settings from the config or arguments,
/// and sends either a "chunked" or "notchunked" protocol request in the addendum section of the handshake.
/// The client can detect if the server's protocol capabilities are not compatible with the client's settings (for example,
/// the server strictly requires the chunked protocol but the client's settings allow only notchunked) - in such a case
/// the client should interrupt this connection. However, if the client continues with an incompatible protocol type request,
/// the server will send an appropriate exception and disconnect the client.
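/// For example: server "chunked_optional" + client "notchunked" resolves to notchunked,
/// while server "chunked" + client "notchunked" fails below with NETWORK_ERROR ("Incompatible protocol").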
auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction)
{
bool chunked_srv = chunked_srv_str.starts_with("chunked");
bool optional_srv = chunked_srv_str.ends_with("_optional");
bool chunked_cl = chunked_cl_str.starts_with("chunked");
bool optional_cl = chunked_cl_str.ends_with("_optional");
if (optional_srv)
return chunked_cl;
if (optional_cl)
return chunked_srv;
if (chunked_cl != chunked_srv)
throw NetException(
ErrorCodes::NETWORK_ERROR,
"Incompatible protocol: {} set to {}, server requires {}",
direction,
chunked_cl ? "chunked" : "notchunked",
chunked_srv ? "chunked" : "notchunked");
return chunked_srv;
};
proto_send_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send") ? "chunked" : "notchunked";
proto_recv_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv") ? "chunked" : "notchunked";
}
else
{
if (proto_send_chunked == "chunked" || proto_recv_chunked == "chunked")
throw NetException(
ErrorCodes::NETWORK_ERROR,
"Incompatible protocol: server's version is too old and doesn't support chunked protocol while client settings require it.");
}
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM)
sendAddendum();
if (proto_send_chunked == "chunked")
out->enableChunked();
if (proto_recv_chunked == "chunked")
in->enableChunked();
LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.",
server_name, server_version_major, server_version_minor, server_version_patch);
}
@ -393,6 +445,13 @@ void Connection::sendAddendum()
{
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY)
writeStringBinary(quota_key, *out);
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
{
writeStringBinary(proto_send_chunked, *out);
writeStringBinary(proto_recv_chunked, *out);
}
out->next();
}
@ -472,6 +531,12 @@ void Connection::receiveHello(const Poco::Timespan & handshake_timeout)
else
server_version_patch = server_revision;
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
{
readStringBinary(proto_send_chunked_srv, *in);
readStringBinary(proto_recv_chunked_srv, *in);
}
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES)
{
UInt64 rules_size;
@ -611,6 +676,7 @@ bool Connection::ping(const ConnectionTimeouts & timeouts)
UInt64 pong = 0;
writeVarUInt(Protocol::Client::Ping, *out);
out->finishChunk();
out->next();
if (in->eof())
@ -660,6 +726,7 @@ TablesStatusResponse Connection::getTablesStatus(const ConnectionTimeouts & time
writeVarUInt(Protocol::Client::TablesStatusRequest, *out);
request.write(*out, server_revision);
out->finishChunk();
out->next();
UInt64 response_type = 0;
@ -813,6 +880,8 @@ void Connection::sendQuery(
block_profile_events_in.reset();
block_out.reset();
out->finishChunk();
/// Send empty block which means end of data.
if (!with_pending_data)
{
@ -829,6 +898,7 @@ void Connection::sendCancel()
return;
writeVarUInt(Protocol::Client::Cancel, *out);
out->finishChunk();
out->next();
}
@ -854,7 +924,10 @@ void Connection::sendData(const Block & block, const String & name, bool scalar)
size_t prev_bytes = out->count();
block_out->write(block);
maybe_compressed_out->next();
if (maybe_compressed_out != out)
maybe_compressed_out->next();
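/// An empty block means the end of data (see above), so the current chunk is finished here.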
if (!block)
out->finishChunk();
out->next();
if (throttler)
@ -865,6 +938,7 @@ void Connection::sendIgnoredPartUUIDs(const std::vector<UUID> & uuids)
{
writeVarUInt(Protocol::Client::IgnoredPartUUIDs, *out);
writeVectorBinary(uuids, *out);
out->finishChunk();
out->next();
}
@ -874,6 +948,7 @@ void Connection::sendReadTaskResponse(const String & response)
writeVarUInt(Protocol::Client::ReadTaskResponse, *out);
writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out);
writeStringBinary(response, *out);
out->finishChunk();
out->next();
}
@ -882,6 +957,7 @@ void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & resp
{
writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out);
response.serialize(*out);
out->finishChunk();
out->next();
}
@ -899,6 +975,8 @@ void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String
copyData(input, *out);
else
copyData(input, *out, size);
out->finishChunk();
out->next();
}
@ -927,6 +1005,8 @@ void Connection::sendScalarsData(Scalars & data)
sendData(elem.second, elem.first, true /* scalar */);
}
out->finishChunk();
out_bytes = out->count() - out_bytes;
maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes;
double elapsed = watch.elapsedSeconds();
@ -1069,13 +1149,13 @@ std::optional<Poco::Net::SocketAddress> Connection::getResolvedAddress() const
bool Connection::poll(size_t timeout_microseconds)
{
return static_cast<ReadBufferFromPocoSocket &>(*in).poll(timeout_microseconds);
return in->poll(timeout_microseconds);
}
bool Connection::hasReadPendingData() const
{
return last_input_packet_type.has_value() || static_cast<const ReadBufferFromPocoSocket &>(*in).hasPendingData();
return last_input_packet_type.has_value() || in->hasBufferedData();
}
@ -1349,6 +1429,8 @@ ServerConnectionPtr Connection::createConnection(const ConnectionParameters & pa
parameters.default_database,
parameters.user,
parameters.password,
parameters.proto_send_chunked,
parameters.proto_recv_chunked,
parameters.ssh_private_key,
parameters.jwt,
parameters.quota_key,


@ -8,8 +8,8 @@
#include <Core/Defines.h>
#include <IO/ReadBufferFromPocoSocket.h>
#include <IO/WriteBufferFromPocoSocket.h>
#include <IO/ReadBufferFromPocoSocketChunked.h>
#include <IO/WriteBufferFromPocoSocketChunked.h>
#include <Interpreters/TablesStatus.h>
#include <Interpreters/Context_fwd.h>
@ -52,6 +52,7 @@ public:
Connection(const String & host_, UInt16 port_,
const String & default_database_,
const String & user_, const String & password_,
const String & proto_send_chunked_, const String & proto_recv_chunked_,
const SSHKey & ssh_private_key_,
const String & jwt_,
const String & quota_key_,
@ -170,6 +171,10 @@ private:
String default_database;
String user;
String password;
String proto_send_chunked;
String proto_recv_chunked;
String proto_send_chunked_srv;
String proto_recv_chunked_srv;
#if USE_SSH
SSHKey ssh_private_key;
#endif
@ -209,8 +214,8 @@ private:
String server_display_name;
std::unique_ptr<Poco::Net::StreamSocket> socket;
std::shared_ptr<ReadBufferFromPocoSocket> in;
std::shared_ptr<WriteBufferFromPocoSocket> out;
std::shared_ptr<ReadBufferFromPocoSocketChunked> in;
std::shared_ptr<WriteBufferFromPocoSocketChunked> out;
std::optional<UInt64> last_input_packet_type;
String query_id;


@ -107,6 +107,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
}
}
proto_send_chunked = config.getString("proto_caps.send", "notchunked");
proto_recv_chunked = config.getString("proto_caps.recv", "notchunked");
quota_key = config.getString("quota_key", "");
/// By default compression is disabled if address looks like localhost.


@ -20,6 +20,8 @@ struct ConnectionParameters
std::string default_database;
std::string user;
std::string password;
std::string proto_send_chunked = "notchunked";
std::string proto_recv_chunked = "notchunked";
std::string quota_key;
SSHKey ssh_private_key;
std::string jwt;


@ -13,6 +13,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
String default_database,
String user,
String password,
String proto_send_chunked,
String proto_recv_chunked,
String quota_key,
String cluster,
String cluster_secret,
@ -22,7 +24,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
Priority priority)
{
Key key{
max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
max_connections, host, port, default_database, user, password, proto_send_chunked, proto_recv_chunked, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
std::lock_guard lock(mutex);
auto [it, inserted] = pools.emplace(key, ConnectionPoolPtr{});
@ -39,6 +41,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
default_database,
user,
password,
proto_send_chunked,
proto_recv_chunked,
quota_key,
cluster,
cluster_secret,


@ -73,6 +73,8 @@ public:
const String & default_database_,
const String & user_,
const String & password_,
const String & proto_send_chunked_,
const String & proto_recv_chunked_,
const String & quota_key_,
const String & cluster_,
const String & cluster_secret_,
@ -85,6 +87,8 @@ public:
, default_database(default_database_)
, user(user_)
, password(password_)
, proto_send_chunked(proto_send_chunked_)
, proto_recv_chunked(proto_recv_chunked_)
, quota_key(quota_key_)
, cluster(cluster_)
, cluster_secret(cluster_secret_)
@ -116,7 +120,9 @@ protected:
{
return std::make_shared<Connection>(
host, port,
default_database, user, password, SSHKey(), /*jwt*/ "", quota_key,
default_database, user, password,
proto_send_chunked, proto_recv_chunked,
SSHKey(), /*jwt*/ "", quota_key,
cluster, cluster_secret,
client_name, compression, secure);
}
@ -125,6 +131,8 @@ private:
String default_database;
String user;
String password;
String proto_send_chunked;
String proto_recv_chunked;
String quota_key;
/// For inter-server authorization
@ -150,6 +158,8 @@ public:
String default_database;
String user;
String password;
String proto_send_chunked;
String proto_recv_chunked;
String quota_key;
String cluster;
String cluster_secret;
@ -173,6 +183,8 @@ public:
String default_database,
String user,
String password,
String proto_send_chunked,
String proto_recv_chunked,
String quota_key,
String cluster,
String cluster_secret,
@ -190,6 +202,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP
{
return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port
&& lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password
&& lhs.proto_send_chunked == rhs.proto_send_chunked && lhs.proto_recv_chunked == rhs.proto_recv_chunked
&& lhs.quota_key == rhs.quota_key
&& lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name
&& lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority;

File diff suppressed because it is too large.


@ -3,6 +3,7 @@
#include <Columns/IColumn.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnVariant.h>
#include <Columns/ColumnString.h>
#include <DataTypes/IDataType.h>
#include <Common/WeakHash.h>
@ -19,11 +20,19 @@ namespace DB
*
* When new values are inserted into a Dynamic column, the internal Variant
* type and column are extended if the inserted value has a new type.
* When the limit on the number of dynamic types is exceeded, all values
* with new types are inserted into a special shared variant with type String
* that contains the values and their types in binary format.
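* For example, with Dynamic(max_types=2) already holding Int64 and String,
* a newly inserted Array(Int64) value is written into the shared variant
* as a pair of its type and its binary-serialized value.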
*/
class ColumnDynamic final : public COWHelper<IColumnHelper<ColumnDynamic>, ColumnDynamic>
{
public:
/// Maximum limit on dynamic types. We use ColumnVariant to store all the types,
/// so the limit cannot be greater than ColumnVariant::MAX_NESTED_COLUMNS.
/// We also always reserve one variant for the shared variant.
static constexpr size_t MAX_DYNAMIC_TYPES_LIMIT = ColumnVariant::MAX_NESTED_COLUMNS - 1;
static constexpr const char * SHARED_VARIANT_TYPE_NAME = "SharedVariant";
struct Statistics
{
enum class Source
@ -32,12 +41,27 @@ public:
MERGE, /// Statistics were calculated during merge of several MergeTree parts.
};
explicit Statistics(Source source_) : source(source_) {}
/// Source of the statistics.
Source source;
/// Statistics data: (variant name) -> (total variant size in data part).
std::unordered_map<String, size_t> data;
/// Statistics data for usual variants: (variant name) -> (total variant size in data part).
std::unordered_map<String, size_t> variants_statistics;
/// Statistics data for variants from shared variant: (variant name) -> (total variant size in data part).
/// For shared variant we store statistics only for first 256 variants (should cover almost all cases and it's not expensive).
static constexpr const size_t MAX_SHARED_VARIANT_STATISTICS_SIZE = 256;
std::unordered_map<String, size_t> shared_variants_statistics;
};
using StatisticsPtr = std::shared_ptr<const Statistics>;
struct ComparatorBase;
using ComparatorAscendingUnstable = ComparatorAscendingUnstableImpl<ComparatorBase>;
using ComparatorAscendingStable = ComparatorAscendingStableImpl<ComparatorBase>;
using ComparatorDescendingUnstable = ComparatorDescendingUnstableImpl<ComparatorBase>;
using ComparatorDescendingStable = ComparatorDescendingStableImpl<ComparatorBase>;
using ComparatorEqual = ComparatorEqualImpl<ComparatorBase>;
private:
friend class COWHelper<IColumnHelper<ColumnDynamic>, ColumnDynamic>;
@ -54,28 +78,32 @@ private:
};
explicit ColumnDynamic(size_t max_dynamic_types_);
ColumnDynamic(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {});
ColumnDynamic(MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {});
ColumnDynamic(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {});
public:
/** Create immutable column using immutable arguments. This arguments may be shared with other columns.
* Use IColumn::mutate in order to make mutable column and mutate shared nested columns.
*/
using Base = COWHelper<IColumnHelper<ColumnDynamic>, ColumnDynamic>;
static Ptr create(const ColumnPtr & variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {})
static Ptr create(const ColumnPtr & variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {})
{
return ColumnDynamic::create(variant_column_->assumeMutable(), variant_info_, max_dynamic_types_, statistics_);
return ColumnDynamic::create(variant_column_->assumeMutable(), variant_info_, max_dynamic_types_, global_max_dynamic_types_, statistics_);
}
static MutablePtr create(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {})
static MutablePtr create(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {})
{
return Base::create(std::move(variant_column_), variant_info_, max_dynamic_types_, statistics_);
return Base::create(std::move(variant_column_), variant_info_, max_dynamic_types_, global_max_dynamic_types_, statistics_);
}
static MutablePtr create(MutableColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_ = {});
static ColumnPtr create(ColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_ = {})
static MutablePtr create(MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {})
{
return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, statistics_);
return Base::create(std::move(variant_column_), variant_type_, max_dynamic_types_, global_max_dynamic_types_, statistics_);
}
static ColumnPtr create(ColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {})
{
return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, global_max_dynamic_types_, statistics_);
}
static MutablePtr create(size_t max_dynamic_types_)
@ -83,7 +111,7 @@ public:
return Base::create(max_dynamic_types_);
}
std::string getName() const override { return "Dynamic(max_types=" + std::to_string(max_dynamic_types) + ")"; }
std::string getName() const override { return "Dynamic(max_types=" + std::to_string(global_max_dynamic_types) + ")"; }
const char * getFamilyName() const override
{
@ -98,12 +126,12 @@ public:
MutableColumnPtr cloneEmpty() const override
{
/// Keep current dynamic structure
return Base::create(variant_column->cloneEmpty(), variant_info, max_dynamic_types, statistics);
return Base::create(variant_column->cloneEmpty(), variant_info, max_dynamic_types, global_max_dynamic_types, statistics);
}
MutableColumnPtr cloneResized(size_t size) const override
{
return Base::create(variant_column->cloneResized(size), variant_info, max_dynamic_types, statistics);
return Base::create(variant_column->cloneResized(size), variant_info, max_dynamic_types, global_max_dynamic_types, statistics);
}
size_t size() const override
@ -111,15 +139,9 @@ public:
return variant_column->size();
}
Field operator[](size_t n) const override
{
return (*variant_column)[n];
}
Field operator[](size_t n) const override;
void get(size_t n, Field & res) const override
{
variant_column->get(n, res);
}
void get(size_t n, Field & res) const override;
bool isDefaultAt(size_t n) const override
{
@ -187,7 +209,7 @@ public:
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override
{
return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types);
return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types, global_max_dynamic_types);
}
void expand(const Filter & mask, bool inverted) override
@ -197,17 +219,17 @@ public:
ColumnPtr permute(const Permutation & perm, size_t limit) const override
{
return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types);
return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types, global_max_dynamic_types);
}
ColumnPtr index(const IColumn & indexes, size_t limit) const override
{
return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types);
return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types, global_max_dynamic_types);
}
ColumnPtr replicate(const Offsets & replicate_offsets) const override
{
return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types);
return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types, global_max_dynamic_types);
}
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override
@ -216,7 +238,7 @@ public:
MutableColumns scattered_columns;
scattered_columns.reserve(num_columns);
for (auto & scattered_variant_column : scattered_variant_columns)
scattered_columns.emplace_back(create(std::move(scattered_variant_column), variant_info, max_dynamic_types));
scattered_columns.emplace_back(create(std::move(scattered_variant_column), variant_info, max_dynamic_types, global_max_dynamic_types));
return scattered_columns;
}
@ -238,16 +260,10 @@ public:
}
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override
{
variant_column->getPermutation(direction, stability, limit, nan_direction_hint, res);
}
size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override
{
variant_column->updatePermutation(direction, stability, limit, nan_direction_hint, res, equal_ranges);
}
size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override;
void reserve(size_t n) override
{
@ -295,7 +311,7 @@ public:
bool structureEquals(const IColumn & rhs) const override
{
if (const auto * rhs_concrete = typeid_cast<const ColumnDynamic *>(&rhs))
return max_dynamic_types == rhs_concrete->max_dynamic_types;
return global_max_dynamic_types == rhs_concrete->global_max_dynamic_types;
return false;
}
@ -338,17 +354,75 @@ public:
const ColumnVariant & getVariantColumn() const { return assert_cast<const ColumnVariant &>(*variant_column); }
ColumnVariant & getVariantColumn() { return assert_cast<ColumnVariant &>(*variant_column); }
bool addNewVariant(const DataTypePtr & new_variant);
void addStringVariant();
bool addNewVariant(const DataTypePtr & new_variant, const String & new_variant_name);
bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); }
bool hasDynamicStructure() const override { return true; }
void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override;
const Statistics & getStatistics() const { return statistics; }
const StatisticsPtr & getStatistics() const { return statistics; }
void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; }
size_t getMaxDynamicTypes() const { return max_dynamic_types; }
/// Check if we can add new variant types.
/// The shared variant doesn't count towards the limit but is always present,
/// so we should subtract 1 from the total types count.
bool canAddNewVariants(size_t current_variants_count, size_t new_variants_count) const { return current_variants_count + new_variants_count - 1 <= max_dynamic_types; }
bool canAddNewVariant(size_t current_variants_count) const { return canAddNewVariants(current_variants_count, 1); }
bool canAddNewVariants(size_t new_variants_count) const { return canAddNewVariants(variant_info.variant_names.size(), new_variants_count); }
bool canAddNewVariant() const { return canAddNewVariants(variant_info.variant_names.size(), 1); }
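/// Illustrative example (hypothetical numbers): with max_dynamic_types = 3 and current
/// variants {SharedVariant, Int8, String} (current_variants_count = 3), canAddNewVariants(3, 1)
/// holds because 3 + 1 - 1 = 3 <= 3, while canAddNewVariants(3, 2) fails because 3 + 2 - 1 = 4 > 3.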
void setVariantType(const DataTypePtr & variant_type);
void setMaxDynamicPaths(size_t max_dynamic_type_);
static const String & getSharedVariantTypeName()
{
static const String name = SHARED_VARIANT_TYPE_NAME;
return name;
}
static DataTypePtr getSharedVariantDataType();
ColumnVariant::Discriminator getSharedVariantDiscriminator() const
{
return variant_info.variant_name_to_discriminator.at(getSharedVariantTypeName());
}
ColumnString & getSharedVariant()
{
return assert_cast<ColumnString &>(getVariantColumn().getVariantByGlobalDiscriminator(getSharedVariantDiscriminator()));
}
const ColumnString & getSharedVariant() const
{
return assert_cast<const ColumnString &>(getVariantColumn().getVariantByGlobalDiscriminator(getSharedVariantDiscriminator()));
}
/// Serializes the type and value in binary format into the provided shared variant. Doesn't update the Variant's discriminators and offsets.
static void serializeValueIntoSharedVariant(ColumnString & shared_variant, const IColumn & src, const DataTypePtr & type, const SerializationPtr & serialization, size_t n);
/// Inserts a value into the shared variant. Also updates the Variant's discriminators and offsets.
void insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n);
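/// Informal sketch of the shared variant layout (the exact binary format is owned by the
/// serialization code, so treat this as an assumption): each shared variant row stores the
/// value's data type encoded in binary, followed by the value itself serialized with
/// serialization->serializeBinary(), so the original (type, value) pair can be fully
/// reconstructed when the row is read back.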
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name) const
{
/// Get the serialization for the provided data type.
/// To avoid calling type->getDefaultSerialization() every time, we use a simple cache with a maximum size.
/// When the maximum size is reached, we just clear the cache.
if (serialization_cache.size() == SERIALIZATION_CACHE_MAX_SIZE)
serialization_cache.clear();
if (auto it = serialization_cache.find(variant_name); it != serialization_cache.end())
return it->second;
return serialization_cache.emplace(variant_name, variant_type->getDefaultSerialization()).first->second;
}
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) const { return getVariantSerialization(variant_type, variant_type->getName()); }
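/// The whole-cache clear above is a simple eviction policy: with a bounded number of distinct
/// variant types, occasionally re-creating a serialization is likely cheaper than maintaining
/// LRU bookkeeping (a design-note assumption, not a guarantee).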
private:
void createVariantInfo(const DataTypePtr & variant_type);
/// Combine the current variant with the other variant and return the global discriminators mapping
/// from the other variant to the combined one. It's used when inserting from
/// different variants.
@ -361,12 +435,19 @@ private:
/// Store the type of the current variant with some additional information.
VariantInfo variant_info;
/// The maximum number of different types that can be stored in this Dynamic column.
/// If exceeded, all new variants will be converted to String.
/// If exceeded, all new variants will be added to a special shared variant with type String
/// in binary format. This limit can differ between instances of the Dynamic column.
/// When max_dynamic_types = 0, we have only the shared variant and insert all values into it.
size_t max_dynamic_types;
/// The types limit specified by the user in the data type: Dynamic(max_types=N).
/// max_dynamic_types in any column instance of this Dynamic type can only be smaller or equal
/// (for example, max_dynamic_types can be reduced in takeDynamicStructureFromSourceColumns
/// before a merge of different Dynamic columns).
size_t global_max_dynamic_types;
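/// Illustration (hypothetical numbers): for a column of type Dynamic(max_types=8),
/// global_max_dynamic_types is always 8, while max_dynamic_types of a particular instance may
/// be lowered (e.g. to 2) in takeDynamicStructureFromSourceColumns, so only the most frequent
/// variants keep dedicated columns and the remaining values are routed to the shared variant.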
/// Size statistics of each variant from the MergeTree data part.
/// Used in takeDynamicStructureFromSourceColumns and set during deserialization.
Statistics statistics;
StatisticsPtr statistics;
/// Cache (Variant name) -> (global discriminators mapping from this variant to current variant in Dynamic column).
/// Used to avoid recalculating mappings in combineVariants for the same Variant types.
@ -374,6 +455,17 @@ private:
/// Cache of Variant types that couldn't be combined with current variant in Dynamic column.
/// Used to avoid checking if combination is possible for the same Variant types.
std::unordered_set<String> variants_with_failed_combination;
/// We can use serializations of different data types to serialize values into the shared variant.
/// To avoid creating the same serialization multiple times, we use a simple cache.
static const size_t SERIALIZATION_CACHE_MAX_SIZE = 256;
mutable std::unordered_map<String, SerializationPtr> serialization_cache;
};
void extendVariantColumn(
IColumn & variant_column,
const DataTypePtr & old_variant_type,
const DataTypePtr & new_variant_type,
std::unordered_map<String, UInt8> old_variant_name_to_discriminator);
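/// Sketch of intent (informal, inferred from the signature): extendVariantColumn rewires an
/// existing ColumnVariant when the set of variants grows, so data already stored under
/// old_variant_type keeps its discriminators (via old_variant_name_to_discriminator) while the
/// column is widened to new_variant_type without rebuilding the already-stored rows.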
}
View File
@ -476,7 +476,7 @@ void ColumnVariant::insertFromImpl(const DB::IColumn & src_, size_t n, const std
}
}
void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> * global_discriminators_mapping)
void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> * global_discriminators_mapping, const Discriminator * skip_discriminator)
{
const size_t num_variants = variants.size();
const auto & src = assert_cast<const ColumnVariant &>(src_);
@ -557,9 +557,12 @@ void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start,
Discriminator global_discr = src_global_discr;
if (global_discriminators_mapping && src_global_discr != NULL_DISCRIMINATOR)
global_discr = (*global_discriminators_mapping)[src_global_discr];
Discriminator local_discr = localDiscriminatorByGlobal(global_discr);
if (nested_length)
variants[local_discr]->insertRangeFrom(*src.variants[src_local_discr], nested_start, nested_length);
if (!skip_discriminator || global_discr != *skip_discriminator)
{
Discriminator local_discr = localDiscriminatorByGlobal(global_discr);
if (nested_length)
variants[local_discr]->insertRangeFrom(*src.variants[src_local_discr], nested_start, nested_length);
}
}
}
@ -610,7 +613,7 @@ void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t l
void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length)
#endif
{
insertRangeFromImpl(src_, start, length, nullptr);
insertRangeFromImpl(src_, start, length, nullptr, nullptr);
}
#if !defined(DEBUG_OR_SANITIZER_BUILD)
@ -627,9 +630,9 @@ void ColumnVariant::insertFrom(const DB::IColumn & src_, size_t n, const std::ve
insertFromImpl(src_, n, &global_discriminators_mapping);
}
void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping)
void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping, Discriminator skip_discriminator)
{
insertRangeFromImpl(src_, start, length, &global_discriminators_mapping);
insertRangeFromImpl(src_, start, length, &global_discriminators_mapping, &skip_discriminator);
}
void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping)
@ -673,6 +676,14 @@ void ColumnVariant::insertManyIntoVariantFrom(DB::ColumnVariant::Discriminator g
variants[local_discr]->insertManyFrom(src_, position, length);
}
void ColumnVariant::deserializeBinaryIntoVariant(ColumnVariant::Discriminator global_discr, const SerializationPtr & serialization, ReadBuffer & buf, const FormatSettings & format_settings)
{
auto local_discr = localDiscriminatorByGlobal(global_discr);
serialization->deserializeBinary(*variants[local_discr], buf, format_settings);
getLocalDiscriminators().push_back(local_discr);
getOffsets().push_back(variants[local_discr]->size() - 1);
}
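/// Usage sketch (illustrative): appending one binary-serialized value of a known variant, e.g.
/// when materializing a value out of Dynamic's shared variant:
///     auto serialization = type->getDefaultSerialization();
///     variant_column.deserializeBinaryIntoVariant(global_discr, serialization, buf, format_settings);
/// The value is appended to the variant with discriminator global_discr, and the
/// discriminators/offsets columns are extended to reference the new row.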
void ColumnVariant::insertDefault()
{
getLocalDiscriminators().push_back(NULL_DISCRIMINATOR);
@ -1213,9 +1224,7 @@ struct ColumnVariant::ComparatorBase
ALWAYS_INLINE int compare(size_t lhs, size_t rhs) const
{
int res = parent.compareAt(lhs, rhs, parent, nan_direction_hint);
return res;
return parent.compareAt(lhs, rhs, parent, nan_direction_hint);
}
};
View File
@ -2,6 +2,8 @@
#include <Columns/IColumn.h>
#include <Columns/ColumnVector.h>
#include <Formats/FormatSettings.h>
#include <DataTypes/Serializations/ISerialization.h>
namespace DB
@ -196,13 +198,15 @@ public:
/// Methods for insertion from another Variant but with known mapping between global discriminators.
void insertFrom(const IColumn & src_, size_t n, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping);
void insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping);
/// Don't insert data into the variant with the skip_discriminator global discriminator; it will be processed separately.
void insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping, Discriminator skip_discriminator);
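/// Example (illustrative): when a Dynamic column copies a range from another Dynamic column,
/// rows of the source's shared variant must be re-dispatched one by one, so the caller passes
/// the shared variant's global discriminator as skip_discriminator and inserts those rows itself.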
void insertManyFrom(const IColumn & src_, size_t position, size_t length, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping);
/// Methods for insertion into a specific variant.
void insertIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t n);
void insertRangeIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t start, size_t length);
void insertManyIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t position, size_t length);
void deserializeBinaryIntoVariant(Discriminator global_discr, const SerializationPtr & serialization, ReadBuffer & buf, const FormatSettings & format_settings);
void insertDefault() override;
void insertManyDefaults(size_t length) override;
@ -264,6 +268,7 @@ public:
ColumnPtr & getVariantPtrByGlobalDiscriminator(size_t discr) { return variants[global_to_local_discriminators.at(discr)]; }
const NestedColumns & getVariants() const { return variants; }
NestedColumns & getVariants() { return variants; }
const IColumn & getLocalDiscriminatorsColumn() const { return *local_discriminators; }
IColumn & getLocalDiscriminatorsColumn() { return *local_discriminators; }
@ -303,6 +308,8 @@ public:
return true;
}
std::vector<Discriminator> getLocalToGlobalDiscriminatorsMapping() const { return local_to_global_discriminators; }
/// Check if we have only 1 non-empty variant and no NULL values,
/// and if so, return the discriminator of this non-empty variant.
std::optional<Discriminator> getLocalDiscriminatorOfOneNoneEmptyVariantNoNulls() const;
@ -323,7 +330,7 @@ public:
private:
void insertFromImpl(const IColumn & src_, size_t n, const std::vector<ColumnVariant::Discriminator> * global_discriminators_mapping);
void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> * global_discriminators_mapping);
void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector<ColumnVariant::Discriminator> * global_discriminators_mapping, const Discriminator * skip_discriminator);
void insertManyFromImpl(const IColumn & src_, size_t position, size_t length, const std::vector<ColumnVariant::Discriminator> * global_discriminators_mapping);
void initIdentityGlobalToLocalDiscriminatorsMapping();
View File
@ -7,28 +7,34 @@ using namespace DB;
TEST(ColumnDynamic, CreateEmpty)
{
auto column = ColumnDynamic::create(255);
auto column = ColumnDynamic::create(254);
ASSERT_TRUE(column->empty());
ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant()");
ASSERT_TRUE(column->getVariantInfo().variant_names.empty());
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.empty());
ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)");
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1);
ASSERT_EQ(column->getVariantInfo().variant_names[0], "SharedVariant");
ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.size(), 1);
ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0);
ASSERT_TRUE(column->getVariantColumn().getVariantByGlobalDiscriminator(0).empty());
}
TEST(ColumnDynamic, InsertDefault)
{
auto column = ColumnDynamic::create(255);
auto column = ColumnDynamic::create(254);
column->insertDefault();
ASSERT_TRUE(column->size() == 1);
ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant()");
ASSERT_TRUE(column->getVariantInfo().variant_names.empty());
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.empty());
ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)");
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1);
ASSERT_EQ(column->getVariantInfo().variant_names[0], "SharedVariant");
ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.size(), 1);
ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0);
ASSERT_TRUE(column->getVariantColumn().getVariantByGlobalDiscriminator(0).empty());
ASSERT_TRUE(column->isNullAt(0));
ASSERT_EQ((*column)[0], Field(Null()));
}
TEST(ColumnDynamic, InsertFields)
{
auto column = ColumnDynamic::create(255);
auto column = ColumnDynamic::create(254);
column->insert(Field(42));
column->insert(Field(-42));
column->insert(Field("str1"));
@ -41,16 +47,16 @@ TEST(ColumnDynamic, InsertFields)
column->insert(Field(43.43));
ASSERT_TRUE(column->size() == 10);
ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, String)");
std::vector<String> expected_names = {"Float64", "Int8", "String"};
ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, SharedVariant, String)");
std::vector<String> expected_names = {"Float64", "Int8", "SharedVariant", "String"};
ASSERT_EQ(column->getVariantInfo().variant_names, expected_names);
std::unordered_map<String, UInt8> expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"String", 2}};
std::unordered_map<String, UInt8> expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}};
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator == expected_variant_name_to_discriminator);
}
ColumnDynamic::MutablePtr getDynamicWithManyVariants(size_t num_variants, Field tuple_element = Field(42))
{
auto column = ColumnDynamic::create(255);
auto column = ColumnDynamic::create(254);
for (size_t i = 0; i != num_variants; ++i)
{
Tuple tuple;
@ -66,61 +72,71 @@ TEST(ColumnDynamic, InsertFieldsOverflow1)
{
auto column = getDynamicWithManyVariants(253);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 253);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254);
column->insert(Field(42.42));
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254);
ASSERT_EQ(column->size(), 254);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
column->insert(Field(42));
ASSERT_EQ(column->size(), 255);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column->getSharedVariant().size(), 1);
Field field = (*column)[column->size() - 1];
ASSERT_EQ(field, "42");
ASSERT_EQ(field, 42);
column->insert(Field(43));
ASSERT_EQ(column->size(), 256);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column->getSharedVariant().size(), 2);
field = (*column)[column->size() - 1];
ASSERT_EQ(field, "43");
ASSERT_EQ(field, 43);
column->insert(Field("str1"));
ASSERT_EQ(column->size(), 257);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column->getSharedVariant().size(), 3);
field = (*column)[column->size() - 1];
ASSERT_EQ(field, "str1");
column->insert(Field(Array({Field(42), Field(43)})));
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)"));
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column->getSharedVariant().size(), 4);
field = (*column)[column->size() - 1];
ASSERT_EQ(field, "[42, 43]");
ASSERT_EQ(field, Field(Array({Field(42), Field(43)})));
}
TEST(ColumnDynamic, InsertFieldsOverflow2)
{
auto column = getDynamicWithManyVariants(254);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254);
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
column->insert(Field("str1"));
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column->getSharedVariant().size(), 1);
Field field = (*column)[column->size() - 1];
ASSERT_EQ(field, "str1");
column->insert(Field(42));
ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
Field field = (*column)[column->size() - 1];
ASSERT_EQ(field, "42");
ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column->getSharedVariant().size(), 2);
field = (*column)[column->size() - 1];
ASSERT_EQ(field, 42);
}
ColumnDynamic::MutablePtr getInsertFromColumn(size_t num = 1)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
for (size_t i = 0; i != num; ++i)
{
column_from->insert(Field(42));
@ -154,41 +170,41 @@ void checkInsertFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynami
TEST(ColumnDynamic, InsertFrom1)
{
auto column_to = ColumnDynamic::create(255);
checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}});
auto column_to = ColumnDynamic::create(254);
checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}});
}
TEST(ColumnDynamic, InsertFrom2)
{
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insert(Field(42.42));
column_to->insert(Field("str"));
checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}});
checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}});
}
TEST(ColumnDynamic, InsertFrom3)
{
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insert(Field(42.42));
column_to->insert(Field("str"));
column_to->insert(Array({Field(42)}));
checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}});
checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}});
}
TEST(ColumnDynamic, InsertFromOverflow1)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
auto column_to = getDynamicWithManyVariants(253);
column_to->insertFrom(*column_from, 0);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
auto field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, 42);
@ -196,20 +212,22 @@ TEST(ColumnDynamic, InsertFromOverflow1)
column_to->insertFrom(*column_from, 1);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 1);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, "42.42");
ASSERT_EQ(field, 42.42);
column_to->insertFrom(*column_from, 2);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, "str");
}
TEST(ColumnDynamic, InsertFromOverflow2)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
@ -221,9 +239,32 @@ TEST(ColumnDynamic, InsertFromOverflow2)
column_to->insertFrom(*column_from, 1);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 1);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, "42.42");
ASSERT_EQ(field, 42.42);
}
TEST(ColumnDynamic, InsertFromOverflow3)
{
auto column_from = ColumnDynamic::create(1);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(41));
column_to->insertFrom(*column_from, 0);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_EQ(column_to->getSharedVariant().size(), 0);
auto field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, 42);
column_to->insertFrom(*column_from, 1);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 1);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, 42.42);
}
void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynamic::MutablePtr & column_to, const std::string & expected_variant, const std::vector<String> & expected_names, const std::unordered_map<String, UInt8> & expected_variant_name_to_discriminator)
@ -256,42 +297,43 @@ void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDy
TEST(ColumnDynamic, InsertManyFrom1)
{
auto column_to = ColumnDynamic::create(255);
checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}});
auto column_to = ColumnDynamic::create(254);
checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}});
}
TEST(ColumnDynamic, InsertManyFrom2)
{
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insert(Field(42.42));
column_to->insert(Field("str"));
checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}});
checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}});
}
TEST(ColumnDynamic, InsertManyFrom3)
{
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insert(Field(42.42));
column_to->insert(Field("str"));
column_to->insert(Array({Field(42)}));
checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}});
checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}});
}
TEST(ColumnDynamic, InsertManyFromOverflow1)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
auto column_to = getDynamicWithManyVariants(253);
column_to->insertManyFrom(*column_from, 0, 2);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_EQ(column_to->getSharedVariant().size(), 0);
auto field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, 42);
field = (*column_to)[column_to->size() - 1];
@ -300,15 +342,17 @@ TEST(ColumnDynamic, InsertManyFromOverflow1)
column_to->insertManyFrom(*column_from, 1, 2);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, "42.42");
ASSERT_EQ(field, 42.42);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, "42.42");
ASSERT_EQ(field, 42.42);
column_to->insertManyFrom(*column_from, 2, 2);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 4);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, "str");
field = (*column_to)[column_to->size() - 2];
@ -317,14 +361,15 @@ TEST(ColumnDynamic, InsertManyFromOverflow1)
TEST(ColumnDynamic, InsertManyFromOverflow2)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
auto column_to = getDynamicWithManyVariants(253);
column_to->insertManyFrom(*column_from, 0, 2);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_EQ(column_to->getSharedVariant().size(), 0);
auto field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, 42);
field = (*column_to)[column_to->size() - 1];
@ -333,11 +378,39 @@ TEST(ColumnDynamic, InsertManyFromOverflow2)
column_to->insertManyFrom(*column_from, 1, 2);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, "42.42");
ASSERT_EQ(field, 42.42);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, "42.42");
ASSERT_EQ(field, 42.42);
}
TEST(ColumnDynamic, InsertManyFromOverflow3)
{
auto column_from = ColumnDynamic::create(1);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(41));
column_to->insertManyFrom(*column_from, 0, 2);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_EQ(column_to->getSharedVariant().size(), 0);
auto field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, 42);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, 42);
column_to->insertManyFrom(*column_from, 1, 2);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, 42.42);
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, 42.42);
}
void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynamic::MutablePtr & column_to, const std::string & expected_variant, const std::vector<String> & expected_names, const std::unordered_map<String, UInt8> & expected_variant_name_to_discriminator)
@ -368,34 +441,34 @@ void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnD
TEST(ColumnDynamic, InsertRangeFrom1)
{
auto column_to = ColumnDynamic::create(255);
checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}});
auto column_to = ColumnDynamic::create(254);
checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}});
}
TEST(ColumnDynamic, InsertRangeFrom2)
{
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insert(Field(42.42));
column_to->insert(Field("str1"));
checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}});
checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}});
}
TEST(ColumnDynamic, InsertRangeFrom3)
{
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insert(Field(42.42));
column_to->insert(Field("str1"));
column_to->insert(Array({Field(42)}));
checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}});
checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}});
}
TEST(ColumnDynamic, InsertRangeFromOverflow1)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(43));
column_from->insert(Field(42.42));
@ -403,23 +476,25 @@ TEST(ColumnDynamic, InsertRangeFromOverflow1)
auto column_to = getDynamicWithManyVariants(253);
column_to->insertRangeFrom(*column_from, 0, 4);
ASSERT_EQ(column_to->size(), 257);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
auto field = (*column_to)[column_to->size() - 4];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("42.42"));
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field("str"));
}
TEST(ColumnDynamic, InsertRangeFromOverflow2)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(43));
column_from->insert(Field(42.42));
@ -428,19 +503,20 @@ TEST(ColumnDynamic, InsertRangeFromOverflow2)
column_to->insertRangeFrom(*column_from, 0, 3);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 1);
auto field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field("42.42"));
ASSERT_EQ(field, Field(42.42));
}
TEST(ColumnDynamic, InsertRangeFromOverflow3)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(43));
column_from->insert(Field(42.42));
@ -449,20 +525,21 @@ TEST(ColumnDynamic, InsertRangeFromOverflow3)
column_to->insert(Field("Str"));
column_to->insertRangeFrom(*column_from, 0, 3);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 3);
auto field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field("42.42"));
ASSERT_EQ(field, Field(42.42));
}
TEST(ColumnDynamic, InsertRangeFromOverflow4)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
@ -471,19 +548,20 @@ TEST(ColumnDynamic, InsertRangeFromOverflow4)
column_to->insertRangeFrom(*column_from, 0, 3);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 3);
auto field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field("42"));
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("42.42"));
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field("str"));
}
TEST(ColumnDynamic, InsertRangeFromOverflow5)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(43));
column_from->insert(Field(42.42));
@ -493,22 +571,23 @@ TEST(ColumnDynamic, InsertRangeFromOverflow5)
column_to->insert(Field("str"));
column_to->insertRangeFrom(*column_from, 0, 4);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_EQ(column_to->getSharedVariant().size(), 3);
auto field = (*column_to)[column_to->size() - 4];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("42.42"));
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field("str"));
}
TEST(ColumnDynamic, InsertRangeFromOverflow6)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(43));
column_from->insert(Field(44));
@ -520,13 +599,14 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6)
auto column_to = getDynamicWithManyVariants(253);
column_to->insertRangeFrom(*column_from, 2, 5);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)"));
ASSERT_EQ(column_to->getSharedVariant().size(), 4);
auto field = (*column_to)[column_to->size() - 5];
ASSERT_EQ(field, Field("44"));
ASSERT_EQ(field, Field(44));
field = (*column_to)[column_to->size() - 4];
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 3];
@ -534,12 +614,136 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6)
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("str"));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field("[42]"));
ASSERT_EQ(field, Field(Array({Field(42)})));
}
TEST(ColumnDynamic, InsertRangeFromOverflow7)
{
auto column_from = ColumnDynamic::create(2);
column_from->insert(Field(42.42));
column_from->insert(Field("str1"));
column_from->insert(Field(42));
column_from->insert(Field(43.43));
column_from->insert(Field(Array({Field(41)})));
column_from->insert(Field(43));
column_from->insert(Field("str2"));
column_from->insert(Field(Array({Field(42)})));
auto column_to = ColumnDynamic::create(254);
column_to->insert(Field(42));
column_to->insertRangeFrom(*column_from, 0, 8);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 4);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
auto field = (*column_to)[column_to->size() - 8];
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 7];
ASSERT_EQ(field, Field("str1"));
field = (*column_to)[column_to->size() - 6];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 5];
ASSERT_EQ(field, Field(43.43));
field = (*column_to)[column_to->size() - 4];
ASSERT_EQ(field, Field(Array({Field(41)})));
field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("str2"));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field(Array({Field(42)})));
}
TEST(ColumnDynamic, InsertRangeFromOverflow8)
{
auto column_from = ColumnDynamic::create(2);
column_from->insert(Field(42.42));
column_from->insert(Field("str1"));
column_from->insert(Field(42));
column_from->insert(Field(43.43));
column_from->insert(Field(Array({Field(41)})));
column_from->insert(Field(43));
column_from->insert(Field("str2"));
column_from->insert(Field(Array({Field(42)})));
auto column_to = ColumnDynamic::create(2);
column_to->insert(Field(42));
column_from->insert(Field("str1"));
column_to->insertRangeFrom(*column_from, 0, 8);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 3);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)"));
ASSERT_EQ(column_to->getSharedVariant().size(), 4);
auto field = (*column_to)[column_to->size() - 8];
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 7];
ASSERT_EQ(field, Field("str1"));
field = (*column_to)[column_to->size() - 6];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 5];
ASSERT_EQ(field, Field(43.43));
field = (*column_to)[column_to->size() - 4];
ASSERT_EQ(field, Field(Array({Field(41)})));
field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("str2"));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field(Array({Field(42)})));
}
TEST(ColumnDynamic, InsertRangeFromOverflow9)
{
auto column_from = ColumnDynamic::create(3);
column_from->insert(Field("str1"));
column_from->insert(Field(42.42));
column_from->insert(Field("str2"));
column_from->insert(Field(42));
column_from->insert(Field(43.43));
column_from->insert(Field(Array({Field(41)})));
column_from->insert(Field(43));
column_from->insert(Field("str2"));
column_from->insert(Field(Array({Field(42)})));
auto column_to = ColumnDynamic::create(2);
column_to->insert(Field(42));
column_to->insertRangeFrom(*column_from, 0, 9);
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 3);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)"));
ASSERT_EQ(column_to->getSharedVariant().size(), 4);
auto field = (*column_to)[column_to->size() - 9];
ASSERT_EQ(field, Field("str1"));
field = (*column_to)[column_to->size() - 8];
ASSERT_EQ(field, Field(42.42));
field = (*column_to)[column_to->size() - 7];
ASSERT_EQ(field, Field("str2"));
field = (*column_to)[column_to->size() - 6];
ASSERT_EQ(field, Field(42));
field = (*column_to)[column_to->size() - 5];
ASSERT_EQ(field, Field(43.43));
field = (*column_to)[column_to->size() - 4];
ASSERT_EQ(field, Field(Array({Field(41)})));
field = (*column_to)[column_to->size() - 3];
ASSERT_EQ(field, Field(43));
field = (*column_to)[column_to->size() - 2];
ASSERT_EQ(field, Field("str2"));
field = (*column_to)[column_to->size() - 1];
ASSERT_EQ(field, Field(Array({Field(42)})));
}
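/// Informal summary of the overflow tests above: once the destination's variants limit is
/// reached, values of types without a dedicated variant are stored in SharedVariant as
/// binary-encoded (type, value) pairs; reading them back through operator[] decodes the
/// original value, which is why these assertions compare against the original Fields rather
/// than their String representations.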
TEST(ColumnDynamic, SerializeDeserializeFromArena1)
{
auto column = ColumnDynamic::create(255);
auto column = ColumnDynamic::create(254);
column->insert(Field(42));
column->insert(Field(42.42));
column->insert(Field("str"));
@ -564,7 +768,7 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena1)
TEST(ColumnDynamic, SerializeDeserializeFromArena2)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
@ -577,26 +781,26 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena2)
column_from->serializeValueIntoArena(2, arena, pos);
column_from->serializeValueIntoArena(3, arena, pos);
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
pos = column_to->deserializeAndInsertFromArena(ref1.data);
pos = column_to->deserializeAndInsertFromArena(pos);
pos = column_to->deserializeAndInsertFromArena(pos);
column_to->deserializeAndInsertFromArena(pos);
ASSERT_EQ((*column_from)[column_from->size() - 4], 42);
ASSERT_EQ((*column_from)[column_from->size() - 3], 42.42);
ASSERT_EQ((*column_from)[column_from->size() - 2], "str");
ASSERT_EQ((*column_from)[column_from->size() - 1], Null());
ASSERT_EQ(column_to->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, String)");
std::vector<String> expected_names = {"Float64", "Int8", "String"};
ASSERT_EQ((*column_to)[column_to->size() - 4], 42);
ASSERT_EQ((*column_to)[column_to->size() - 3], 42.42);
ASSERT_EQ((*column_to)[column_to->size() - 2], "str");
ASSERT_EQ((*column_to)[column_to->size() - 1], Null());
ASSERT_EQ(column_to->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, SharedVariant, String)");
std::vector<String> expected_names = {"Float64", "Int8", "SharedVariant", "String"};
ASSERT_EQ(column_to->getVariantInfo().variant_names, expected_names);
std::unordered_map<String, UInt8> expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"String", 2}};
std::unordered_map<String, UInt8> expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}};
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator == expected_variant_name_to_discriminator);
}
TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow)
TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow1)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(254);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
@ -615,18 +819,56 @@ TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow)
pos = column_to->deserializeAndInsertFromArena(pos);
column_to->deserializeAndInsertFromArena(pos);
ASSERT_EQ((*column_from)[column_from->size() - 4], 42);
ASSERT_EQ((*column_from)[column_from->size() - 3], 42.42);
ASSERT_EQ((*column_from)[column_from->size() - 2], "str");
ASSERT_EQ((*column_from)[column_from->size() - 1], Null());
ASSERT_EQ((*column_to)[column_to->size() - 4], 42);
ASSERT_EQ((*column_to)[column_to->size() - 3], 42.42);
ASSERT_EQ((*column_to)[column_to->size() - 2], "str");
ASSERT_EQ((*column_to)[column_to->size() - 1], Null());
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
}
TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow2)
{
auto column_from = ColumnDynamic::create(2);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
column_from->insert(Field(Null()));
column_from->insert(Field(Array({Field(42)})));
Arena arena;
const char * pos = nullptr;
auto ref1 = column_from->serializeValueIntoArena(0, arena, pos);
column_from->serializeValueIntoArena(1, arena, pos);
column_from->serializeValueIntoArena(2, arena, pos);
column_from->serializeValueIntoArena(3, arena, pos);
column_from->serializeValueIntoArena(4, arena, pos);
auto column_to = ColumnDynamic::create(2);
column_to->insert(Field(42.42));
pos = column_to->deserializeAndInsertFromArena(ref1.data);
pos = column_to->deserializeAndInsertFromArena(pos);
pos = column_to->deserializeAndInsertFromArena(pos);
pos = column_to->deserializeAndInsertFromArena(pos);
column_to->deserializeAndInsertFromArena(pos);
ASSERT_EQ((*column_to)[column_to->size() - 5], 42);
ASSERT_EQ((*column_to)[column_to->size() - 4], 42.42);
ASSERT_EQ((*column_to)[column_to->size() - 3], "str");
ASSERT_EQ((*column_to)[column_to->size() - 2], Null());
ASSERT_EQ((*column_to)[column_to->size() - 1], Field(Array({Field(42)})));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8"));
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String"));
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)"));
ASSERT_EQ(column_to->getSharedVariant().size(), 2);
}
TEST(ColumnDynamic, skipSerializedInArena)
{
auto column_from = ColumnDynamic::create(255);
auto column_from = ColumnDynamic::create(3);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
@ -640,13 +882,41 @@ TEST(ColumnDynamic, skipSerializedInArena)
auto ref4 = column_from->serializeValueIntoArena(3, arena, pos);
const char * end = ref4.data + ref4.size;
auto column_to = ColumnDynamic::create(255);
auto column_to = ColumnDynamic::create(254);
pos = column_to->skipSerializedInArena(ref1.data);
pos = column_to->skipSerializedInArena(pos);
pos = column_to->skipSerializedInArena(pos);
pos = column_to->skipSerializedInArena(pos);
ASSERT_EQ(pos, end);
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.empty());
ASSERT_TRUE(column_to->getVariantInfo().variant_names.empty());
ASSERT_EQ(column_to->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0);
ASSERT_EQ(column_to->getVariantInfo().variant_names, Names{"SharedVariant"});
}
TEST(ColumnDynamic, compare)
{
auto column_from = ColumnDynamic::create(3);
column_from->insert(Field(42));
column_from->insert(Field(42.42));
column_from->insert(Field("str"));
column_from->insert(Field(Null()));
column_from->insert(Field(Array({Field(42)})));
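/// Expected ordering exercised below (informal): NULL compares less than any value, rows of the
/// same variant compare by value, and rows of different variants compare in an order consistent
/// with comparing variant type names ("Array(Int8)" < "Float64" < "Int8" < "String").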
ASSERT_EQ(column_from->compareAt(0, 0, *column_from, -1), 0);
ASSERT_EQ(column_from->compareAt(0, 1, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(1, 1, *column_from, -1), 0);
ASSERT_EQ(column_from->compareAt(0, 2, *column_from, -1), -1);
ASSERT_EQ(column_from->compareAt(2, 0, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(2, 4, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(4, 2, *column_from, -1), -1);
ASSERT_EQ(column_from->compareAt(4, 4, *column_from, -1), 0);
ASSERT_EQ(column_from->compareAt(0, 3, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(1, 3, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(2, 3, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(3, 3, *column_from, -1), 0);
ASSERT_EQ(column_from->compareAt(4, 3, *column_from, -1), 1);
ASSERT_EQ(column_from->compareAt(3, 0, *column_from, -1), -1);
ASSERT_EQ(column_from->compareAt(3, 1, *column_from, -1), -1);
ASSERT_EQ(column_from->compareAt(3, 2, *column_from, -1), -1);
ASSERT_EQ(column_from->compareAt(3, 4, *column_from, -1), -1);
}
View File
@ -1,18 +1,24 @@
#include <Common/formatReadable.h>
#include <Common/AsynchronousMetrics.h>
#include <Common/Exception.h>
#include <Common/setThreadName.h>
#include <Common/CurrentMetrics.h>
#include <Common/filesystemHelpers.h>
#include <Common/logger_useful.h>
#include <IO/UncompressedCache.h>
#include <IO/MMappedFileCache.h>
#include <IO/ReadHelpers.h>
#include <IO/UncompressedCache.h>
#include <base/cgroupsv2.h>
#include <base/errnoToString.h>
#include <base/find_symbols.h>
#include <base/getPageSize.h>
#include <sys/resource.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>
#include <Common/setThreadName.h>
#include <boost/locale/date_time_facet.hpp>
#include <chrono>
#include <string_view>
#include "config.h"
@ -52,6 +58,12 @@ static std::unique_ptr<ReadBufferFromFilePRead> openFileIfExists(const std::stri
return {};
}
static void openCgroupv2MetricFile(const std::string & filename, std::optional<ReadBufferFromFilePRead> & out)
{
if (auto path = getCgroupsV2PathContainingFile(filename))
openFileIfExists((path.value() + filename).c_str(), out);
}
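/// Usage sketch (illustrative): openCgroupv2MetricFile("memory.max", cgroupmem_limit_in_bytes)
/// opens "<cgroup v2 path containing memory.max>/memory.max" if the unified cgroup v2 hierarchy
/// is mounted and some cgroup level exposes the file; otherwise `out` stays empty and the
/// constructor below falls back to cgroup v1 or /proc files.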
#endif
@ -63,21 +75,15 @@ AsynchronousMetrics::AsynchronousMetrics(
, protocol_server_metrics_func(protocol_server_metrics_func_)
{
#if defined(OS_LINUX)
openFileIfExists("/proc/meminfo", meminfo);
openFileIfExists("/proc/loadavg", loadavg);
openFileIfExists("/proc/stat", proc_stat);
openFileIfExists("/proc/cpuinfo", cpuinfo);
openFileIfExists("/proc/sys/fs/file-nr", file_nr);
openFileIfExists("/proc/uptime", uptime);
openFileIfExists("/proc/net/dev", net_dev);
/// CGroups v2
openFileIfExists("/sys/fs/cgroup/memory.max", cgroupmem_limit_in_bytes);
if (cgroupmem_limit_in_bytes)
{
openFileIfExists("/sys/fs/cgroup/memory.current", cgroupmem_usage_in_bytes);
}
openFileIfExists("/sys/fs/cgroup/cpu.max", cgroupcpu_max);
openCgroupv2MetricFile("memory.max", cgroupmem_limit_in_bytes);
openCgroupv2MetricFile("memory.current", cgroupmem_usage_in_bytes);
openCgroupv2MetricFile("cpu.max", cgroupcpu_max);
openCgroupv2MetricFile("cpu.stat", cgroupcpu_stat);
/// CGroups v1
if (!cgroupmem_limit_in_bytes)
@ -90,6 +96,21 @@ AsynchronousMetrics::AsynchronousMetrics(
openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_period_us", cgroupcpu_cfs_period);
openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", cgroupcpu_cfs_quota);
}
if (!cgroupcpu_stat)
openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat);
if (!cgroupcpu_stat && !cgroupcpuacct_stat)
{
/// The following metrics are not cgroup-aware, and we've found cgroup-specific metric files for similar metrics,
/// so it's better not to report them at all to avoid confusion.
openFileIfExists("/proc/loadavg", loadavg);
openFileIfExists("/proc/stat", proc_stat);
openFileIfExists("/proc/uptime", uptime);
}
/// The same story for memory metrics
if (!cgroupmem_limit_in_bytes)
openFileIfExists("/proc/meminfo", meminfo);
openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count);
openFileIfExists("/proc/self/maps", vm_maps);
@ -570,6 +591,151 @@ AsynchronousMetrics::NetworkInterfaceStatValues::operator-(const AsynchronousMet
#endif
#if defined(OS_LINUX)
void AsynchronousMetrics::applyCPUMetricsUpdate(
AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier)
{
new_values["OSUserTime" + cpu_suffix]
= {delta_values.user * multiplier,
"The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the "
"host machine, not just clickhouse-server."
" This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline "
"stalls, branch mispredictions, running another SMT core)."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSNiceTime" + cpu_suffix]
= {delta_values.nice * multiplier,
"The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all "
"the processes on the host machine, not just clickhouse-server."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSSystemTime" + cpu_suffix]
= {delta_values.system * multiplier,
"The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the "
"processes on the host machine, not just clickhouse-server."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSIdleTime" + cpu_suffix]
= {delta_values.idle * multiplier,
"The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This "
"is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline "
"stalls, branch mispredictions, running another SMT core)."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSIOWaitTime" + cpu_suffix]
= {delta_values.iowait * multiplier,
"The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as "
"the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just "
"clickhouse-server."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSIrqTime" + cpu_suffix]
= {delta_values.irq * multiplier,
"The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the "
"processes on the host machine, not just clickhouse-server."
" A high number of this metric may indicate hardware misconfiguration or a very high network load."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSSoftIrqTime" + cpu_suffix]
= {delta_values.softirq * multiplier,
"The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the "
"processes on the host machine, not just clickhouse-server."
" A high number of this metric may indicate inefficient software running on the system."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSStealTime" + cpu_suffix]
= {delta_values.steal * multiplier,
"The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide "
"metric, it includes all the processes on the host machine, not just clickhouse-server."
" Not every virtualized environments present this metric, and most of them don't."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSGuestTime" + cpu_suffix]
= {delta_values.guest * multiplier,
"The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man "
"procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" This metric is irrelevant for ClickHouse, but still exists for completeness."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
new_values["OSGuestNiceTime" + cpu_suffix]
= {delta_values.guest_nice * multiplier,
"The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest "
"was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host "
"machine, not just clickhouse-server."
" This metric is irrelevant for ClickHouse, but still exists for completeness."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across "
"them [0..num cores]."};
}
void AsynchronousMetrics::applyNormalizedCPUMetricsUpdate(
AsynchronousMetricValues & new_values, double num_cpus_to_normalize, const ProcStatValuesCPU & delta_values_all_cpus, double multiplier)
{
chassert(num_cpus_to_normalize);
new_values["OSUserTimeNormalized"]
= {delta_values_all_cpus.user * multiplier / num_cpus_to_normalize,
"The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSNiceTimeNormalized"]
= {delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize,
"The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSSystemTimeNormalized"]
= {delta_values_all_cpus.system * multiplier / num_cpus_to_normalize,
"The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSIdleTimeNormalized"]
= {delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize,
"The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSIOWaitTimeNormalized"]
= {delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize,
"The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSIrqTimeNormalized"]
= {delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize,
"The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of "
"the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSSoftIrqTimeNormalized"]
= {delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize,
"The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval "
"regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSStealTimeNormalized"]
= {delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize,
"The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSGuestTimeNormalized"]
= {delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize,
"The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless "
"of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
new_values["OSGuestNiceTimeNormalized"]
= {delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize,
"The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval "
"regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is "
"non-uniform, and still get the average resource utilization metric."};
}
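/// Worked example (hypothetical numbers): if the un-normalized OSUserTime summed across
/// all cores is 4.0 and num_cpus_to_normalize is 8, OSUserTimeNormalized is
/// 4.0 / 8 = 0.5, i.e. half of the machine's total CPU capacity went to userspace code.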
#endif
void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
{
Stopwatch watch;
@ -831,7 +997,68 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
new_values["CGroupMaxCPU"] = { max_cpu_cgroups, "The maximum number of CPU cores according to CGroups."};
}
if (proc_stat)
if (cgroupcpu_stat || cgroupcpuacct_stat)
{
try
{
ReadBufferFromFilePRead & in = cgroupcpu_stat ? *cgroupcpu_stat : *cgroupcpuacct_stat;
ProcStatValuesCPU current_values{};
/// We re-read the file from the beginning each time
in.rewind();
while (!in.eof())
{
String name;
readStringUntilWhitespace(name, in);
skipWhitespaceIfAny(in);
/// `user_usec` for cgroup v2 and `user` for cgroup v1
if (name.starts_with("user"))
{
readText(current_values.user, in);
skipToNextLineOrEOF(in);
}
/// `system_usec` for cgroup v2 and `system` for cgroup v1
else if (name.starts_with("system"))
{
readText(current_values.system, in);
skipToNextLineOrEOF(in);
}
else
skipToNextLineOrEOF(in);
}
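/// For reference, cgroup v2 `cpu.stat` reports times in microseconds, e.g. (example values):
///     usage_usec 211312
///     user_usec 190061
///     system_usec 21251
/// while cgroup v1 `cpuacct.stat` reports them in clock ticks (USER_HZ), e.g.:
///     user 19006
///     system 2125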
if (!first_run)
{
auto get_clock_ticks = [&]()
{
if (auto hz = sysconf(_SC_CLK_TCK); hz != -1)
return hz;
else
throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ");
};
const auto cgroup_version_specific_divisor = cgroupcpu_stat ? 1e6 : get_clock_ticks();
const double multiplier = 1.0 / cgroup_version_specific_divisor
/ (std::chrono::duration_cast<std::chrono::nanoseconds>(time_since_previous_update).count() / 1e9);
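/// Worked example (hypothetical numbers, cgroup v2 where values are microseconds):
/// if `user_usec` grew by 1'500'000 over a 1.0 s update interval, OSUserTime becomes
/// 1'500'000 * (1.0 / 1e6 / 1.0) = 1.5, i.e. on average 1.5 cores were busy in userspace.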
const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus;
applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier);
if (max_cpu_cgroups > 0)
applyNormalizedCPUMetricsUpdate(new_values, max_cpu_cgroups, delta_values, multiplier);
}
proc_stat_values_all_cpus = current_values;
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
openCgroupv2MetricFile("cpu.stat", cgroupcpu_stat);
if (!cgroupcpu_stat)
openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat);
}
}
else if (proc_stat)
{
try
{
@ -886,43 +1113,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
else
delta_values_all_cpus = delta_values;
new_values["OSUserTime" + cpu_suffix] = { delta_values.user * multiplier,
"The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSNiceTime" + cpu_suffix] = { delta_values.nice * multiplier,
"The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSSystemTime" + cpu_suffix] = { delta_values.system * multiplier,
"The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSIdleTime" + cpu_suffix] = { delta_values.idle * multiplier,
"The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSIOWaitTime" + cpu_suffix] = { delta_values.iowait * multiplier,
"The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSIrqTime" + cpu_suffix] = { delta_values.irq * multiplier,
"The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" A high number of this metric may indicate hardware misconfiguration or a very high network load."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSSoftIrqTime" + cpu_suffix] = { delta_values.softirq * multiplier,
"The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" A high number of this metric may indicate inefficient software running on the system."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSStealTime" + cpu_suffix] = { delta_values.steal * multiplier,
"The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" Not every virtualized environments present this metric, and most of them don't."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSGuestTime" + cpu_suffix] = { delta_values.guest * multiplier,
"The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" This metric is irrelevant for ClickHouse, but still exists for completeness."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
new_values["OSGuestNiceTime" + cpu_suffix] = { delta_values.guest_nice * multiplier,
"The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server."
" This metric is irrelevant for ClickHouse, but still exists for completeness."
" The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."};
applyCPUMetricsUpdate(new_values, cpu_suffix, delta_values, multiplier);
}
prev_values = current_values;
@ -978,38 +1169,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
Float64 num_cpus_to_normalize = max_cpu_cgroups > 0 ? max_cpu_cgroups : num_cpus;
if (num_cpus_to_normalize > 0)
{
new_values["OSUserTimeNormalized"] = { delta_values_all_cpus.user * multiplier / num_cpus_to_normalize,
"The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSNiceTimeNormalized"] = { delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize,
"The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSSystemTimeNormalized"] = { delta_values_all_cpus.system * multiplier / num_cpus_to_normalize,
"The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSIdleTimeNormalized"] = { delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize,
"The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSIOWaitTimeNormalized"] = { delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize,
"The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSIrqTimeNormalized"] = { delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize,
"The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSSoftIrqTimeNormalized"] = { delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize,
"The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSStealTimeNormalized"] = { delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize,
"The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSGuestTimeNormalized"] = { delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize,
"The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
new_values["OSGuestNiceTimeNormalized"] = { delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize,
"The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores."
" This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."};
}
applyNormalizedCPUMetricsUpdate(new_values, num_cpus_to_normalize, delta_values_all_cpus, multiplier);
}
proc_stat_values_other = current_other_values;
@ -1042,8 +1202,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
if (meminfo)
else if (meminfo)
{
try
{

View File

@ -126,6 +126,8 @@ private:
std::optional<ReadBufferFromFilePRead> cgroupcpu_cfs_period TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> cgroupcpu_cfs_quota TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> cgroupcpu_max TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> cgroupcpu_stat TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> cgroupcpuacct_stat TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> vm_max_map_count TSA_GUARDED_BY(data_mutex);
std::optional<ReadBufferFromFilePRead> vm_maps TSA_GUARDED_BY(data_mutex);
@ -221,6 +223,16 @@ private:
void openBlockDevices();
void openSensorsChips();
void openEDAC();
void applyCPUMetricsUpdate(
AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier);
void applyNormalizedCPUMetricsUpdate(
AsynchronousMetricValues & new_values,
double num_cpus_to_normalize,
const ProcStatValuesCPU & delta_values_all_cpus,
double multiplier);
#endif
void run();

View File

@ -144,31 +144,6 @@ private:
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
/// systems existed only for a short transition period.
std::optional<std::string> getCgroupsV2Path()
{
if (!cgroupsV2Enabled())
return {};
if (!cgroupsV2MemoryControllerEnabled())
return {};
fs::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Return the bottom-most nested current memory file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
while (current_cgroup != default_cgroups_mount.parent_path())
{
const auto current_path = current_cgroup / "memory.current";
const auto stat_path = current_cgroup / "memory.stat";
if (fs::exists(current_path) && fs::exists(stat_path))
return {current_cgroup};
current_cgroup = current_cgroup.parent_path();
}
return {};
}
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
@ -179,7 +154,7 @@ std::optional<std::string> getCgroupsV1Path()
std::pair<std::string, CgroupsMemoryUsageObserver::CgroupsVersion> getCgroupsPath()
{
auto v2_path = getCgroupsV2Path();
auto v2_path = getCgroupsV2PathContainingFile("memory.current");
if (v2_path.has_value())
return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2};
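For context, a minimal sketch of the generalized helper this hunk switches to, assuming the `cgroupsV2Enabled`/`cgroupV2PathOfProcess` helpers and the `default_cgroups_mount` constant used by the removed code (the actual implementation lives in base/cgroupsv2.cpp):
std::optional<std::string> getCgroupsV2PathContainingFile(std::string_view file_name)
{
    if (!cgroupsV2Enabled())
        return {};
    fs::path current_cgroup = cgroupV2PathOfProcess();
    if (current_cgroup.empty())
        return {};
    /// Settings are inherited: if the file is absent at the current level,
    /// retry at the parent level, up to the cgroups mount point.
    while (current_cgroup != default_cgroups_mount.parent_path())
    {
        if (fs::exists(current_cgroup / file_name))
            return {current_cgroup / ""}; /// trailing '/' so that callers can append the file name
        current_cgroup = current_cgroup.parent_path();
    }
    return {};
}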

View File

@ -307,7 +307,7 @@
M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \
M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \
\
M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \
M(DiskS3NoSuchKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \
#ifdef APPLY_FOR_EXTERNAL_METRICS
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M)

View File

@ -209,8 +209,35 @@
\
M(Merge, "Number of launched background merges.") \
M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \
M(MergedColumns, "Number of columns merged during the horizontal stage of merges.") \
M(GatheredColumns, "Number of columns gathered during the vertical stage of merges.") \
M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before the merge.") \
M(MergesTimeMilliseconds, "Total time spent for background merges.")\
M(MergeTotalMilliseconds, "Total time spent for background merges") \
M(MergeExecuteMilliseconds, "Total busy time spent for execution of background merges") \
M(MergeHorizontalStageTotalMilliseconds, "Total time spent for horizontal stage of background merges") \
M(MergeHorizontalStageExecuteMilliseconds, "Total busy time spent for execution of horizontal stage of background merges") \
M(MergeVerticalStageTotalMilliseconds, "Total time spent for vertical stage of background merges") \
M(MergeVerticalStageExecuteMilliseconds, "Total busy time spent for execution of vertical stage of background merges") \
M(MergeProjectionStageTotalMilliseconds, "Total time spent for projection stage of background merges") \
M(MergeProjectionStageExecuteMilliseconds, "Total busy time spent for execution of projection stage of background merges") \
\
M(MergingSortedMilliseconds, "Total time spent while merging sorted columns") \
M(AggregatingSortedMilliseconds, "Total time spent while aggregating sorted columns") \
M(CollapsingSortedMilliseconds, "Total time spent while collapsing sorted columns") \
M(ReplacingSortedMilliseconds, "Total time spent while replacing sorted columns") \
M(SummingSortedMilliseconds, "Total time spent while summing sorted columns") \
M(VersionedCollapsingSortedMilliseconds, "Total time spent while version collapsing sorted columns") \
M(GatheringColumnMilliseconds, "Total time spent while gathering columns for vertical merge") \
\
M(MutationTotalParts, "Total number of parts on which mutations were attempted") \
M(MutationUntouchedParts, "Total number of parts on which mutations were attempted but which were completely skipped according to the predicate") \
M(MutatedRows, "Rows read for mutations. This is the number of rows before mutation") \
M(MutatedUncompressedBytes, "Uncompressed bytes (for columns as they are stored in memory) that were read for mutations. This is the number before the mutation.") \
M(MutationTotalMilliseconds, "Total time spent for mutations.") \
M(MutationExecuteMilliseconds, "Total busy time spent for execution of mutations.") \
M(MutationAllPartColumns, "Number of times a task to mutate all columns in a part was created") \
M(MutationSomePartColumns, "Number of times a task to mutate some columns in a part was created") \
M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections in mutations.") \
\
M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \
@ -225,7 +252,6 @@
M(MergeTreeDataWriterProjectionsCalculationMicroseconds, "Time spent calculating projections") \
M(MergeTreeDataProjectionWriterSortingBlocksMicroseconds, "Time spent sorting blocks (for projection it might be a key different from table's sorting key)") \
M(MergeTreeDataProjectionWriterMergingBlocksMicroseconds, "Time spent merging blocks") \
M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \
\
M(InsertedWideParts, "Number of parts inserted in Wide format.") \
M(InsertedCompactParts, "Number of parts inserted in Compact format.") \

View File

@ -184,14 +184,20 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi
// Resource update leads to loss of runtime data of nodes and may lead to temporary violation of constraints (e.g. limits)
// Try to minimise this by reusing "equal" resources (initialized with the same configuration).
std::vector<State::ResourcePtr> resources_to_attach;
for (auto & [name, new_resource] : new_state->resources)
{
if (auto iter = state->resources.find(name); iter != state->resources.end()) // Resource update
{
State::ResourcePtr old_resource = iter->second;
if (old_resource->equals(*new_resource))
{
new_resource = old_resource; // Rewrite with older version to avoid loss of runtime data
continue;
}
}
// It is new or updated resource
resources_to_attach.emplace_back(new_resource);
}
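// E.g. if only the `network` resource changed in the new config (names hypothetical),
// `disk` keeps its old State::Resource and hence its runtime data, and only `network`
// ends up in resources_to_attach.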
// Commit new state
@ -199,17 +205,14 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi
state = new_state;
// Attach new and updated resources to the scheduler
for (auto & [name, resource] : new_state->resources)
for (auto & resource : resources_to_attach)
{
const SchedulerNodePtr & root = resource->nodes.find("/")->second.ptr;
if (root->parent == nullptr)
resource->attached_to = &scheduler;
scheduler.event_queue->enqueue([this, root]
{
resource->attached_to = &scheduler;
scheduler.event_queue->enqueue([this, root]
{
scheduler.attachChild(root);
});
}
scheduler.attachChild(root);
});
}
// NOTE: after mutex unlock `state` became available for Classifier(s) and must be immutable

View File

@ -83,6 +83,9 @@ static constexpr auto DBMS_MIN_REVISION_WITH_SYSTEM_KEYWORDS_TABLE = 54468;
static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469;
/// Packet size header
static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS = 54470;
/// Version of ClickHouse TCP protocol.
///
/// Should be incremented manually on protocol changes.
@ -90,6 +93,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469;
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing in common with VERSION_REVISION;
/// the latter is just a number for the server version (one number instead of a commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54469;
static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54470;
}

View File

@ -593,7 +593,6 @@ class IColumn;
M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \
M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \
M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes", 0) \
M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete.", 0) \
M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. This is useful for debugging and \"undelete\" scenarios", 0) \
M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \
M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \

View File

@ -104,7 +104,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."},
{"collect_hash_table_stats_during_joins", false, true, "New setting."},
{"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."},
{"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."}, {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."},
{"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."},
{"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
{"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"},
{"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"},

View File

@ -175,7 +175,8 @@ IMPLEMENT_SETTING_ENUM(ParallelReplicasCustomKeyFilterType, ErrorCodes::BAD_ARGU
IMPLEMENT_SETTING_ENUM(LightweightMutationProjectionMode, ErrorCodes::BAD_ARGUMENTS,
{{"throw", LightweightMutationProjectionMode::THROW},
{"drop", LightweightMutationProjectionMode::DROP}})
{"drop", LightweightMutationProjectionMode::DROP},
{"rebuild", LightweightMutationProjectionMode::REBUILD}})
IMPLEMENT_SETTING_ENUM(DeduplicateMergeProjectionMode, ErrorCodes::BAD_ARGUMENTS,
{{"throw", DeduplicateMergeProjectionMode::THROW},

View File

@ -311,6 +311,7 @@ enum class LightweightMutationProjectionMode : uint8_t
{
THROW,
DROP,
REBUILD,
};
DECLARE_SETTING_ENUM(LightweightMutationProjectionMode)

View File

@ -7,6 +7,7 @@
#include <DataTypes/NestedUtils.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Columns/ColumnDynamic.h>
#include <Columns/ColumnVariant.h>
#include <Core/Field.h>
@ -14,6 +15,7 @@
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <IO/ReadBufferFromMemory.h>
namespace DB
{
@ -71,8 +73,8 @@ static DataTypePtr create(const ASTPtr & arguments)
auto * literal = argument->arguments->children[1]->as<ASTLiteral>();
if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet<UInt64>() == 0 || literal->value.safeGet<UInt64>() > ColumnVariant::MAX_NESTED_COLUMNS)
throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 1 and 255");
if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet<UInt64>() > ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT)
throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 0 and {}", ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT);
return std::make_shared<DataTypeDynamic>(literal->value.safeGet<UInt64>());
}
@ -84,30 +86,72 @@ void registerDataTypeDynamic(DataTypeFactory & factory)
std::unique_ptr<IDataType::SubstreamData> DataTypeDynamic::getDynamicSubcolumnData(std::string_view subcolumn_name, const DB::IDataType::SubstreamData & data, bool throw_if_null) const
{
auto [subcolumn_type_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name);
auto [type_subcolumn_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name);
/// Check if requested subcolumn is a valid data type.
auto subcolumn_type = DataTypeFactory::instance().tryGet(String(subcolumn_type_name));
auto subcolumn_type = DataTypeFactory::instance().tryGet(String(type_subcolumn_name));
if (!subcolumn_type)
{
if (throw_if_null)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", subcolumn_type_name);
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", type_subcolumn_name);
return nullptr;
}
std::unique_ptr<SubstreamData> res = std::make_unique<SubstreamData>(subcolumn_type->getDefaultSerialization());
res->type = subcolumn_type;
std::optional<ColumnVariant::Discriminator> discriminator;
ColumnPtr null_map_for_variant_from_shared_variant;
if (data.column)
{
/// If column was provided, we should extract subcolumn from Dynamic column.
const auto & dynamic_column = assert_cast<const ColumnDynamic &>(*data.column);
const auto & variant_info = dynamic_column.getVariantInfo();
const auto & variant_column = dynamic_column.getVariantColumn();
const auto & shared_variant = dynamic_column.getSharedVariant();
/// Check if provided Dynamic column has subcolumn of this type.
auto it = variant_info.variant_name_to_discriminator.find(subcolumn_type->getName());
String subcolumn_type_name = subcolumn_type->getName();
auto it = variant_info.variant_name_to_discriminator.find(subcolumn_type_name);
if (it != variant_info.variant_name_to_discriminator.end())
{
discriminator = it->second;
res->column = dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(*discriminator);
res->column = variant_column.getVariantPtrByGlobalDiscriminator(*discriminator);
}
/// Otherwise if there is data in shared variant try to find requested type there.
else if (!shared_variant.empty())
{
/// Create null map for resulting subcolumn to make it Nullable.
auto null_map_column = ColumnUInt8::create();
NullMap & null_map = assert_cast<ColumnUInt8 &>(*null_map_column).getData();
null_map.reserve(variant_column.size());
auto subcolumn = subcolumn_type->createColumn();
auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(dynamic_column.getSharedVariantDiscriminator());
const auto & local_discriminators = variant_column.getLocalDiscriminators();
const auto & offsets = variant_column.getOffsets();
const FormatSettings format_settings;
for (size_t i = 0; i != local_discriminators.size(); ++i)
{
if (local_discriminators[i] == shared_variant_local_discr)
{
auto value = shared_variant.getDataAt(offsets[i]);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
if (type->getName() == subcolumn_type_name)
{
dynamic_column.getVariantSerialization(subcolumn_type, subcolumn_type_name)->deserializeBinary(*subcolumn, buf, format_settings);
null_map.push_back(0);
}
else
{
null_map.push_back(1);
}
}
else
{
null_map.push_back(1);
}
}
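/// Illustrative example (hypothetical data): for Dynamic rows
/// [42 :: Int64 stored in the shared variant, 'abc' :: String, NULL] and a requested
/// Int64 subcolumn, this loop yields subcolumn = [42] and null_map = [0, 1, 1];
/// the VariantSubcolumnCreator used below then expands the dense subcolumn into
/// the full Nullable(Int64) column [42, NULL, NULL].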
res->column = std::move(subcolumn);
null_map_for_variant_from_shared_variant = std::move(null_map_column);
}
}
@ -125,7 +169,7 @@ std::unique_ptr<IDataType::SubstreamData> DataTypeDynamic::getDynamicSubcolumnDa
return nullptr;
}
res->serialization = std::make_shared<SerializationDynamicElement>(res->serialization, subcolumn_type->getName(), is_null_map_subcolumn);
res->serialization = std::make_shared<SerializationDynamicElement>(res->serialization, subcolumn_type->getName(), String(subcolumn_nested_name), is_null_map_subcolumn);
/// Make resulting subcolumn Nullable only if type subcolumn can be inside Nullable or can be LowCardinality(Nullable()).
bool make_subcolumn_nullable = subcolumn_type->canBeInsideNullable() || subcolumn_type->lowCardinality();
if (!is_null_map_subcolumn && make_subcolumn_nullable)
@ -133,10 +177,10 @@ std::unique_ptr<IDataType::SubstreamData> DataTypeDynamic::getDynamicSubcolumnDa
if (data.column)
{
/// Check if provided Dynamic column has subcolumn of this type. In this case we should use VariantSubcolumnCreator/VariantNullMapSubcolumnCreator to
/// create full subcolumn from variant according to discriminators.
if (discriminator)
{
/// Provided Dynamic column has subcolumn of this type, we should use VariantSubcolumnCreator/VariantNullMapSubcolumnCreator to
/// create full subcolumn from variant according to discriminators.
const auto & variant_column = assert_cast<const ColumnDynamic &>(*data.column).getVariantColumn();
std::unique_ptr<ISerialization::ISubcolumnCreator> creator;
if (is_null_map_subcolumn)
@ -154,6 +198,21 @@ std::unique_ptr<IDataType::SubstreamData> DataTypeDynamic::getDynamicSubcolumnDa
make_subcolumn_nullable);
res->column = creator->create(res->column);
}
/// Check if requested type was extracted from shared variant. In this case we should use
/// VariantSubcolumnCreator to create full subcolumn from variant according to created null map.
else if (null_map_for_variant_from_shared_variant)
{
if (is_null_map_subcolumn)
{
res->column = null_map_for_variant_from_shared_variant;
}
else
{
SerializationVariantElement::VariantSubcolumnCreator creator(
null_map_for_variant_from_shared_variant, "", 0, 0, make_subcolumn_nullable, null_map_for_variant_from_shared_variant);
res->column = creator.create(res->column);
}
}
/// Provided Dynamic column doesn't have subcolumn of this type, just create column filled with default values.
else if (is_null_map_subcolumn)
{

View File

@ -150,6 +150,12 @@ DataTypePtr DataTypeFactory::getCustom(DataTypeCustomDescPtr customization) cons
return type;
}
DataTypePtr DataTypeFactory::getCustom(const String & base_name, DataTypeCustomDescPtr customization) const
{
auto type = get(base_name);
type->setCustomization(std::move(customization));
return type;
}
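/// Hypothetical call site (names for illustration): wrap an existing base type
/// under a customization without going through an AST:
///     auto type = DataTypeFactory::instance().getCustom("UInt8", std::move(custom_desc));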
void DataTypeFactory::registerDataType(const String & family_name, Value creator, Case case_sensitiveness)
{

View File

@ -34,6 +34,7 @@ public:
DataTypePtr get(const String & family_name, const ASTPtr & parameters) const;
DataTypePtr get(const ASTPtr & ast) const;
DataTypePtr getCustom(DataTypeCustomDescPtr customization) const;
DataTypePtr getCustom(const String & base_name, DataTypeCustomDescPtr customization) const;
/// Return nullptr in case of error.
DataTypePtr tryGet(const String & full_name) const;

View File

@ -192,17 +192,12 @@ MutableColumnPtr DataTypeTuple::createColumn() const
MutableColumnPtr DataTypeTuple::createColumn(const ISerialization & serialization) const
{
/// If we read Tuple as Variant subcolumn, it may be wrapped to SerializationVariantElement.
/// Here we don't need it, so we drop this wrapper.
const auto * current_serialization = &serialization;
while (const auto * serialization_variant_element = typeid_cast<const SerializationVariantElement *>(current_serialization))
current_serialization = serialization_variant_element->getNested().get();
/// If we read subcolumn of nested Tuple, it may be wrapped to SerializationNamed
/// If we read a subcolumn of a nested Tuple, or this Tuple is itself a subcolumn, it may be wrapped into SerializationWrapper
/// several times to allow reconstructing the substream path name.
/// Here we don't need the substream path name, so we drop the first several wrapper serializations.
while (const auto * serialization_named = typeid_cast<const SerializationNamed *>(current_serialization))
current_serialization = serialization_named->getNested().get();
const auto * current_serialization = &serialization;
while (const auto * serialization_wrapper = dynamic_cast<const SerializationWrapper *>(current_serialization))
current_serialization = serialization_wrapper->getNested().get();
const auto * serialization_tuple = typeid_cast<const SerializationTuple *>(current_serialization);
if (!serialization_tuple)

View File

@ -117,7 +117,7 @@ bool DataTypeVariant::equals(const IDataType & rhs) const
/// The same data types with different custom names considered different.
/// For example, UInt8 and Bool.
if ((variants[i]->hasCustomName() || rhs_variant.variants[i]) && variants[i]->getName() != rhs_variant.variants[i]->getName())
if ((variants[i]->hasCustomName() || rhs_variant.variants[i]->hasCustomName()) && variants[i]->getName() != rhs_variant.variants[i]->getName())
return false;
}

View File

@ -444,7 +444,7 @@ void encodeDataType(const DataTypePtr & type, WriteBuffer & buf)
case BinaryTypeIndex::Dynamic:
{
const auto & dynamic_type = assert_cast<const DataTypeDynamic &>(*type);
/// Maximum number of dynamic types is 255, we can write it as 1 byte.
/// Maximum number of dynamic types is 254, we can write it as 1 byte.
writeBinary(UInt8(dynamic_type.getMaxDynamicTypes()), buf);
break;
}

View File

@ -27,15 +27,21 @@ namespace ErrorCodes
struct SerializeBinaryBulkStateDynamic : public ISerialization::SerializeBinaryBulkState
{
SerializationDynamic::DynamicStructureSerializationVersion structure_version;
size_t max_dynamic_types;
DataTypePtr variant_type;
Names variant_names;
SerializationPtr variant_serialization;
ISerialization::SerializeBinaryBulkStatePtr variant_state;
/// Variants statistics. Map (Variant name) -> (Variant size).
ColumnDynamic::Statistics statistics = { .source = ColumnDynamic::Statistics::Source::READ, .data = {} };
/// Variants statistics.
ColumnDynamic::Statistics statistics;
/// If true, statistics will be recalculated during serialization.
bool recalculate_statistics = false;
explicit SerializeBinaryBulkStateDynamic(UInt64 structure_version_) : structure_version(structure_version_) {}
explicit SerializeBinaryBulkStateDynamic(UInt64 structure_version_)
: structure_version(structure_version_), statistics(ColumnDynamic::Statistics::Source::READ)
{
}
};
struct DeserializeBinaryBulkStateDynamic : public ISerialization::DeserializeBinaryBulkState
@ -106,20 +112,41 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix(
writeBinaryLittleEndian(structure_version, *stream);
auto dynamic_state = std::make_shared<SerializeBinaryBulkStateDynamic>(structure_version);
dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes();
/// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types
/// that is specified in the Dynamic type (we could decrease it before merge).
writeBinaryLittleEndian(dynamic_state->max_dynamic_types, *stream);
dynamic_state->variant_type = variant_info.variant_type;
dynamic_state->variant_names = variant_info.variant_names;
const auto & variant_column = column_dynamic.getVariantColumn();
/// Write internal Variant type name.
/// Write information about variants.
size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it.
writeBinaryLittleEndian(num_variants, *stream);
if (settings.data_types_binary_encoding)
encodeDataType(dynamic_state->variant_type, *stream);
{
const auto & variants = assert_cast<const DataTypeVariant &>(*dynamic_state->variant_type).getVariants();
for (const auto & variant: variants)
{
if (variant->getName() != ColumnDynamic::getSharedVariantTypeName())
encodeDataType(variant, *stream);
}
}
else
writeStringBinary(dynamic_state->variant_type->getName(), *stream);
{
for (const auto & name : dynamic_state->variant_names)
{
if (name != ColumnDynamic::getSharedVariantTypeName())
writeStringBinary(name, *stream);
}
}
/// Write statistics in prefix if needed.
if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX)
{
const auto & statistics = column_dynamic.getStatistics();
/// First, write statistics for usual variants.
for (size_t i = 0; i != variant_info.variant_names.size(); ++i)
{
size_t size = 0;
@ -129,13 +156,55 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix(
/// - statistics read from the data part during deserialization of Dynamic column (Statistics::Source::READ).
/// We can rely only on statistics calculated during the merge, because column with statistics that was read
/// during deserialization from some data part could be filtered/limited/transformed/etc and so the statistics can be outdated.
if (!statistics.data.empty() && statistics.source == ColumnDynamic::Statistics::Source::MERGE)
size = statistics.data.at(variant_info.variant_names[i]);
if (statistics && statistics->source == ColumnDynamic::Statistics::Source::MERGE)
size = statistics->variants_statistics.at(variant_info.variant_names[i]);
/// Otherwise we can use only variant sizes from current column.
else
size = variant_column.getVariantByGlobalDiscriminator(i).size();
writeVarUInt(size, *stream);
}
/// Second, write statistics for variants in shared variant.
/// Check if we have statistics calculated during merge of some data parts (Statistics::Source::MERGE).
if (statistics && statistics->source == ColumnDynamic::Statistics::Source::MERGE)
{
writeVarUInt(statistics->shared_variants_statistics.size(), *stream);
for (const auto & [variant_name, size] : statistics->shared_variants_statistics)
{
writeStringBinary(variant_name, *stream);
writeVarUInt(size, *stream);
}
}
/// If we don't have statistics for shared variants from merge, calculate it from the column.
else
{
std::unordered_map<String, size_t> shared_variants_statistics;
const auto & shared_variant = column_dynamic.getSharedVariant();
for (size_t i = 0; i != shared_variant.size(); ++i)
{
auto value = shared_variant.getDataAt(i);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
auto type_name = type->getName();
if (auto it = shared_variants_statistics.find(type_name); it != shared_variants_statistics.end())
++it->second;
else if (shared_variants_statistics.size() < ColumnDynamic::Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE)
shared_variants_statistics.emplace(type_name, 1);
}
writeVarUInt(shared_variants_statistics.size(), *stream);
for (const auto & [variant_name, size] : shared_variants_statistics)
{
writeStringBinary(variant_name, *stream);
writeVarUInt(size, *stream);
}
}
}
/// Otherwise statistics will be written in the suffix; in that case we recalculate
/// statistics during serialization to make them more precise.
else
{
dynamic_state->recalculate_statistics = true;
}
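/// Resulting prefix layout (a sketch of what the code above has written): structure
/// version, max_dynamic_types, the number of variants excluding the shared one, the
/// variants themselves (binary-encoded types or names), and, in PREFIX statistics
/// mode, per-variant sizes followed by the (capped) shared-variant statistics map.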
dynamic_state->variant_serialization = dynamic_state->variant_type->getDefaultSerialization();
@ -182,33 +251,58 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD
UInt64 structure_version;
readBinaryLittleEndian(structure_version, *structure_stream);
auto structure_state = std::make_shared<DeserializeBinaryBulkStateDynamicStructure>(structure_version);
/// Read internal Variant type name.
/// Read max_dynamic_types parameter.
readBinaryLittleEndian(structure_state->max_dynamic_types, *structure_stream);
/// Read information about variants.
DataTypes variants;
size_t num_variants;
readBinaryLittleEndian(num_variants, *structure_stream);
variants.reserve(num_variants + 1); /// +1 for shared variant.
if (settings.data_types_binary_encoding)
{
structure_state->variant_type = decodeDataType(*structure_stream);
for (size_t i = 0; i != num_variants; ++i)
variants.push_back(decodeDataType(*structure_stream));
}
else
{
String data_type_name;
readStringBinary(data_type_name, *structure_stream);
structure_state->variant_type = DataTypeFactory::instance().get(data_type_name);
for (size_t i = 0; i != num_variants; ++i)
{
readStringBinary(data_type_name, *structure_stream);
variants.push_back(DataTypeFactory::instance().get(data_type_name));
}
}
const auto * variant_type = typeid_cast<const DataTypeVariant *>(structure_state->variant_type.get());
if (!variant_type)
throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type of Dynamic nested column, expected Variant, got {}", structure_state->variant_type->getName());
/// Add shared variant, Dynamic column should always have it.
variants.push_back(ColumnDynamic::getSharedVariantDataType());
auto variant_type = std::make_shared<DataTypeVariant>(variants);
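/// E.g. (hypothetical) if the prefix listed two variants, Int64 and String, `variants`
/// now holds them plus the implicitly-added shared variant, and `variant_type` matches
/// the internal Variant type the writer serialized from.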
/// Read statistics.
if (settings.dynamic_read_statistics)
{
const auto & variants = variant_type->getVariants();
ColumnDynamic::Statistics statistics(ColumnDynamic::Statistics::Source::READ);
/// First, read statistics for usual variants.
size_t variant_size;
for (const auto & variant : variants)
for (const auto & variant : variant_type->getVariants())
{
readVarUInt(variant_size, *structure_stream);
structure_state->statistics.data[variant->getName()] = variant_size;
statistics.variants_statistics[variant->getName()] = variant_size;
}
/// Second, read statistics for shared variants.
size_t statistics_size;
readVarUInt(statistics_size, *structure_stream);
String variant_name;
for (size_t i = 0; i != statistics_size; ++i)
{
readStringBinary(variant_name, *structure_stream);
readVarUInt(variant_size, *structure_stream);
statistics.shared_variants_statistics[variant_name] = variant_size;
}
structure_state->statistics = std::make_shared<const ColumnDynamic::Statistics>(std::move(statistics));
}
structure_state->variant_type = std::move(variant_type);
state = structure_state;
addToSubstreamsDeserializeStatesCache(cache, settings.path, state);
}
@ -231,8 +325,16 @@ void SerializationDynamic::serializeBinaryBulkStateSuffix(
/// Write statistics in suffix if needed.
if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX)
{
/// First, write statistics for usual variants.
for (const auto & variant_name : dynamic_state->variant_names)
writeVarUInt(dynamic_state->statistics.data[variant_name], *stream);
writeVarUInt(dynamic_state->statistics.variants_statistics[variant_name], *stream);
/// Second, write statistics for shared variants.
writeVarUInt(dynamic_state->statistics.shared_variants_statistics.size(), *stream);
for (const auto & [variant_name, size] : dynamic_state->statistics.shared_variants_statistics)
{
writeStringBinary(variant_name, *stream);
writeVarUInt(size, *stream);
}
}
settings.path.push_back(Substream::DynamicData);
@ -255,9 +357,42 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams(
if (!variant_info.variant_type->equals(*dynamic_state->variant_type))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", dynamic_state->variant_type->getName(), variant_info.variant_type->getName());
if (column_dynamic.getMaxDynamicTypes() != dynamic_state->max_dynamic_types)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes());
settings.path.push_back(Substream::DynamicData);
if (dynamic_state->recalculate_statistics)
{
assert_cast<const SerializationVariant &>(*dynamic_state->variant_serialization)
.serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.variants_statistics);
/// Calculate statistics for shared variants.
const auto & shared_variant = column_dynamic.getSharedVariant();
if (!shared_variant.empty())
{
const auto & local_discriminators = variant_column->getLocalDiscriminators();
const auto & offsets = variant_column->getOffsets();
const auto shared_variant_discr = variant_column->localDiscriminatorByGlobal(column_dynamic.getSharedVariantDiscriminator());
size_t end = limit == 0 || offset + limit > local_discriminators.size() ? local_discriminators.size() : offset + limit;
for (size_t i = offset; i != end; ++i)
{
if (local_discriminators[i] == shared_variant_discr)
{
auto value = shared_variant.getDataAt(offsets[i]);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
auto type_name = type->getName();
if (auto it = dynamic_state->statistics.shared_variants_statistics.find(type_name); it != dynamic_state->statistics.shared_variants_statistics.end())
++it->second;
else if (dynamic_state->statistics.shared_variants_statistics.size() < ColumnDynamic::Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE)
dynamic_state->statistics.shared_variants_statistics.emplace(type_name, 1);
}
}
}
}
else
{
assert_cast<const SerializationVariant &>(*dynamic_state->variant_serialization).serializeBinaryBulkWithMultipleStreams(*variant_column, offset, limit, settings, dynamic_state->variant_state);
}
settings.path.pop_back();
}
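/// A minimal sketch of how a shared-variant row decodes, assuming the format used in the loop above:
/// each row is a String whose bytes are the binary-encoded data type followed by the value in that
/// type's binary serialization, so recomputing statistics needs only the type header:
///     auto value = shared_variant.getDataAt(row);      /// "<encoded type><binary value>"
///     ReadBufferFromMemory buf(value.data, value.size);
///     auto type = decodeDataType(buf);                 /// consumes just the type header
///     ++statistics[type->getName()];                   /// the value bytes are never materialized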
@ -272,13 +407,17 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams(
return;
auto mutable_column = column->assumeMutable();
auto & column_dynamic = assert_cast<ColumnDynamic &>(*mutable_column);
auto * dynamic_state = checkAndGetState<DeserializeBinaryBulkStateDynamic>(state);
auto * structure_state = checkAndGetState<DeserializeBinaryBulkStateDynamicStructure>(dynamic_state->structure_state);
if (mutable_column->empty())
{
column_dynamic.setMaxDynamicPaths(structure_state->max_dynamic_types);
column_dynamic.setVariantType(structure_state->variant_type);
column_dynamic.setStatistics(structure_state->statistics);
}
const auto & variant_info = column_dynamic.getVariantInfo();
if (!variant_info.variant_type->equals(*structure_state->variant_type))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", structure_state->variant_type->getName(), variant_info.variant_type->getName());
@ -329,24 +468,42 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu
encodeDataType(std::make_shared<DataTypeNothing>(), ostr);
return;
}
/// Check if this value is in shared variant. In this case it's already
/// in desired binary format.
else if (global_discr == dynamic_column.getSharedVariantDiscriminator())
{
auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(row_num));
ostr.write(value.data, value.size);
return;
}
const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(global_discr);
const auto & variant_type_name = variant_info.variant_names[global_discr];
encodeDataType(variant_type, ostr);
dynamic_column.getVariantSerialization(variant_type, variant_type_name)->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings);
}
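/// A sketch of the resulting wire format, inferred from the branches above: a single Dynamic value
/// is always "<encoded type><value in that type's binary serialization>":
///     NULL                   -> encodeDataType(Nothing), no payload
///     shared-variant value   -> its bytes copied verbatim (already stored in this format)
///     regular variant value  -> encodeDataType(T) followed by the variant's binary serialization of the row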
template <typename ReturnType = void, typename DeserializeFunc>
static ReturnType deserializeVariant(
ColumnVariant & variant_column,
const SerializationPtr & variant_serialization,
ColumnVariant::Discriminator global_discr,
ReadBuffer & istr,
DeserializeFunc deserialize)
{
auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discr);
if constexpr (std::is_same_v<ReturnType, bool>)
{
if (!deserialize(*variant_serialization, variant, istr))
return ReturnType(false);
}
else
{
deserialize(*variant_serialization, variant, istr);
}
variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(global_discr));
variant_column.getOffsets().push_back(variant.size() - 1);
return ReturnType(true);
}
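/// A hypothetical usage sketch of the bool-returning form (serialization, global_discr, buf and
/// try_parse are placeholders, not names from this commit): when the callback reports failure, no
/// discriminator/offset is appended and false propagates to the caller:
///     bool ok = deserializeVariant<bool>(
///         variant_column, serialization, global_discr, buf,
///         [&](const ISerialization & s, IColumn & v, ReadBuffer & b) { return try_parse(s, v, b); });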
void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -360,11 +517,12 @@ void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr
}
auto variant_type_name = variant_type->getName();
const auto & variant_serialization = dynamic_column.getVariantSerialization(variant_type, variant_type_name);
const auto & variant_info = dynamic_column.getVariantInfo();
auto it = variant_info.variant_name_to_discriminator.find(variant_type_name);
if (it != variant_info.variant_name_to_discriminator.end())
{
deserializeVariant(dynamic_column.getVariantColumn(), variant_serialization, it->second, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); });
return;
}
@ -372,25 +530,15 @@ void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr
if (dynamic_column.addNewVariant(variant_type))
{
auto discr = variant_info.variant_name_to_discriminator.at(variant_type_name);
deserializeVariant(dynamic_column.getVariantColumn(), variant_serialization, discr, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); });
return;
}
/// We reached maximum number of variants and couldn't add new variant.
/// This case should be really rare in real use cases.
/// In this case we insert this value into shared variant in binary form.
auto tmp_variant_column = variant_type->createColumn();
variant_serialization->deserializeBinary(*tmp_variant_column, istr, settings);
dynamic_column.insertValueIntoSharedVariant(*tmp_variant_column, variant_type, variant_type_name, 0);
}
template <typename ReadFieldFunc, typename TryDeserializeVariantFunc, typename DeserializeVariant>
@ -406,6 +554,7 @@ static void deserializeTextImpl(
auto & dynamic_column = assert_cast<ColumnDynamic &>(column);
auto & variant_column = dynamic_column.getVariantColumn();
const auto & variant_info = dynamic_column.getVariantInfo();
const auto & variant_types = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariants();
String field = read_field(istr);
auto field_buf = std::make_unique<ReadBufferFromString>(field);
JSONInferenceInfo json_info;
@ -413,27 +562,81 @@ static void deserializeTextImpl(
if (escaping_rule == FormatSettings::EscapingRule::JSON)
transformFinalInferredJSONTypeIfNeeded(variant_type, settings, &json_info);
/// If inferred type is not complete, we cannot add it as a new variant.
/// Let's try to deserialize this field into existing variants.
/// If failed, insert this value as String.
if (!checkIfTypeIsComplete(variant_type))
{
size_t shared_variant_discr = dynamic_column.getSharedVariantDiscriminator();
for (size_t i = 0; i != variant_types.size(); ++i)
{
field_buf = std::make_unique<ReadBufferFromString>(field);
if (i != shared_variant_discr
&& deserializeVariant<bool>(
variant_column,
dynamic_column.getVariantSerialization(variant_types[i], variant_info.variant_names[i]),
i,
*field_buf,
try_deserialize_variant))
return;
}
variant_type = std::make_shared<DataTypeString>();
/// To be able to deserialize field as String with Quoted escaping rule, it should be quoted.
if (escaping_rule == FormatSettings::EscapingRule::Quoted && (field.size() < 2 || field.front() != '\'' || field.back() != '\''))
field = "'" + field + "'";
}
else if (dynamic_column.addNewVariant(variant_type, variant_type->getName()))
{
auto discr = variant_info.variant_name_to_discriminator.at(variant_type->getName());
deserializeVariant(dynamic_column.getVariantColumn(), dynamic_column.getVariantSerialization(variant_type), discr, *field_buf, deserialize_variant);
return;
}
/// We couldn't infer type or add new variant. Insert it into shared variant.
auto tmp_variant_column = variant_type->createColumn();
field_buf = std::make_unique<ReadBufferFromString>(field);
auto variant_type_name = variant_type->getName();
deserialize_variant(*dynamic_column.getVariantSerialization(variant_type, variant_type_name), *tmp_variant_column, *field_buf);
dynamic_column.insertValueIntoSharedVariant(*tmp_variant_column, variant_type, variant_type_name, 0);
}
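/// Illustrative walk-through of the fallback order above (hypothetical input, not from this
/// commit): for a Dynamic column whose variant limit is already exhausted, a CSV field "[1,2,3]"
/// infers Array(Int64), fails addNewVariant, is parsed into a temporary Array(Int64) column and
/// appended to the shared variant in binary form; a field with an incomplete inferred type is
/// first retried against the existing variants and only then read as String (re-quoted for the
/// Quoted escaping rule).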
template <typename NestedSerialize>
static void serializeTextImpl(
const IColumn & column,
size_t row_num,
WriteBuffer & ostr,
const FormatSettings & settings,
NestedSerialize nested_serialize)
{
const auto & dynamic_column = assert_cast<const ColumnDynamic &>(column);
const auto & variant_column = dynamic_column.getVariantColumn();
/// Check if this row has value in shared variant. In this case we should first deserialize it from binary format.
if (variant_column.globalDiscriminatorAt(row_num) == dynamic_column.getSharedVariantDiscriminator())
{
auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(row_num));
ReadBufferFromMemory buf(value.data, value.size);
auto variant_type = decodeDataType(buf);
auto tmp_variant_column = variant_type->createColumn();
auto variant_serialization = dynamic_column.getVariantSerialization(variant_type);
variant_serialization->deserializeBinary(*tmp_variant_column, buf, settings);
nested_serialize(*variant_serialization, *tmp_variant_column, 0, ostr);
}
/// Otherwise just use serialization for Variant.
else
{
nested_serialize(*dynamic_column.getVariantInfo().variant_type->getDefaultSerialization(), variant_column, row_num, ostr);
}
}
void SerializationDynamic::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeTextCSV(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
void SerializationDynamic::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -466,8 +669,12 @@ bool SerializationDynamic::tryDeserializeTextCSV(DB::IColumn & column, DB::ReadB
void SerializationDynamic::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeTextEscaped(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
void SerializationDynamic::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -500,8 +707,12 @@ bool SerializationDynamic::tryDeserializeTextEscaped(DB::IColumn & column, DB::R
void SerializationDynamic::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeTextQuoted(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
void SerializationDynamic::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -534,8 +745,12 @@ bool SerializationDynamic::tryDeserializeTextQuoted(DB::IColumn & column, DB::Re
void SerializationDynamic::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeTextJSON(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
void SerializationDynamic::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -568,8 +783,12 @@ bool SerializationDynamic::tryDeserializeTextJSON(DB::IColumn & column, DB::Read
void SerializationDynamic::serializeTextRaw(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeTextRaw(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
void SerializationDynamic::deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -602,8 +821,12 @@ bool SerializationDynamic::tryDeserializeTextRaw(DB::IColumn & column, DB::ReadB
void SerializationDynamic::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeText(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
void SerializationDynamic::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
@ -636,8 +859,12 @@ bool SerializationDynamic::tryDeserializeWholeText(DB::IColumn & column, DB::Rea
void SerializationDynamic::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf)
{
serialization.serializeTextXML(col, row, buf, settings);
};
serializeTextImpl(column, row_num, ostr, settings, nested_serialize);
}
}

View File

@ -105,9 +105,13 @@ private:
{
DynamicStructureSerializationVersion structure_version;
DataTypePtr variant_type;
size_t max_dynamic_types;
ColumnDynamic::StatisticsPtr statistics;
explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_)
: structure_version(structure_version_)
{
}
};
size_t max_dynamic_types;

View File

@ -4,7 +4,10 @@
#include <DataTypes/Serializations/SerializationDynamic.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Columns/ColumnDynamic.h>
#include <Columns/ColumnLowCardinality.h>
#include <Formats/FormatSettings.h>
#include <IO/ReadHelpers.h>
namespace DB
@ -21,6 +24,8 @@ struct DeserializeBinaryBulkStateDynamicElement : public ISerialization::Deseria
ISerialization::DeserializeBinaryBulkStatePtr structure_state;
SerializationPtr variant_serialization;
ISerialization::DeserializeBinaryBulkStatePtr variant_element_state;
bool read_from_shared_variant;
ColumnPtr shared_variant;
};
void SerializationDynamicElement::enumerateStreams(
@ -73,9 +78,10 @@ void SerializationDynamicElement::deserializeBinaryBulkStatePrefix(
auto dynamic_element_state = std::make_shared<DeserializeBinaryBulkStateDynamicElement>();
dynamic_element_state->structure_state = std::move(structure_state);
const auto & variant_type = assert_cast<const DataTypeVariant &>(
*checkAndGetState<SerializationDynamic::DeserializeBinaryBulkStateDynamicStructure>(dynamic_element_state->structure_state)->variant_type);
/// Check if we actually have required element in the Variant.
if (auto global_discr = variant_type.tryGetVariantDiscriminator(dynamic_element_name))
{
settings.path.push_back(Substream::DynamicData);
if (is_null_map_subcolumn)
@ -83,6 +89,21 @@ void SerializationDynamicElement::deserializeBinaryBulkStatePrefix(
else
dynamic_element_state->variant_serialization = std::make_shared<SerializationVariantElement>(nested_serialization, dynamic_element_name, *global_discr);
dynamic_element_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_element_state->variant_element_state, cache);
dynamic_element_state->read_from_shared_variant = false;
settings.path.pop_back();
}
/// If we don't have this element in the Variant, we will read shared variant and try to find it there.
else
{
auto shared_variant_global_discr = variant_type.tryGetVariantDiscriminator(ColumnDynamic::getSharedVariantTypeName());
chassert(shared_variant_global_discr.has_value());
settings.path.push_back(Substream::DynamicData);
dynamic_element_state->variant_serialization = std::make_shared<SerializationVariantElement>(
ColumnDynamic::getSharedVariantDataType()->getDefaultSerialization(),
ColumnDynamic::getSharedVariantTypeName(),
*shared_variant_global_discr);
dynamic_element_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_element_state->variant_element_state, cache);
dynamic_element_state->read_from_shared_variant = true;
settings.path.pop_back();
}
@ -115,23 +136,103 @@ void SerializationDynamicElement::deserializeBinaryBulkWithMultipleStreams(
auto * dynamic_element_state = checkAndGetState<DeserializeBinaryBulkStateDynamicElement>(state);
/// Check if this subcolumn should not be read from shared variant.
/// In this case just read data from the corresponding variant.
if (!dynamic_element_state->read_from_shared_variant)
{
settings.path.push_back(Substream::DynamicData);
dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams(
result_column, limit, settings, dynamic_element_state->variant_element_state, cache);
settings.path.pop_back();
}
/// Otherwise, read the shared variant column and extract requested type from it.
else
{
settings.path.push_back(Substream::DynamicData);
/// Initialize shared_variant column if needed.
if (result_column->empty())
dynamic_element_state->shared_variant = makeNullable(ColumnDynamic::getSharedVariantDataType()->createColumn());
size_t prev_size = result_column->size();
dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams(
dynamic_element_state->shared_variant, limit, settings, dynamic_element_state->variant_element_state, cache);
settings.path.pop_back();
/// If we need to read a subcolumn from variant column, create an empty variant column, fill it and extract subcolumn.
auto variant_type = DataTypeFactory::instance().get(dynamic_element_name);
auto result_type = makeNullableOrLowCardinalityNullableSafe(variant_type);
MutableColumnPtr variant_column = nested_subcolumn.empty() || is_null_map_subcolumn ? result_column->assumeMutable() : result_type->createColumn();
variant_column->reserve(variant_column->size() + limit);
MutableColumnPtr non_nullable_variant_column = variant_column->assumeMutable();
NullMap * null_map = nullptr;
bool is_low_cardinality_nullable = isColumnLowCardinalityNullable(*variant_column);
/// Resulting subcolumn can be Nullable, but value is serialized in shared variant as non-Nullable.
/// Extract non-nullable column and remember the null map to fill it during deserialization.
if (isColumnNullable(*variant_column))
{
auto & nullable_variant_column = assert_cast<ColumnNullable &>(*variant_column);
non_nullable_variant_column = nullable_variant_column.getNestedColumnPtr()->assumeMutable();
null_map = &nullable_variant_column.getNullMapData();
}
else if (is_null_map_subcolumn)
{
null_map = &assert_cast<ColumnUInt8 &>(*variant_column).getData();
}
auto variant_serialization = variant_type->getDefaultSerialization();
const auto & nullable_shared_variant = assert_cast<const ColumnNullable &>(*dynamic_element_state->shared_variant);
const auto & shared_null_map = nullable_shared_variant.getNullMapData();
const auto & shared_variant = assert_cast<const ColumnString &>(nullable_shared_variant.getNestedColumn());
const FormatSettings format_settings;
for (size_t i = prev_size; i != shared_variant.size(); ++i)
{
if (!shared_null_map[i])
{
auto value = shared_variant.getDataAt(i);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
if (type->getName() == dynamic_element_name)
{
/// When requested type is LowCardinality the subcolumn type name will be LowCardinality(Nullable).
/// Value in shared variant is serialized as LowCardinality and we cannot simply deserialize it
/// inside LowCardinality(Nullable) column (it will try to deserialize null bit). In this case we
/// have to create temporary LowCardinality column, deserialize value into it and insert it into
/// resulting LowCardinality(Nullable) (insertion from LowCardinality column to LowCardinality(Nullable)
/// column is allowed).
if (is_low_cardinality_nullable)
{
auto tmp_column = variant_type->createColumn();
variant_serialization->deserializeBinary(*tmp_column, buf, format_settings);
non_nullable_variant_column->insertFrom(*tmp_column, 0);
}
else if (is_null_map_subcolumn)
{
null_map->push_back(0);
}
else
{
variant_serialization->deserializeBinary(*non_nullable_variant_column, buf, format_settings);
if (null_map)
null_map->push_back(0);
}
}
else
{
variant_column->insertDefault();
}
}
else
{
variant_column->insertDefault();
}
}
/// Extract nested subcolumn if needed.
if (!nested_subcolumn.empty() && !is_null_map_subcolumn)
{
auto subcolumn = result_type->getSubcolumn(nested_subcolumn, variant_column->getPtr());
result_column->assumeMutable()->insertRangeFrom(*subcolumn, 0, subcolumn->size());
}
}
}
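/// Illustrative example (hypothetical data): reading subcolumn d.`Array(Int64)` when the
/// Array(Int64) values overflowed into the shared variant takes the else-branch above: the shared
/// variant is bulk-read, the type header of every non-null row is decoded, rows matching the
/// requested type name are deserialized into a temporary column while all others become
/// default/NULL, and the nested subcolumn (if any) is extracted from the assembled column at the end.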

View File

@ -13,11 +13,15 @@ private:
/// To be able to deserialize Dynamic element as a subcolumn
/// we need its type name and global discriminator.
String dynamic_element_name;
/// Nested subcolumn of the dynamic element type. For example, for the `Tuple(a UInt32)`.a
/// subcolumn, dynamic_element_name = 'Tuple(a UInt32)' and nested_subcolumn = 'a'.
/// Needed to extract nested subcolumn from values in shared variant.
String nested_subcolumn;
bool is_null_map_subcolumn;
public:
SerializationDynamicElement(const SerializationPtr & nested_, const String & dynamic_element_name_, bool is_null_map_subcolumn_ = false)
: SerializationWrapper(nested_), dynamic_element_name(dynamic_element_name_), is_null_map_subcolumn(is_null_map_subcolumn_)
SerializationDynamicElement(const SerializationPtr & nested_, const String & dynamic_element_name_, const String & nested_subcolumn_, bool is_null_map_subcolumn_ = false)
: SerializationWrapper(nested_), dynamic_element_name(dynamic_element_name_), nested_subcolumn(nested_subcolumn_), is_null_map_subcolumn(is_null_map_subcolumn_)
{
}

View File

@ -305,8 +305,10 @@ SerializationVariantElement::VariantSubcolumnCreator::VariantSubcolumnCreator(
const String & variant_element_name_,
ColumnVariant::Discriminator global_variant_discriminator_,
ColumnVariant::Discriminator local_variant_discriminator_,
bool make_nullable_,
const ColumnPtr & null_map_)
: local_discriminators(local_discriminators_)
, null_map(null_map_)
, variant_element_name(variant_element_name_)
, global_variant_discriminator(global_variant_discriminator_)
, local_variant_discriminator(local_variant_discriminator_)
@ -314,12 +316,13 @@ SerializationVariantElement::VariantSubcolumnCreator::VariantSubcolumnCreator(
{
}
DataTypePtr SerializationVariantElement::VariantSubcolumnCreator::create(const DataTypePtr & prev) const
{
return make_nullable ? makeNullableOrLowCardinalityNullableSafe(prev) : prev;
}
SerializationPtr SerializationVariantElement::VariantSubcolumnCreator::create(const SerializationPtr & prev) const
{
return std::make_shared<SerializationVariantElement>(prev, variant_element_name, global_variant_discriminator);
}
@ -339,12 +342,16 @@ ColumnPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB:
return res;
}
/// In general case we should iterate through discriminators and create null-map for our variant if we don't already have it.
std::optional<NullMap> null_map_from_discriminators;
if (!null_map)
{
null_map_from_discriminators = NullMap();
null_map_from_discriminators->reserve(local_discriminators->size());
const auto & local_discriminators_data = assert_cast<const ColumnVariant::ColumnDiscriminators &>(*local_discriminators).getData();
for (auto local_discr : local_discriminators_data)
null_map_from_discriminators->push_back(local_discr != local_variant_discriminator);
}
/// Now we can create new column from null-map and variant column using IColumn::expand.
auto res_column = IColumn::mutate(prev);
@ -356,13 +363,21 @@ ColumnPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB:
if (make_nullable && prev->lowCardinality())
res_column = assert_cast<ColumnLowCardinality &>(*res_column).cloneNullable();
if (null_map_from_discriminators)
res_column->expand(*null_map_from_discriminators, /*inverted = */ true);
else
res_column->expand(assert_cast<const ColumnUInt8 &>(*null_map).getData(), /*inverted = */ true);
if (make_nullable && prev->canBeInsideNullable())
{
if (null_map_from_discriminators)
{
auto null_map_col = ColumnUInt8::create();
null_map_col->getData() = std::move(*null_map_from_discriminators);
return ColumnNullable::create(std::move(res_column), std::move(null_map_col));
}
return ColumnNullable::create(std::move(res_column), null_map->assumeMutable());
}
return res_column;
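/// Worked example of the expand step above (hypothetical values): with inverted = true, defaults
/// are inserted where the mask is 1, so variant values [a, b] and null map [1, 0, 1, 0] expand to
/// [default, a, default, b]; the same mask then becomes the ColumnNullable null map.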

View File

@ -63,18 +63,22 @@ public:
struct VariantSubcolumnCreator : public ISubcolumnCreator
{
private:
const ColumnPtr local_discriminators;
const ColumnPtr null_map; /// optional
const String variant_element_name;
const ColumnVariant::Discriminator global_variant_discriminator;
const ColumnVariant::Discriminator local_variant_discriminator;
bool make_nullable;
public:
VariantSubcolumnCreator(
const ColumnPtr & local_discriminators_,
const String & variant_element_name_,
ColumnVariant::Discriminator global_variant_discriminator_,
ColumnVariant::Discriminator local_variant_discriminator_,
bool make_nullable_,
const ColumnPtr & null_map_ = nullptr);
DataTypePtr create(const DataTypePtr & prev) const override;
ColumnPtr create(const ColumnPtr & prev) const override;

View File

@ -1583,6 +1583,8 @@ void DatabaseReplicated::dropTable(ContextPtr local_context, const String & tabl
}
auto table = tryGetTable(table_name, getContext());
if (!table)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} doesn't exist", table_name);
if (table->getName() == "MaterializedView" || table->getName() == "WindowView")
{
/// Avoid recursive locking of metadata_mutex

View File

@ -196,7 +196,7 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList(
}
else
{
std::tuple<std::string, std::string, std::string, uint16_t, std::string, std::string, std::string, std::string> row;
while (stream >> row)
{
const auto column_name = std::get<0>(row);
@ -206,13 +206,14 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList(
std::get<3>(row));
columns.push_back(NameAndTypePair(column_name, data_type));
auto attgenerated = std::get<7>(row);
attributes.emplace(
column_name,
PostgreSQLTableStructure::PGAttribute{
.atttypid = parse<int>(std::get<4>(row)),
.atttypmod = parse<int>(std::get<5>(row)),
.attnum = parse<int>(std::get<6>(row)),
.atthasdef = false,
.attgenerated = attgenerated.empty() ? char{} : char(attgenerated[0]),
.attr_def = {}
@ -308,6 +309,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
"attndims AS dims, " /// array dimensions
"atttypid as type_id, "
"atttypmod as type_modifier, "
"attnum as att_num, "
"attgenerated as generated " /// if column has GENERATED
"FROM pg_attribute "
"WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) "
@ -338,17 +340,29 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
"WHERE adrelid = (SELECT oid FROM pg_class WHERE {});", where);
pqxx::result result{tx.exec(attrdef_query)};
if (static_cast<uint64_t>(result.size()) > table.physical_columns->names.size())
{
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Received {} attrdef, but currently fetched columns list has {} columns",
result.size(), table.physical_columns->attributes.size());
}
for (const auto & column_attrs : table.physical_columns->attributes)
{
if (column_attrs.second.attgenerated != 's') /// i.e. not a generated column
{
continue;
}
for (const auto row : result)
{
int adnum = row[0].as<int>();
if (column_attrs.second.attnum == adnum)
{
table.physical_columns->attributes.at(column_attrs.first).attr_def = row[1].as<std::string>();
break;
}
}
}
}
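/// Illustrative example (hypothetical schema): for
///     CREATE TABLE t (a INT, b INT GENERATED ALWAYS AS (a + 1) STORED);
/// pg_attribute reports attnum 1 and 2 with attgenerated = 's' only for b, and pg_attrdef returns
/// one row with adnum = 2 and the "a + 1" expression, so only b's attributes receive attr_def above.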

View File

@ -16,6 +16,7 @@ struct PostgreSQLTableStructure
{
Int32 atttypid;
Int32 atttypmod;
Int32 attnum;
bool atthasdef;
char attgenerated;
std::string attr_def;

View File

@ -51,6 +51,8 @@ namespace
configuration.db,
configuration.user,
configuration.password,
configuration.proto_send_chunked,
configuration.proto_recv_chunked,
configuration.quota_key,
"", /* cluster */
"", /* cluster_secret */
@ -222,7 +224,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
{
validateNamedCollection(
*named_collection, {}, ValidateKeysMultiset<ExternalDatabaseEqualKeysSet>{
"secure", "host", "hostname", "port", "user", "username", "password", "quota_key", "name",
"secure", "host", "hostname", "port", "user", "username", "password", "proto_send_chunked", "proto_recv_chunked", "quota_key", "name",
"db", "database", "table","query", "where", "invalidate_query", "update_field", "update_lag"});
const auto secure = named_collection->getOrDefault("secure", false);
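/// Illustrative sketch (hypothetical values, not from this commit): the new keys map to a
/// dictionary source configured either as XML,
///     <source><clickhouse>
///         <host>example-host</host><port>9000</port>
///         <proto_caps><send>chunked</send><recv>notchunked</recv></proto_caps>
///     </clickhouse></source>
/// or as a named collection providing proto_send_chunked / proto_recv_chunked with the same
/// "chunked" / "notchunked" values read by the code below.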
@ -234,6 +236,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
.host = host,
.user = named_collection->getAnyOrDefault<String>({"user", "username"}, "default"),
.password = named_collection->getOrDefault<String>("password", ""),
.proto_send_chunked = named_collection->getOrDefault<String>("proto_send_chunked", "notchunked"),
.proto_recv_chunked = named_collection->getOrDefault<String>("proto_recv_chunked", "notchunked"),
.quota_key = named_collection->getOrDefault<String>("quota_key", ""),
.db = named_collection->getAnyOrDefault<String>({"db", "database"}, default_database),
.table = named_collection->getOrDefault<String>("table", ""),
@ -258,6 +262,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
.host = host,
.user = config.getString(settings_config_prefix + ".user", "default"),
.password = config.getString(settings_config_prefix + ".password", ""),
.proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "notchunked"),
.proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "notchunked"),
.quota_key = config.getString(settings_config_prefix + ".quota_key", ""),
.db = config.getString(settings_config_prefix + ".db", default_database),
.table = config.getString(settings_config_prefix + ".table", ""),

View File

@ -23,6 +23,8 @@ public:
const std::string host;
const std::string user;
const std::string password;
const std::string proto_send_chunked;
const std::string proto_recv_chunked;
const std::string quota_key;
const std::string db;
const std::string table;

View File

@ -1362,13 +1362,14 @@ public:
}
auto & variant_column = column_dynamic.getVariantColumn();
const auto & variant_info = column_dynamic.getVariantInfo();
/// Second, infer ClickHouse type for this element and add it as a new variant.
auto element_type = elementToDataType(element, format_settings);
auto element_type_name = element_type->getName();
if (column_dynamic.addNewVariant(element_type, element_type_name))
{
auto node = buildJSONExtractTree<JSONParser>(element_type, "Dynamic inference");
auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type_name);
auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discriminator);
if (!node->insertResultToColumn(variant, element, insert_settings, format_settings, error))
return false;
@ -1377,29 +1378,15 @@ public:
return true;
}
/// We couldn't add this variant, insert it into shared variant.
auto tmp_variant_column = element_type->createColumn();
auto node = buildJSONExtractTree<JSONParser>(element_type, "Dynamic inference");
if (!node->insertResultToColumn(*tmp_variant_column, element, insert_settings, format_settings, error))
return false;
column_dynamic.insertValueIntoSharedVariant(*tmp_variant_column, element_type, element_type_name, 0);
return true;
}
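/// Illustrative example (hypothetical input): with the variant limit already reached, a JSON
/// element like [1,2,3] infers Array(Int64), fails addNewVariant, is parsed into a temporary
/// Array(Int64) column through its extract-tree node, and is appended to the shared variant in
/// binary form by insertValueIntoSharedVariant.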
static DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings)
{
JSONInferenceInfo json_inference_info;

View File

@ -43,6 +43,7 @@
#include <DataTypes/DataTypeDynamic.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <DataTypes/ObjectUtils.h>
#include <DataTypes/Serializations/SerializationDecimal.h>
#include <Formats/FormatSettings.h>
@ -4287,13 +4288,98 @@ private:
WrapperType createDynamicToColumnWrapper(const DataTypePtr &) const
{
return [this]
(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr
(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr
{
/// When casting Dynamic to regular column we should cast all variants from current Dynamic column
/// and construct the result based on discriminators.
const auto & column_dynamic = assert_cast<const ColumnDynamic &>(*arguments.front().column.get());
const auto & variant_column = column_dynamic.getVariantColumn();
const auto & variant_info = column_dynamic.getVariantInfo();
/// First, cast usual variants to result type.
const auto & variant_types = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariants();
std::vector<ColumnPtr> casted_variant_columns;
casted_variant_columns.reserve(variant_types.size());
for (size_t i = 0; i != variant_types.size(); ++i)
{
const auto & variant_col = variant_column.getVariantPtrByGlobalDiscriminator(i);
ColumnsWithTypeAndName variant = {{variant_col, variant_types[i], ""}};
auto variant_wrapper = prepareUnpackDictionaries(variant_types[i], result_type);
casted_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_col->size()));
}
/// Second, collect all variants stored in shared variant and cast them to result type.
std::vector<MutableColumnPtr> variant_columns_from_shared_variant;
DataTypes variant_types_from_shared_variant;
/// We will need to know what variant to use when we see the discriminator of the shared variant.
/// To do it, we remember what variant was extracted from each row and what its offset was.
PaddedPODArray<UInt64> shared_variant_indexes;
PaddedPODArray<UInt64> shared_variant_offsets;
std::unordered_map<String, UInt64> shared_variant_to_index;
const auto & shared_variant = column_dynamic.getSharedVariant();
const auto shared_variant_discr = column_dynamic.getSharedVariantDiscriminator();
const auto & local_discriminators = variant_column.getLocalDiscriminators();
const auto & offsets = variant_column.getOffsets();
if (!shared_variant.empty())
{
shared_variant_indexes.reserve(input_rows_count);
shared_variant_offsets.reserve(input_rows_count);
FormatSettings format_settings;
const auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(shared_variant_discr);
for (size_t i = 0; i != input_rows_count; ++i)
{
if (local_discriminators[i] == shared_variant_local_discr)
{
auto value = shared_variant.getDataAt(offsets[i]);
ReadBufferFromMemory buf(value.data, value.size);
auto type = decodeDataType(buf);
auto type_name = type->getName();
auto it = shared_variant_to_index.find(type_name);
/// Check if we didn't create column for this variant yet.
if (it == shared_variant_to_index.end())
{
it = shared_variant_to_index.emplace(type_name, variant_columns_from_shared_variant.size()).first;
variant_columns_from_shared_variant.push_back(type->createColumn());
variant_types_from_shared_variant.push_back(type);
}
shared_variant_indexes.push_back(it->second);
shared_variant_offsets.push_back(variant_columns_from_shared_variant[it->second]->size());
type->getDefaultSerialization()->deserializeBinary(*variant_columns_from_shared_variant[it->second], buf, format_settings);
}
else
{
shared_variant_indexes.emplace_back();
shared_variant_offsets.emplace_back();
}
}
}
/// Cast all extracted variants into result type.
std::vector<ColumnPtr> casted_shared_variant_columns;
casted_shared_variant_columns.reserve(variant_types_from_shared_variant.size());
for (size_t i = 0; i != variant_types_from_shared_variant.size(); ++i)
{
ColumnsWithTypeAndName variant = {{variant_columns_from_shared_variant[i]->getPtr(), variant_types_from_shared_variant[i], ""}};
auto variant_wrapper = prepareUnpackDictionaries(variant_types_from_shared_variant[i], result_type);
casted_shared_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_columns_from_shared_variant[i]->size()));
}
/// Construct result column from all casted variants.
auto res = result_type->createColumn();
res->reserve(input_rows_count);
for (size_t i = 0; i != input_rows_count; ++i)
{
auto global_discr = variant_column.globalDiscriminatorByLocal(local_discriminators[i]);
if (global_discr == ColumnVariant::NULL_DISCRIMINATOR)
res->insertDefault();
else if (global_discr == shared_variant_discr)
res->insertFrom(*casted_shared_variant_columns[shared_variant_indexes[i]], shared_variant_offsets[i]);
else
res->insertFrom(*casted_variant_columns[global_discr], offsets[i]);
}
return res;
};
}
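/// Illustrative example (hypothetical data): for CAST of Dynamic rows
/// [42 :: Int64, '1.5' :: String stored in the shared variant, NULL] to Float64, the wrapper above
/// pre-casts the Int64 variant and the String column extracted from the shared variant, then
/// assembles the result per row: casted_variant_columns for 42, casted_shared_variant_columns for
/// '1.5', and insertDefault() for the NULL discriminator.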
@ -4320,200 +4406,51 @@ private:
};
}
WrapperType createVariantToDynamicWrapper(const DataTypeVariant & from_variant_type, const DataTypeDynamic & dynamic_type) const
{
/// First create extended Variant with shared variant type and cast this Variant to it.
auto variants_for_dynamic = from_variant_type.getVariants();
size_t number_of_variants = variants_for_dynamic.size();
variants_for_dynamic.push_back(ColumnDynamic::getSharedVariantDataType());
const auto & variant_type_for_dynamic = std::make_shared<DataTypeVariant>(variants_for_dynamic);
auto old_to_new_variant_wrapper = createVariantToVariantWrapper(from_variant_type, *variant_type_for_dynamic);
auto max_dynamic_types = dynamic_type.getMaxDynamicTypes();
return [old_to_new_variant_wrapper, variant_type_for_dynamic, number_of_variants, max_dynamic_types]
(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr
{
auto variant_column_for_dynamic = old_to_new_variant_wrapper(arguments, result_type, col_nullable, input_rows_count);
/// If resulting Dynamic column can contain all variants from this Variant column, just create Dynamic column from it.
if (max_dynamic_types >= number_of_variants)
return ColumnDynamic::create(variant_column_for_dynamic, variant_type_for_dynamic, max_dynamic_types, max_dynamic_types);
/// Otherwise some variants should go to the shared variant. Create temporary Dynamic column from this Variant and insert
/// all data to the resulting Dynamic column, this insertion will do all the logic with shared variant.
auto tmp_dynamic_column = ColumnDynamic::create(variant_column_for_dynamic, variant_type_for_dynamic, number_of_variants, number_of_variants);
auto result_dynamic_column = ColumnDynamic::create(max_dynamic_types);
result_dynamic_column->insertRangeFrom(*tmp_dynamic_column, 0, tmp_dynamic_column->size());
return result_dynamic_column;
};
}
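/// Illustrative example: casting Variant(Int64, String, Array(Int64)) to Dynamic(max_types=2)
/// takes the second path above: a temporary Dynamic keeping all three variants is built first, and
/// insertRangeFrom into a Dynamic limited to two types moves the values of variants that no longer
/// fit into the shared variant.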
WrapperType createColumnToDynamicWrapper(const DataTypePtr & from_type, const DataTypeDynamic & dynamic_type) const
{
if (const auto * variant_type = typeid_cast<const DataTypeVariant *>(from_type.get()))
return createVariantToDynamicWrapper(*variant_type, dynamic_type);
if (context && context->getSettingsRef().cast_string_to_dynamic_use_inference && isStringOrFixedString(removeNullable(removeLowCardinality(from_type))))
return createStringToDynamicThroughParsingWrapper();
/// First, cast column to Variant with a single variant - the type of the column we cast
/// (the shared variant type is added later when casting this Variant to Dynamic).
auto variant_type = std::make_shared<DataTypeVariant>(DataTypes{removeNullableOrLowCardinalityNullable(from_type)});
auto column_to_variant_wrapper = createColumnToVariantWrapper(from_type, *variant_type);
/// Second, cast this Variant to Dynamic.
auto variant_to_dynamic_wrapper = createVariantToDynamicWrapper(*variant_type, dynamic_type);
return [column_to_variant_wrapper, variant_to_dynamic_wrapper, variant_type]
(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr
{
auto variant_res = variant_wrapper(arguments, variant_type, col_nullable, input_rows_count);
return ColumnDynamic::create(variant_res, variant_type, max_dynamic_types);
auto variant_res = column_to_variant_wrapper(arguments, variant_type, col_nullable, input_rows_count);
ColumnsWithTypeAndName args = {{variant_res, variant_type, ""}};
return variant_to_dynamic_wrapper(args, result_type, nullptr, input_rows_count);
};
}
@ -4530,21 +4467,26 @@ private:
(ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr
{
const auto & column_dynamic = assert_cast<const ColumnDynamic &>(*arguments[0].column);
/// We should use the same limit as already used in column and change only global limit.
/// It's needed because shared variant should contain values only when limit is exceeded,
/// so if there are already some data, we cannot increase the limit.
return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), column_dynamic.getMaxDynamicTypes(), to_max_types);
};
}
return [to_max_types]
(ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr
{
const auto & column_dynamic = assert_cast<const ColumnDynamic &>(*arguments[0].column);
/// If real limit in the column is not greater than desired, just use the same variant column.
if (column_dynamic.getMaxDynamicTypes() <= to_max_types)
return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), column_dynamic.getMaxDynamicTypes(), to_max_types);
/// Otherwise some variants should go to the shared variant. In this case we can just insert all
/// the data into resulting column and it will do all the logic with shared variant.
auto result_dynamic_column = ColumnDynamic::create(to_max_types);
result_dynamic_column->insertRangeFrom(column_dynamic, 0, column_dynamic.size());
return result_dynamic_column;
};
}
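In SQL terms this hunk covers casts like CAST(d, 'Dynamic(max_types=1)') on an existing Dynamic column d: raising the limit keeps the variant column as-is and only widens the global limit, while lowering it re-inserts the data so that the types over the new limit end up in the shared variant.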


@ -2,10 +2,14 @@
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <DataTypes/IDataType.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Columns/ColumnVariant.h>
#include <Columns/ColumnDynamic.h>
#include <Columns/ColumnsNumber.h>
#include <IO/ReadBufferFromMemory.h>
#include <Common/assert_cast.h>
@ -65,11 +69,15 @@ public:
const auto & variant_column = dynamic_column->getVariantColumn();
auto res = result_type->createColumn();
String element_type;
auto shared_variant_discr = dynamic_column->getSharedVariantDiscriminator();
const auto & shared_variant = dynamic_column->getSharedVariant();
for (size_t i = 0; i != input_rows_count; ++i)
{
auto global_discr = variant_column.globalDiscriminatorAt(i);
if (global_discr == ColumnVariant::NULL_DISCRIMINATOR)
element_type = name_for_null;
else if (global_discr == shared_variant_discr)
element_type = getTypeNameFromSharedVariantValue(shared_variant.getDataAt(variant_column.offsetAt(i)));
else
element_type = variant_info.variant_names[global_discr];
@ -78,6 +86,63 @@ public:
return res;
}
String getTypeNameFromSharedVariantValue(StringRef value) const
{
ReadBufferFromMemory buf(value.data, value.size);
return decodeDataType(buf)->getName();
}
};
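Each value in the shared variant is a small self-describing blob: the binary-encoded data type followed by the value serialized in that type's binary format. A hypothetical helper splitting such a cell (a sketch only; it reuses this file's includes and additionally assumes <utility> and <string_view>):
/// Hypothetical, for illustration only; not part of this change.
std::pair<DataTypePtr, std::string_view> splitSharedVariantCell(StringRef value)
{
    ReadBufferFromMemory buf(value.data, value.size);
    DataTypePtr type = decodeDataType(buf);  /// consumes the encoded type prefix
    size_t type_prefix_size = buf.count();   /// bytes consumed by the type encoding
    return {type, std::string_view(value.data + type_prefix_size, value.size - type_prefix_size)};
}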
class FunctionIsDynamicElementInSharedData : public IFunction
{
public:
static constexpr auto name = "isDynamicElementInSharedData";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionIsDynamicElementInSharedData>(); }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 1; }
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.empty() || arguments.size() > 1)
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 1",
getName(), arguments.size());
if (!isDynamic(arguments[0].type.get()))
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"First argument for function {} must be Dynamic, got {} instead",
getName(), arguments[0].type->getName());
return DataTypeFactory::instance().get("Bool");
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
const ColumnDynamic * dynamic_column = checkAndGetColumn<ColumnDynamic>(arguments[0].column.get());
if (!dynamic_column)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"First argument for function {} must be Dynamic, got {} instead",
getName(), arguments[0].type->getName());
const auto & variant_column = dynamic_column->getVariantColumn();
const auto & local_discriminators = variant_column.getLocalDiscriminators();
auto res = result_type->createColumn();
auto & res_data = assert_cast<ColumnUInt8 &>(*res).getData();
res_data.reserve(dynamic_column->size());
auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(dynamic_column->getSharedVariantDiscriminator());
for (size_t i = 0; i != input_rows_count; ++i)
res_data.push_back(local_discriminators[i] == shared_variant_local_discr);
return res;
}
};
}
@ -88,7 +153,7 @@ REGISTER_FUNCTION(DynamicType)
.description = R"(
Returns the variant type name for each row of a `Dynamic` column. If the row contains NULL, it returns 'None' for it.
)",
.syntax = {"dynamicType(variant)"},
.syntax = {"dynamicType(dynamic)"},
.arguments = {{"dynamic", "Dynamic column"}},
.examples = {{{
"Example",
@ -104,6 +169,30 @@ SELECT d, dynamicType(d) FROM test;
Hello, World! String
[1,2,3] Array(Int64)
)"}}},
.categories{"Variant"},
});
factory.registerFunction<FunctionIsDynamicElementInSharedData>(FunctionDocumentation{
.description = R"(
Returns true for rows in a Dynamic column that are not separated into subcolumns but are stored inside the shared variant in binary form.
)",
.syntax = {"isDynamicElementInSharedData(dynamic)"},
.arguments = {{"dynamic", "Dynamic column"}},
.examples = {{{
"Example",
R"(
CREATE TABLE test (d Dynamic(max_types=2)) ENGINE = Memory;
INSERT INTO test VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]);
SELECT d, isDynamicElementInSharedData(d) FROM test;
)",
R"(
d isDynamicElementInSharedData(d)
ᴺᵁᴸᴸ false
42 false
Hello, World! true
[1,2,3] true
)"}}},
.categories{"Variant"},
});

src/IO/NetUtils.h Normal file

@ -0,0 +1,58 @@
#pragma once
#include <concepts>
#include <bit>
namespace DB
{
template<std::integral T>
constexpr T netToHost(T value) noexcept
{
if constexpr (std::endian::native != std::endian::big)
return std::byteswap(value);
return value;
}
template<std::integral T>
constexpr T hostToNet(T value) noexcept
{
if constexpr (std::endian::native != std::endian::big)
return std::byteswap(value);
return value;
}
template<std::integral T>
constexpr T toLittleEndian(T value) noexcept
{
if constexpr (std::endian::native == std::endian::big)
return std::byteswap(value);
return value;
}
template<std::integral T>
constexpr T toBigEndian(T value) noexcept
{
if constexpr (std::endian::native != std::endian::big)
return std::byteswap(value);
return value;
}
template<std::integral T>
constexpr T fromLittleEndian(T value) noexcept
{
if constexpr (std::endian::native == std::endian::big)
return std::byteswap(value);
return value;
}
template<std::integral T>
constexpr T fromBigEndian(T value) noexcept
{
if constexpr (std::endian::native != std::endian::big)
return std::byteswap(value);
return value;
}
}
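The `if constexpr` folds at compile time, so on little-endian hosts these helpers are no-ops and otherwise compile to a single byteswap. A minimal self-contained usage sketch (the include path is assumed from this repository layout):
#include <IO/NetUtils.h>
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t chunk_size = 0x12345678;
    uint32_t wire = DB::toLittleEndian(chunk_size);    /// identity on x86/aarch64
    assert(DB::fromLittleEndian(wire) == chunk_size);  /// the round-trip is always the identity
    uint16_t port = DB::hostToNet(static_cast<uint16_t>(9000)); /// network byte order is big endian
    (void)port;
}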


@ -32,7 +32,7 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
ssize_t ReadBufferFromPocoSocketBase::socketReceiveBytesImpl(char * ptr, size_t size)
{
ssize_t bytes_read = 0;
Stopwatch watch;
@ -43,14 +43,11 @@ bool ReadBufferFromPocoSocket::nextImpl()
ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read);
});
CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive);
/// Add more details to exceptions.
try
{
CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive);
if (internal_buffer.size() > INT_MAX)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow");
/// If async_callback is specified, set socket to non-blocking mode
/// and try to read data from it, if socket is not ready for reading,
/// run async_callback and try again later.
@ -61,7 +58,7 @@ bool ReadBufferFromPocoSocket::nextImpl()
socket.setBlocking(false);
SCOPE_EXIT(socket.setBlocking(true));
bool secure = socket.secure();
bytes_read = socket.impl()->receiveBytes(ptr, static_cast<int>(size));
/// Check EAGAIN and ERR_SSL_WANT_READ/ERR_SSL_WANT_WRITE for secure socket (reading from secure socket can write too).
while (bytes_read < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(bytes_read) || checkSSLWantWrite(bytes_read)))))
@ -73,12 +70,12 @@ bool ReadBufferFromPocoSocket::nextImpl()
async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR);
/// Try to read again.
bytes_read = socket.impl()->receiveBytes(ptr, static_cast<int>(size));
}
}
else
{
bytes_read = socket.impl()->receiveBytes(ptr, static_cast<int>(size));
}
}
catch (const Poco::Net::NetException & e)
@ -99,6 +96,16 @@ bool ReadBufferFromPocoSocket::nextImpl()
if (bytes_read < 0)
throw NetException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from socket (peer: {}, local: {})", peer_address.toString(), socket.address().toString());
return bytes_read;
}
bool ReadBufferFromPocoSocketBase::nextImpl()
{
if (internal_buffer.size() > INT_MAX)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow");
ssize_t bytes_read = socketReceiveBytesImpl(internal_buffer.begin(), internal_buffer.size());
if (read_event != ProfileEvents::end())
ProfileEvents::increment(read_event, bytes_read);
@ -110,7 +117,7 @@ bool ReadBufferFromPocoSocket::nextImpl()
return true;
}
ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size)
: BufferWithOwnMemory<ReadBuffer>(buf_size)
, socket(socket_)
, peer_address(socket.peerAddress())
@ -119,19 +126,22 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_,
{
}
ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size)
: ReadBufferFromPocoSocketBase(socket_, buf_size)
{
read_event = read_event_;
}
bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const
{
if (available())
/// For a secure socket it is important to check whether any data remains in the underlying decryption buffer:
/// a read always retrieves the whole encrypted frame from the wire and puts it into the underlying buffer while returning only the requested size,
/// so a further poll() can block even though there is still data to read in the decryption buffer.
if (available() || socket.impl()->available())
return true;
Stopwatch watch;
bool res = socket.impl()->poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR);
ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds());
return res;
}


@ -9,7 +9,7 @@ namespace DB
{
/// Works with the ready Poco::Net::Socket. Blocking operations.
class ReadBufferFromPocoSocketBase : public BufferWithOwnMemory<ReadBuffer>
{
protected:
Poco::Net::Socket & socket;
@ -25,16 +25,29 @@ protected:
bool nextImpl() override;
public:
explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
bool poll(size_t timeout_microseconds) const;
void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); }
ssize_t socketReceiveBytesImpl(char * ptr, size_t size);
private:
AsyncCallback async_callback;
std::string socket_description;
};
class ReadBufferFromPocoSocket : public ReadBufferFromPocoSocketBase
{
public:
explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE)
: ReadBufferFromPocoSocketBase(socket_, buf_size)
{}
explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE)
: ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size)
{}
};
}


@ -0,0 +1,166 @@
#include <IO/ReadBufferFromPocoSocketChunked.h>
#include <Common/logger_useful.h>
#include <IO/NetUtils.h>
namespace DB::ErrorCodes
{
extern const int LOGICAL_ERROR;
}
namespace DB
{
ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size)
: ReadBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size)
{}
ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size)
: ReadBufferFromPocoSocketBase(
socket_, read_event_,
std::min(buf_size, static_cast<size_t>(std::numeric_limits<decltype(chunk_left)>::max()))),
our_address(socket_.address()), log(getLogger("Protocol"))
{}
void ReadBufferFromPocoSocketChunked::enableChunked()
{
if (chunked)
return;
chunked = 1;
data_end = buffer().end();
/// Resize working buffer so any next read will call nextImpl
working_buffer.resize(offset());
chunk_left = 0;
next_chunk = 0;
}
bool ReadBufferFromPocoSocketChunked::hasBufferedData() const
{
if (available())
return true;
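/// Data between working_buffer.end() and data_end counts as buffered payload only if there is more of it than one (possibly pending) chunk header.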
return chunked && (static_cast<size_t>(data_end - working_buffer.end()) > sizeof(next_chunk));
}
bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) const
{
if (chunked)
if (available() || static_cast<size_t>(data_end - working_buffer.end()) > sizeof(next_chunk))
return true;
return ReadBufferFromPocoSocketBase::poll(timeout_microseconds);
}
bool ReadBufferFromPocoSocketChunked::loadNextChunk(Position c_pos, bool cont)
{
auto buffered = std::min(static_cast<size_t>(data_end - c_pos), sizeof(next_chunk));
if (buffered)
std::memcpy(&next_chunk, c_pos, buffered);
if (buffered < sizeof(next_chunk))
if (socketReceiveBytesImpl(reinterpret_cast<char *>(&next_chunk) + buffered, sizeof(next_chunk) - buffered) < static_cast<ssize_t>(sizeof(next_chunk) - buffered))
return false;
next_chunk = fromLittleEndian(next_chunk);
if (next_chunk)
{
if (cont)
LOG_TEST(log, "{} <- {} Chunk receive continued. Size {}", ourAddress().toString(), peerAddress().toString(), next_chunk);
}
else
LOG_TEST(log, "{} <- {} Chunk receive ended.", ourAddress().toString(), peerAddress().toString());
return true;
}
bool ReadBufferFromPocoSocketChunked::processChunkLeft(Position c_pos)
{
if (data_end - c_pos < chunk_left)
{
working_buffer.resize(data_end - buffer().begin());
nextimpl_working_buffer_offset = c_pos - buffer().begin();
chunk_left -= (data_end - c_pos);
return true;
}
nextimpl_working_buffer_offset = c_pos - buffer().begin();
working_buffer.resize(nextimpl_working_buffer_offset + chunk_left);
c_pos += chunk_left;
if (!loadNextChunk(c_pos, true))
return false;
chunk_left = 0;
return true;
}
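In other words, processChunkLeft() either exposes as much of the current chunk as is already buffered (resizing the working buffer and remembering the remainder in chunk_left), or, when the whole chunk is buffered, cuts the working buffer at the chunk boundary and pre-loads the next chunk header.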
bool ReadBufferFromPocoSocketChunked::nextImpl()
{
if (!chunked)
return ReadBufferFromPocoSocketBase::nextImpl();
auto * c_pos = pos;
if (chunk_left == 0)
{
if (next_chunk == 0)
{
if (chunked == 1)
chunked = 2; // first chunked block - no end marker
else
c_pos = pos + sizeof(next_chunk); // bypass chunk end marker
if (c_pos > data_end)
c_pos = data_end;
if (!loadNextChunk(c_pos))
return false;
chunk_left = next_chunk;
next_chunk = 0;
if (chunk_left == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received");
c_pos += sizeof(next_chunk);
if (c_pos >= data_end)
{
if (!ReadBufferFromPocoSocketBase::nextImpl())
return false;
data_end = buffer().end();
c_pos = buffer().begin();
}
LOG_TEST(log, "{} <- {} Chunk receive started. Message {}, size {}", ourAddress().toString(), peerAddress().toString(), static_cast<unsigned int>(*c_pos), chunk_left);
}
else
{
c_pos += sizeof(next_chunk);
if (c_pos >= data_end)
{
if (!ReadBufferFromPocoSocketBase::nextImpl())
return false;
data_end = buffer().end();
c_pos = buffer().begin();
}
chunk_left = next_chunk;
next_chunk = 0;
}
}
else
{
if (!ReadBufferFromPocoSocketBase::nextImpl())
return false;
data_end = buffer().end();
c_pos = buffer().begin();
}
return processChunkLeft(c_pos);
}
}


@ -0,0 +1,109 @@
#pragma once
#include <IO/ReadBuffer.h>
#include <IO/ReadBufferFromPocoSocket.h>
/*
Handshake +=============
| 'Hello' type
| handshake exchange
| chunked protocol negotiation
+=============
Basic chunk:
+=============
Chunk begins | 0x12345678 chunk size, 4 bytes little endian
+-------------
| Packet type always follows beginning of the chunk
| packet data
+-------------
Chunk ends | 0x00000000 4 zero bytes
+=============
Datastream chunk:
+=============
Chunk begins | 0x12345678
+-------------
| Packet type
| packet data
+-------------
| Packet type
| packet data
+-------------
...arbitrary number .....
of packets... .....
+-------------
| Packet type
| packet data
+-------------
Chunk ends | 0x00000000
+=============
Multipart chunk:
+=============
Chunk begins | 0x12345678 chunk part size, 4 bytes little endian
+-------------
| Packet type
| packet data
+-------------
| Packet type
| (partial) packet data
+=============
Chunk continues | 0x12345678 chunk next part size, 4 bytes little endian
+=============
| possibly previous packet's data
+-------------
| Packet type
| packet data
+-------------
...arbitrary number .....
of chunk parts... .....
+-------------
| Packet type
| packet data
+-------------
Chunk ends | 0x00000000
+=============
*/
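To make the framing concrete, here is a minimal reader for this format. This is a sketch only, not the class below: readFull() is an assumed helper that blocks until exactly n bytes arrive, and error handling is elided.
#include <bit>
#include <cstddef>
#include <cstdint>
#include <string>

void readFull(int fd, void * buf, size_t n); /// assumed helper: reads exactly n bytes

/// Reads one logical chunk (one or more parts) into `out`; a zero-size header ends the chunk.
void readChunk(int fd, std::string & out)
{
    out.clear();
    for (;;)
    {
        uint32_t part_size;
        readFull(fd, &part_size, sizeof(part_size)); /// 4 bytes, little endian on the wire
        if constexpr (std::endian::native == std::endian::big)
            part_size = std::byteswap(part_size);
        if (part_size == 0)                          /// 0x00000000 is the end-of-chunk marker
            return;
        size_t old_size = out.size();
        out.resize(old_size + part_size);
        readFull(fd, out.data() + old_size, part_size); /// append this part's payload
    }
}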
namespace DB
{
class ReadBufferFromPocoSocketChunked: public ReadBufferFromPocoSocketBase
{
public:
using ReadBufferFromPocoSocketBase::setAsyncCallback;
explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
void enableChunked();
bool hasBufferedData() const;
bool poll(size_t timeout_microseconds) const;
Poco::Net::SocketAddress peerAddress() { return peer_address; }
Poco::Net::SocketAddress ourAddress() { return our_address; }
protected:
bool loadNextChunk(Position c_pos, bool cont = false);
bool processChunkLeft(Position c_pos);
bool nextImpl() override;
Poco::Net::SocketAddress our_address;
private:
LoggerPtr log;
Position data_end = nullptr; // end position of data in the internal_buffer
UInt32 chunk_left = 0; // chunk left to read from socket
UInt32 next_chunk = 0; // size of the next chunk
UInt8 chunked = 0; // 0 - disabled; 1 - started; 2 - enabled;
};
}


@ -46,7 +46,7 @@ namespace ProfileEvents
namespace CurrentMetrics
{
extern const Metric DiskS3NoSuchKeyErrors;
}
namespace DB
@ -701,7 +701,7 @@ RequestResult Client::processRequestResult(RequestResult && outcome) const
return std::forward<RequestResult>(outcome);
if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
CurrentMetrics::add(CurrentMetrics::DiskS3NoSuchKeyErrors);
String enriched_message = fmt::format(
"{} {}",


@ -64,7 +64,8 @@ public:
}
bytes += bytes_in_buffer;
pos = working_buffer.begin() + nextimpl_working_buffer_offset;
nextimpl_working_buffer_offset = 0;
}
/// Calling finalize() in the destructor of derived classes is a bad practice.
@ -164,6 +165,11 @@ protected:
bool finalized = false;
bool canceled = false;
/// The number of bytes to preserve from the initial position of `working_buffer`
/// buffer. Apparently this is an additional out-parameter for nextImpl(),
/// not a real field.
size_t nextimpl_working_buffer_offset = 0;
private:
/** Write the data in the buffer (from the beginning of the buffer to the current position).
* Throw an exception if something is wrong.

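The new field lets a derived buffer reserve space at the start of each freshly swapped-in working buffer. A hypothetical minimal subclass illustrating the contract (sink() is an assumed flushing helper; WriteBufferFromPocoSocketChunked below uses the same trick for its chunk-size slot):
/// Hypothetical subclass, for illustration only.
class HeaderReservingBuffer : public BufferWithOwnMemory<WriteBuffer>
{
    void nextImpl() override
    {
        sink(working_buffer.begin(), offset());          /// assumed: writes out everything up to pos
        nextimpl_working_buffer_offset = sizeof(UInt32); /// next working buffer starts after a 4-byte slot
    }
};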

@ -183,6 +183,7 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_
, socket(socket_)
, peer_address(socket.peerAddress())
, our_address(socket.address())
, write_event(ProfileEvents::end())
, socket_description("socket (" + peer_address.toString() + ")")
{
}


@ -0,0 +1,210 @@
#include <IO/WriteBufferFromPocoSocketChunked.h>
#include <Common/logger_useful.h>
#include <IO/NetUtils.h>
namespace
{
template <typename T>
void setValue(T * typed_ptr, std::type_identity_t<T> val)
{
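    /// memcpy instead of a direct assignment: the chunk-size slot may sit at any byte offset in the socket buffer, so typed_ptr can be unaligned.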
memcpy(static_cast<void*>(typed_ptr), &val, sizeof(T));
}
}
namespace DB
{
WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size)
: WriteBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size)
{}
WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size)
: WriteBufferFromPocoSocket(
socket_, write_event_,
std::clamp(buf_size, sizeof(*chunk_size_ptr) + 1, static_cast<size_t>(std::numeric_limits<std::remove_reference_t<decltype(*chunk_size_ptr)>>::max()))),
log(getLogger("Protocol"))
{}
void WriteBufferFromPocoSocketChunked::enableChunked()
{
chunked = true;
/// Initialize next chunk
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(pos);
pos += std::min(available(), sizeof(*chunk_size_ptr));
/// Pretend finishChunk() was just called to prevent sending empty chunk if finishChunk() called immediately
last_finish_chunk = chunk_size_ptr;
}
void WriteBufferFromPocoSocketChunked::finishChunk()
{
if (!chunked)
return;
if (pos <= reinterpret_cast<Position>(chunk_size_ptr) + sizeof(*chunk_size_ptr))
{
/// Prevent duplicate finish chunk (and finish chunk right after enableChunked())
if (chunk_size_ptr == last_finish_chunk)
return;
/// If current chunk is empty it means we are finishing a chunk previously sent by next(),
/// we want to convert current chunk header into end-of-chunk marker and initialize next chunk.
/// We don't need to worry about whether it's the end of the buffer, because next() always sends the whole buffer,
/// so this must be the beginning of the buffer.
chassert(reinterpret_cast<Position>(chunk_size_ptr) == working_buffer.begin());
setValue(chunk_size_ptr, 0);
/// Initialize next chunk
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(pos);
pos += std::min(available(), sizeof(*chunk_size_ptr));
last_finish_chunk = chunk_size_ptr;
return;
}
/// Previously finished chunk wasn't sent yet
if (last_finish_chunk == chunk_size_ptr)
{
chunk_started = false;
LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString());
}
/// Fill up current chunk size
setValue(chunk_size_ptr, toLittleEndian(static_cast<UInt32>(pos - reinterpret_cast<Position>(chunk_size_ptr) - sizeof(*chunk_size_ptr))));
if (!chunk_started)
LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}",
ourAddress().toString(), peerAddress().toString(),
static_cast<unsigned int>(*(reinterpret_cast<char *>(chunk_size_ptr) + sizeof(*chunk_size_ptr))),
*chunk_size_ptr);
else
{
chunk_started = false;
LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr);
}
LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString());
if (available() < sizeof(*chunk_size_ptr))
{
finishing = available();
pos += available();
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(pos);
last_finish_chunk = chunk_size_ptr;
return;
}
/// Buffer end-of-chunk
setValue(reinterpret_cast<decltype(chunk_size_ptr)>(pos), 0);
pos += sizeof(*chunk_size_ptr);
/// Initialize next chunk
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(pos);
pos += std::min(available(), sizeof(*chunk_size_ptr));
last_finish_chunk = chunk_size_ptr;
}
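Taken together, the intended write-side calling pattern is roughly the following sketch (writeVarUInt and Protocol::Server::Data are the usual ClickHouse helpers, not introduced here):
WriteBufferFromPocoSocketChunked out(socket);
out.enableChunked();                       /// after chunked protocol negotiation in the handshake
writeVarUInt(Protocol::Server::Data, out); /// the packet type always opens a chunk
/// ... serialize the packet body into `out` ...
out.finishChunk();                         /// seal the chunk; the end-of-chunk marker is buffered or sent
out.next();                                /// flush everything to the socket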
WriteBufferFromPocoSocketChunked::~WriteBufferFromPocoSocketChunked()
{
try
{
finalize();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
void WriteBufferFromPocoSocketChunked::nextImpl()
{
if (!chunked)
{
WriteBufferFromPocoSocket::nextImpl();
return;
}
/// next() after finishChunk at the end of the buffer
if (finishing < sizeof(*chunk_size_ptr))
{
pos -= finishing;
/// Send current chunk
WriteBufferFromPocoSocket::nextImpl();
/// Send end-of-chunk directly
UInt32 s = 0;
socketSendBytes(reinterpret_cast<const char *>(&s), sizeof(s));
finishing = sizeof(*chunk_size_ptr);
/// Initialize next chunk
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(working_buffer.begin());
nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr);
last_finish_chunk = chunk_size_ptr;
return;
}
/// Prevent sending empty chunk
if (offset() == sizeof(*chunk_size_ptr))
{
nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr);
return;
}
/// Finish chunk at the end of the buffer
if (working_buffer.end() - reinterpret_cast<Position>(chunk_size_ptr) <= static_cast<std::ptrdiff_t>(sizeof(*chunk_size_ptr)))
{
pos = reinterpret_cast<Position>(chunk_size_ptr);
/// Send current chunk
WriteBufferFromPocoSocket::nextImpl();
/// Initialize next chunk
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(working_buffer.begin());
nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr);
last_finish_chunk = nullptr;
return;
}
bool initialize_last_finish_chunk = false;
if (pos - reinterpret_cast<Position>(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk
{
pos -= sizeof(*chunk_size_ptr);
initialize_last_finish_chunk = true;
}
else // fill up current chunk size
{
setValue(chunk_size_ptr, toLittleEndian(static_cast<UInt32>(pos - reinterpret_cast<Position>(chunk_size_ptr) - sizeof(*chunk_size_ptr))));
if (!chunk_started)
{
chunk_started = true;
LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}",
ourAddress().toString(), peerAddress().toString(),
static_cast<unsigned int>(*(reinterpret_cast<char *>(chunk_size_ptr) + sizeof(*chunk_size_ptr))),
*chunk_size_ptr);
}
else
LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr);
}
/// Send current chunk
WriteBufferFromPocoSocket::nextImpl();
/// Initialize next chunk
chunk_size_ptr = reinterpret_cast<decltype(chunk_size_ptr)>(working_buffer.begin());
nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr);
last_finish_chunk = initialize_last_finish_chunk ? chunk_size_ptr : nullptr;
}
void WriteBufferFromPocoSocketChunked::finalizeImpl()
{
if (chunked && offset() == sizeof(*chunk_size_ptr))
pos -= sizeof(*chunk_size_ptr);
WriteBufferFromPocoSocket::finalizeImpl();
}
}


@ -0,0 +1,36 @@
#pragma once
#include <Common/logger_useful.h>
#include <IO/WriteBufferFromPocoSocket.h>
#include <algorithm>
namespace DB
{
class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket
{
public:
explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
void enableChunked();
void finishChunk();
~WriteBufferFromPocoSocketChunked() override;
protected:
void nextImpl() override;
void finalizeImpl() override;
Poco::Net::SocketAddress peerAddress() const { return peer_address; }
Poco::Net::SocketAddress ourAddress() const { return our_address; }
private:
LoggerPtr log;
bool chunked = false;
UInt32 * last_finish_chunk = nullptr; // pointer to the last chunk header created by finishChunk
bool chunk_started = false; // chunk started flag
UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer
size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker
};
}


@ -113,6 +113,9 @@ Cluster::Address::Address(
secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable;
priority = Priority{config.getInt(config_prefix + ".priority", 1)};
proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "notchunked");
proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "notchunked");
const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port";
auto default_port = config.getInt(port_type, 0);
@ -425,7 +428,9 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config,
auto pool = ConnectionPoolFactory::instance().get(
static_cast<unsigned>(settings.distributed_connections_pool_size),
address.host_name, address.port,
address.default_database, address.user, address.password, address.quota_key,
address.default_database, address.user, address.password,
address.proto_send_chunked, address.proto_recv_chunked,
address.quota_key,
address.cluster, address.cluster_secret,
"server", address.compression,
address.secure, address.priority);
@ -589,6 +594,8 @@ void Cluster::addShard(
replica.default_database,
replica.user,
replica.password,
replica.proto_send_chunked,
replica.proto_recv_chunked,
replica.quota_key,
replica.cluster,
replica.cluster_secret,
@ -744,6 +751,8 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti
address.default_database,
address.user,
address.password,
address.proto_send_chunked,
address.proto_recv_chunked,
address.quota_key,
address.cluster,
address.cluster_secret,


@ -114,6 +114,8 @@ public:
UInt16 port{0};
String user;
String password;
String proto_send_chunked = "notchunked";
String proto_recv_chunked = "notchunked";
String quota_key;
/// For inter-server authorization


@ -354,8 +354,8 @@ size_t HashJoinMethods<KIND, STRICTNESS, MapsTemplate>::joinRightColumns(
{
if (unlikely(current_offset >= max_joined_block_rows))
{
added_columns.offsets_to_replicate->resize(i);
added_columns.filter.resize(i);
break;
}
}

View File

@ -18,11 +18,25 @@ struct JoinFeatures
static constexpr bool inner = KIND == JoinKind::Inner;
static constexpr bool full = KIND == JoinKind::Full;
/** Whether we may need duplicate rows from the left table.
* For example, when we have row (key1, attr1) in left table
* and rows (key1, attr2), (key1, attr3) in right table,
* then we need to duplicate row (key1, attr1) for each of joined rows from right table, so result will be
* (key1, attr1, key1, attr2)
* (key1, attr1, key1, attr3)
*/
static constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right);
/// Whether we need to filter rows from the left table that do not have matches in the right table.
static constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left));
/// Whether we need to add default values for columns from the left table.
static constexpr bool add_missing = (left || full) && !is_semi_join;
/// Whether we need to store flags for rows from the right table
/// that indicate whether they have matches in the left table.
static constexpr bool need_flags = MapGetter<KIND, STRICTNESS, std::is_same_v<std::decay_t<Map>, HashJoin::MapsAll>>::flagged;
static constexpr bool is_maps_all = std::is_same_v<std::decay_t<Map>, HashJoin::MapsAll>;
};
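For example, instantiated with KIND = JoinKind::Left and STRICTNESS = JoinStrictness::Any, this evaluates to need_replication = false, need_filter = false and add_missing = true: every left row is emitted exactly once, and unmatched rows are padded with defaults.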


@ -17,6 +17,7 @@
#include <Storages/AlterCommands.h>
#include <Storages/IStorage.h>
#include <Storages/MutationCommands.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
namespace DB
@ -27,7 +28,6 @@ namespace ErrorCodes
extern const int TABLE_IS_READ_ONLY;
extern const int SUPPORT_IS_DISABLED;
extern const int BAD_ARGUMENTS;
extern const int NOT_IMPLEMENTED;
extern const int QUERY_IS_PROHIBITED;
}
@ -67,13 +67,42 @@ BlockIO InterpreterDeleteQuery::execute()
auto table_lock = table->lockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout);
auto metadata_snapshot = table->getInMemoryMetadataPtr();
auto lightweightDelete = [&]()
if (table->supportsDelete())
{
/// Convert to MutationCommand
MutationCommands mutation_commands;
MutationCommand mut_command;
mut_command.type = MutationCommand::Type::DELETE;
mut_command.predicate = delete_query.predicate;
mutation_commands.emplace_back(mut_command);
table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef());
MutationsInterpreter::Settings settings(false);
MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate();
table->mutate(mutation_commands, getContext());
return {};
}
else if (table->supportsLightweightDelete())
{
if (!getContext()->getSettingsRef().enable_lightweight_delete)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"Lightweight delete mutate is disabled. "
"Set `enable_lightweight_delete` setting to enable it");
if (metadata_snapshot->hasProjections())
{
if (const auto * merge_tree_data = dynamic_cast<const MergeTreeData *>(table.get()))
if (merge_tree_data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"DELETE query is not allowed for table {} because as it has projections and setting "
"lightweight_mutation_projection_mode is set to THROW. "
"User should change lightweight_mutation_projection_mode OR "
"drop all the projections manually before running the query",
table_id.getFullTableName());
}
/// Build "ALTER ... UPDATE _row_exists = 0 WHERE predicate" query
String alter_query =
"ALTER TABLE " + table->getStorageID().getFullTableName()
@ -94,79 +123,9 @@ BlockIO InterpreterDeleteQuery::execute()
context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync));
InterpreterAlterQuery alter_interpreter(alter_ast, context);
return alter_interpreter.execute();
};
if (table->supportsDelete())
{
/// Convert to MutationCommand
MutationCommands mutation_commands;
MutationCommand mut_command;
mut_command.type = MutationCommand::Type::DELETE;
mut_command.predicate = delete_query.predicate;
mutation_commands.emplace_back(mut_command);
table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef());
MutationsInterpreter::Settings settings(false);
MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate();
table->mutate(mutation_commands, getContext());
return {};
}
else if (table->supportsLightweightDelete())
{
return lightweightDelete();
}
else
{
if (table->hasProjection())
{
auto context = Context::createCopy(getContext());
auto mode = context->getSettingsRef().lightweight_mutation_projection_mode;
if (mode == LightweightMutationProjectionMode::THROW)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
"DELETE query is not supported for table {} as it has projections. "
"User should drop all the projections manually before running the query",
table->getStorageID().getFullTableName());
}
else if (mode == LightweightMutationProjectionMode::DROP)
{
std::vector<String> all_projections = metadata_snapshot->projections.getAllRegisteredNames();
context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync));
/// Drop projections first so that lightweight delete can be performed.
for (const auto & projection : all_projections)
{
String alter_query =
"ALTER TABLE " + table->getStorageID().getFullTableName()
+ (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster))
+ " DROP PROJECTION IF EXISTS " + projection;
ParserAlterQuery parser;
ASTPtr alter_ast = parseQuery(
parser,
alter_query.data(),
alter_query.data() + alter_query.size(),
"ALTER query",
0,
DBMS_DEFAULT_MAX_PARSER_DEPTH,
DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
InterpreterAlterQuery alter_interpreter(alter_ast, context);
alter_interpreter.execute();
}
}
else
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Unrecognized lightweight_mutation_projection_mode, only throw and drop are allowed.");
}
return lightweightDelete();
}
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"DELETE query is not supported for table {}",
table->getStorageID().getFullTableName());
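For a concrete feel: on a table that only supports lightweight deletes, DELETE FROM db.t WHERE id = 42 is rewritten into roughly ALTER TABLE db.t UPDATE _row_exists = 0 WHERE id = 42 and executed through InterpreterAlterQuery as above (db.t and the predicate are illustrative).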


@ -663,13 +663,16 @@ BlockIO InterpreterSystemQuery::execute()
startStopAction(ActionLocks::ViewRefresh, false);
break;
case Type::REFRESH_VIEW:
for (const auto & task : getRefreshTasks())
task->run();
break;
case Type::CANCEL_VIEW:
for (const auto & task : getRefreshTasks())
task->cancel();
break;
case Type::TEST_VIEW:
for (const auto & task : getRefreshTasks())
task->setFakeTime(query.fake_time_for_view);
break;
case Type::DROP_REPLICA:
dropReplica(query);
@ -1242,15 +1245,15 @@ void InterpreterSystemQuery::flushDistributed(ASTSystemQuery & query)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SYSTEM RESTART DISK is not supported");
}
RefreshTaskList InterpreterSystemQuery::getRefreshTasks()
{
auto ctx = getContext();
ctx->checkAccess(AccessType::SYSTEM_VIEWS);
auto tasks = ctx->getRefreshSet().findTasks(table_id);
if (tasks.empty())
throw Exception(
ErrorCodes::BAD_ARGUMENTS, "Refreshable view {} doesn't exist", table_id.getNameForLogs());
return tasks;
}


@ -74,7 +74,7 @@ private:
void flushDistributed(ASTSystemQuery & query);
[[noreturn]] void restartDisk(String & name);
RefreshTaskList getRefreshTasks();
AccessRightsElements getRequiredAccessForDDLOnCluster() const;
void startStopAction(StorageActionBlockType action_type, bool start);


@ -198,6 +198,29 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s
print_database_table();
}
if (sync_replica_mode != SyncReplicaMode::DEFAULT)
{
settings.ostr << ' ';
print_keyword(magic_enum::enum_name(sync_replica_mode));
// If the mode is LIGHTWEIGHT and specific source replicas are specified
if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty())
{
settings.ostr << ' ';
print_keyword("FROM");
settings.ostr << ' ';
bool first = true;
for (const auto & src : src_replicas)
{
if (!first)
settings.ostr << ", ";
first = false;
settings.ostr << quoteString(src);
}
}
}
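/// The block above renders the tail of e.g.: SYSTEM SYNC REPLICA db.tbl LIGHTWEIGHT FROM 'r1', 'r2'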
if (query_settings)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << settings.nl_or_ws << "SETTINGS " << (settings.hilite ? hilite_none : "");
@ -233,28 +256,6 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s
print_identifier(disk);
}
if (sync_replica_mode != SyncReplicaMode::DEFAULT)
{
settings.ostr << ' ';
print_keyword(magic_enum::enum_name(sync_replica_mode));
// If the mode is LIGHTWEIGHT and specific source replicas are specified
if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty())
{
settings.ostr << ' ';
print_keyword("FROM");
settings.ostr << ' ';
bool first = true;
for (const auto & src : src_replicas)
{
if (!first)
settings.ostr << ", ";
first = false;
settings.ostr << quoteString(src);
}
}
}
break;
}
case Type::SYNC_DATABASE_REPLICA:


@ -3,6 +3,11 @@
#include <Processors/Merges/IMergingTransform.h>
#include <Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h>
namespace ProfileEvents
{
extern const Event AggregatingSortedMilliseconds;
}
namespace DB
{
@ -29,6 +34,11 @@ public:
}
String getName() const override { return "AggregatingSortedTransform"; }
void onFinish() override
{
logMergedStats(ProfileEvents::AggregatingSortedMilliseconds, "Aggregated sorted", getLogger("AggregatingSortedTransform"));
}
};
}


@ -30,6 +30,8 @@ public:
void consume(Input & input, size_t source_num) override;
Status merge() override;
MergedStats getMergedStats() const override { return merged_data.getMergedStats(); }
/// Stores information for aggregation of SimpleAggregateFunction columns
struct SimpleAggregateDescription
{


@ -126,6 +126,9 @@ IMergingAlgorithm::Status FinishAggregatingInOrderAlgorithm::merge()
Chunk FinishAggregatingInOrderAlgorithm::prepareToMerge()
{
total_merged_rows += accumulated_rows;
total_merged_bytes += accumulated_bytes;
accumulated_rows = 0;
accumulated_bytes = 0;


@ -50,6 +50,8 @@ public:
void consume(Input & input, size_t source_num) override;
Status merge() override;
MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; }
private:
Chunk prepareToMerge();
void addToAggregation();
@ -92,6 +94,9 @@ private:
UInt64 chunk_num = 0;
size_t accumulated_rows = 0;
size_t accumulated_bytes = 0;
size_t total_merged_rows = 0;
size_t total_merged_bytes = 0;
};
}


@ -33,6 +33,8 @@ public:
const char * getName() const override { return "GraphiteRollupSortedAlgorithm"; }
Status merge() override;
MergedStats getMergedStats() const override { return merged_data->getMergedStats(); }
struct ColumnsDefinition
{
size_t path_column_num;


@ -1,7 +1,7 @@
#pragma once
#include <Processors/Chunk.h>
#include <variant>
#include <Common/ProfileEvents.h>
namespace DB
{
@ -65,6 +65,15 @@ public:
IMergingAlgorithm() = default;
virtual ~IMergingAlgorithm() = default;
struct MergedStats
{
UInt64 bytes = 0;
UInt64 rows = 0;
UInt64 blocks = 0;
};
virtual MergedStats getMergedStats() const = 0;
};
// TODO: use when compile with clang which could support it


@ -16,6 +16,8 @@ public:
void initialize(Inputs inputs) override;
void consume(Input & input, size_t source_num) override;
MergedStats getMergedStats() const override { return merged_data->getMergedStats(); }
private:
Block header;
SortDescription description;


@ -183,6 +183,8 @@ public:
UInt64 totalAllocatedBytes() const { return total_allocated_bytes; }
UInt64 maxBlockSize() const { return max_block_size; }
IMergingAlgorithm::MergedStats getMergedStats() const { return {.bytes = total_allocated_bytes, .rows = total_merged_rows, .blocks = total_chunks}; }
virtual ~MergedData() = default;
protected:


@ -31,7 +31,7 @@ public:
void consume(Input & input, size_t source_num) override;
Status merge() override;
const MergedData & getMergedData() const { return merged_data; }
MergedStats getMergedStats() const override { return merged_data.getMergedStats(); }
private:
Block header;


@ -30,6 +30,8 @@ public:
void consume(Input & input, size_t source_num) override;
Status merge() override;
MergedStats getMergedStats() const override { return merged_data.getMergedStats(); }
struct AggregateDescription;
struct MapDescription;


@ -3,6 +3,11 @@
#include <Processors/Merges/IMergingTransform.h>
#include <Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h>
namespace ProfileEvents
{
extern const Event CollapsingSortedMilliseconds;
}
namespace DB
{
@ -36,6 +41,11 @@ public:
}
String getName() const override { return "CollapsingSortedTransform"; }
void onFinish() override
{
logMergedStats(ProfileEvents::CollapsingSortedMilliseconds, "Collapsed sorted", getLogger("CollapsingSortedTransform"));
}
};
}


@ -2,7 +2,10 @@
#include <Processors/Merges/Algorithms/IMergingAlgorithm.h>
#include <Processors/IProcessor.h>
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <Common/formatReadable.h>
namespace DB
{
@ -110,6 +113,8 @@ public:
void work() override
{
Stopwatch watch{CLOCK_MONOTONIC_COARSE};
if (!state.init_chunks.empty())
algorithm.initialize(std::move(state.init_chunks));
@ -147,6 +152,8 @@ public:
// std::cerr << "Finished" << std::endl;
state.is_finished = true;
}
merging_elapsed_ns += watch.elapsedNanoseconds();
}
protected:
@ -156,7 +163,33 @@ protected:
Algorithm algorithm;
/// Profile info.
Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE};
UInt64 merging_elapsed_ns = 0;
void logMergedStats(ProfileEvents::Event elapsed_ms_event, std::string_view transform_message, LoggerPtr log) const
{
auto stats = algorithm.getMergedStats();
UInt64 elapsed_ms = merging_elapsed_ns / 1000000LL;
ProfileEvents::increment(elapsed_ms_event, elapsed_ms);
/// Don't print info for small parts (< 1M rows)
if (stats.rows < 1000000)
return;
double seconds = static_cast<double>(merging_elapsed_ns) / 1000000000ULL;
if (seconds == 0.0)
{
LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in 0 sec.",
transform_message, stats.blocks, stats.rows, stats.bytes);
}
else
{
LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.",
transform_message, stats.blocks, stats.rows, stats.bytes,
seconds, stats.rows / seconds, ReadableSize(stats.bytes / seconds));
}
}
private:
using IMergingTransformBase::state;


@ -1,9 +1,12 @@
#include <Processors/Merges/MergingSortedTransform.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <IO/WriteBuffer.h>
#include <Common/logger_useful.h>
#include <Common/formatReadable.h>
namespace ProfileEvents
{
extern const Event MergingSortedMilliseconds;
}
namespace DB
{
@ -18,7 +21,6 @@ MergingSortedTransform::MergingSortedTransform(
UInt64 limit_,
bool always_read_till_end_,
WriteBuffer * out_row_sources_buf_,
bool quiet_,
bool use_average_block_sizes,
bool have_all_inputs_)
: IMergingTransform(
@ -37,7 +39,6 @@ MergingSortedTransform::MergingSortedTransform(
limit_,
out_row_sources_buf_,
use_average_block_sizes)
, quiet(quiet_)
{
}
@ -48,22 +49,7 @@ void MergingSortedTransform::onNewInput()
void MergingSortedTransform::onFinish()
{
logMergedStats(ProfileEvents::MergingSortedMilliseconds, "Merged sorted", getLogger("MergingSortedTransform"));
}
}


@ -21,7 +21,6 @@ public:
UInt64 limit_ = 0,
bool always_read_till_end_ = false,
WriteBuffer * out_row_sources_buf_ = nullptr,
bool quiet_ = false,
bool use_average_block_sizes = false,
bool have_all_inputs_ = true);
@ -30,9 +29,6 @@ public:
protected:
void onNewInput() override;
void onFinish() override;
private:
bool quiet = false;
};
}


@ -3,6 +3,10 @@
#include <Processors/Merges/IMergingTransform.h>
#include <Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h>
namespace ProfileEvents
{
extern const Event ReplacingSortedMilliseconds;
}
namespace DB
{
@ -38,6 +42,11 @@ public:
}
String getName() const override { return "ReplacingSorted"; }
void onFinish() override
{
logMergedStats(ProfileEvents::ReplacingSortedMilliseconds, "Replaced sorted", getLogger("ReplacingSortedTransform"));
}
};
}


@ -3,6 +3,11 @@
#include <Processors/Merges/IMergingTransform.h>
#include <Processors/Merges/Algorithms/SummingSortedAlgorithm.h>
namespace ProfileEvents
{
extern const Event SummingSortedMilliseconds;
}
namespace DB
{
@ -33,6 +38,11 @@ public:
}
String getName() const override { return "SummingSortedTransform"; }
void onFinish() override
{
logMergedStats(ProfileEvents::SummingSortedMilliseconds, "Summed sorted", getLogger("SummingSortedTransform"));
}
};
}

Some files were not shown because too many files have changed in this diff.