Merge remote-tracking branch 'origin/master' into pr-local-plan

Igor Nikonov 2024-06-12 10:30:43 +00:00
commit f8afb299f6
37 changed files with 656 additions and 212 deletions


@ -37,7 +37,6 @@ Checks: [
'-cert-oop54-cpp',
'-cert-oop57-cpp',
'-clang-analyzer-optin.core.EnumCastOutOfRange', # https://github.com/abseil/abseil-cpp/issues/1667
'-clang-analyzer-optin.performance.Padding',
'-clang-analyzer-unix.Malloc',

.gitmodules vendored (4 changes)

@ -161,9 +161,9 @@
[submodule "contrib/xz"]
path = contrib/xz
url = https://github.com/xz-mirror/xz
[submodule "contrib/abseil-cpp"]
[submodule "abseil"]
path = contrib/abseil-cpp
url = https://github.com/abseil/abseil-cpp
url = https://github.com/ClickHouse/abseil-cpp.git
[submodule "contrib/dragonbox"]
path = contrib/dragonbox
url = https://github.com/ClickHouse/dragonbox

contrib/abseil-cpp vendored (2 changes)

@ -1 +1 @@
Subproject commit 3bd86026c93da5a40006fd53403dff9d5f5e30e3
Subproject commit a3c4dd3e77f28b526efbb0eb394b72e29c633936


@ -1,6 +1,8 @@
set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}")
# This is a minimized version of the function definition in CMake/AbseilHelpers.cmake
#
# Copyright 2017 The Abseil Authors.
#
@ -16,7 +18,6 @@ set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}")
# See the License for the specific language governing permissions and
# limitations under the License.
#
function(absl_cc_library)
cmake_parse_arguments(ABSL_CC_LIB
"DISABLE_INSTALL;PUBLIC;TESTONLY"
@ -76,6 +77,12 @@ function(absl_cc_library)
add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME})
endfunction()
# The following definitions are an amalgamation of the CMakeLists.txt files in absl/*/
# To refresh them when upgrading to a new version:
# - copy them over from upstream
# - remove calls of 'absl_cc_test'
# - remove calls of `absl_cc_library` that contain `TESTONLY`
# - append '${DIR}' to the file definitions
set(DIR ${ABSL_ROOT_DIR}/absl/algorithm)
@ -102,12 +109,12 @@ absl_cc_library(
absl::algorithm
absl::core_headers
absl::meta
absl::nullability
PUBLIC
)
set(DIR ${ABSL_ROOT_DIR}/absl/base)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
atomic_hook
@ -146,6 +153,18 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
no_destructor
HDRS
"${DIR}/no_destructor.h"
DEPS
absl::config
absl::nullability
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
nullability
@ -305,6 +324,8 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
$<$<BOOL:${LIBRT}>:-lrt>
$<$<BOOL:${MINGW}>:-ladvapi32>
DEPS
absl::atomic_hook
absl::base_internal
@ -312,6 +333,7 @@ absl_cc_library(
absl::core_headers
absl::dynamic_annotations
absl::log_severity
absl::nullability
absl::raw_logging_internal
absl::spinlock_wait
absl::type_traits
@ -357,6 +379,7 @@ absl_cc_library(
absl::base
absl::config
absl::core_headers
absl::nullability
PUBLIC
)
@ -467,10 +490,11 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::container_common
absl::common_policy_traits
absl::compare
absl::compressed_tuple
absl::config
absl::container_common
absl::container_memory
absl::cord
absl::core_headers
@ -480,7 +504,6 @@ absl_cc_library(
absl::strings
absl::throw_delegate
absl::type_traits
absl::utility
)
# Internal-only target, do not depend on directly.
@ -523,7 +546,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base_internal
absl::compressed_tuple
absl::config
absl::core_headers
absl::memory
absl::span
@ -548,18 +573,6 @@ absl_cc_library(
PUBLIC
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
counting_allocator
HDRS
"${DIR}/internal/counting_allocator.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
)
absl_cc_library(
NAME
flat_hash_map
@ -570,7 +583,7 @@ absl_cc_library(
DEPS
absl::container_memory
absl::core_headers
absl::hash_function_defaults
absl::hash_container_defaults
absl::raw_hash_map
absl::algorithm_container
absl::memory
@ -586,7 +599,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
absl::hash_container_defaults
absl::raw_hash_set
absl::algorithm_container
absl::core_headers
@ -604,7 +617,7 @@ absl_cc_library(
DEPS
absl::container_memory
absl::core_headers
absl::hash_function_defaults
absl::hash_container_defaults
absl::node_slot_policy
absl::raw_hash_map
absl::algorithm_container
@ -620,8 +633,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::core_headers
absl::hash_function_defaults
absl::hash_container_defaults
absl::node_slot_policy
absl::raw_hash_set
absl::algorithm_container
@ -629,6 +643,19 @@ absl_cc_library(
PUBLIC
)
absl_cc_library(
NAME
hash_container_defaults
HDRS
"${DIR}/hash_container_defaults.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::hash_function_defaults
PUBLIC
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
@ -655,9 +682,11 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::container_common
absl::cord
absl::hash
absl::strings
absl::type_traits
PUBLIC
)
@ -703,6 +732,7 @@ absl_cc_library(
absl::base
absl::config
absl::exponential_biased
absl::no_destructor
absl::raw_logging_internal
absl::sample_recorder
absl::synchronization
@ -756,7 +786,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::container_memory
absl::core_headers
absl::raw_hash_set
absl::throw_delegate
PUBLIC
@ -817,6 +849,7 @@ absl_cc_library(
DEPS
absl::config
absl::core_headers
absl::debugging_internal
absl::meta
absl::strings
absl::span
@ -931,6 +964,7 @@ absl_cc_library(
absl::crc32c
absl::config
absl::strings
absl::no_destructor
)
set(DIR ${ABSL_ROOT_DIR}/absl/debugging)
@ -954,6 +988,8 @@ absl_cc_library(
"${DIR}/stacktrace.cc"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
$<$<BOOL:${EXECINFO_LIBRARY}>:${EXECINFO_LIBRARY}>
DEPS
absl::debugging_internal
absl::config
@ -980,6 +1016,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
$<$<BOOL:${MINGW}>:-ldbghelp>
DEPS
absl::debugging_internal
absl::demangle_internal
@ -1058,8 +1095,10 @@ absl_cc_library(
demangle_internal
HDRS
"${DIR}/internal/demangle.h"
"${DIR}/internal/demangle_rust.h"
SRCS
"${DIR}/internal/demangle.cc"
"${DIR}/internal/demangle_rust.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
@ -1252,6 +1291,7 @@ absl_cc_library(
absl::strings
absl::synchronization
absl::flat_hash_map
absl::no_destructor
)
# Internal-only target, do not depend on directly.
@ -1283,12 +1323,9 @@ absl_cc_library(
absl_cc_library(
NAME
flags
SRCS
"${DIR}/flag.cc"
HDRS
"${DIR}/declare.h"
"${DIR}/flag.h"
"${DIR}/internal/flag_msvc.inc"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
@ -1299,7 +1336,6 @@ absl_cc_library(
absl::flags_config
absl::flags_internal
absl::flags_reflection
absl::base
absl::core_headers
absl::strings
)
@ -1379,6 +1415,9 @@ absl_cc_library(
absl::synchronization
)
############################################################################
# Unit tests in alphabetical order.
set(DIR ${ABSL_ROOT_DIR}/absl/functional)
absl_cc_library(
@ -1431,6 +1470,18 @@ absl_cc_library(
PUBLIC
)
absl_cc_library(
NAME
overload
HDRS
"${DIR}/overload.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::meta
PUBLIC
)
set(DIR ${ABSL_ROOT_DIR}/absl/hash)
absl_cc_library(
@ -1640,6 +1691,7 @@ absl_cc_library(
absl::log_internal_conditions
absl::log_internal_message
absl::log_internal_strip
absl::absl_vlog_is_on
)
absl_cc_library(
@ -1721,6 +1773,7 @@ absl_cc_library(
absl::log_entry
absl::log_severity
absl::log_sink
absl::no_destructor
absl::raw_logging_internal
absl::synchronization
absl::span
@ -1771,6 +1824,7 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::core_headers
absl::log_internal_message
absl::log_internal_nullstream
absl::log_severity
@ -1876,6 +1930,11 @@ absl_cc_library(
PUBLIC
)
# Warning: Many linkers will strip the contents of this library because its
# symbols are only used in a global constructor. A workaround is for clients
# to link this using $<LINK_LIBRARY:WHOLE_ARCHIVE,absl::log_flags> instead of
# the plain absl::log_flags.
# TODO(b/320467376): Implement the equivalent of Bazel's alwayslink=True.
absl_cc_library(
NAME
log_flags
@ -1897,6 +1956,7 @@ absl_cc_library(
absl::flags
absl::flags_marshalling
absl::strings
absl::vlog_config_internal
PUBLIC
)
@ -1919,6 +1979,7 @@ absl_cc_library(
absl::log_severity
absl::raw_logging_internal
absl::strings
absl::vlog_config_internal
)
absl_cc_library(
@ -1952,6 +2013,7 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::log_internal_log_impl
absl::vlog_is_on
PUBLIC
)
@ -2064,21 +2126,75 @@ absl_cc_library(
)
absl_cc_library(
NAME
log_internal_fnmatch
SRCS
"${DIR}/internal/fnmatch.cc"
HDRS
"${DIR}/internal/fnmatch.h"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
absl::strings
NAME
vlog_config_internal
SRCS
"${DIR}/internal/vlog_config.cc"
HDRS
"${DIR}/internal/vlog_config.h"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::base
absl::config
absl::core_headers
absl::log_internal_fnmatch
absl::memory
absl::no_destructor
absl::strings
absl::synchronization
absl::optional
)
absl_cc_library(
NAME
absl_vlog_is_on
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
HDRS
"${DIR}/absl_vlog_is_on.h"
DEPS
absl::vlog_config_internal
absl::config
absl::core_headers
absl::strings
)
absl_cc_library(
NAME
vlog_is_on
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
HDRS
"${DIR}/vlog_is_on.h"
DEPS
absl::absl_vlog_is_on
)
absl_cc_library(
NAME
log_internal_fnmatch
SRCS
"${DIR}/internal/fnmatch.cc"
HDRS
"${DIR}/internal/fnmatch.h"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
absl::strings
)
# Test targets
set(DIR ${ABSL_ROOT_DIR}/absl/memory)
absl_cc_library(
@ -2147,6 +2263,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::compare
absl::config
absl::core_headers
absl::bits
@ -2176,6 +2293,8 @@ absl_cc_library(
PUBLIC
)
set(DIR ${ABSL_ROOT_DIR}/absl/profiling)
absl_cc_library(
NAME
sample_recorder
@ -2188,8 +2307,6 @@ absl_cc_library(
absl::synchronization
)
set(DIR ${ABSL_ROOT_DIR}/absl/profiling)
absl_cc_library(
NAME
exponential_biased
@ -2265,6 +2382,7 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
absl::fast_type_id
absl::optional
)
@ -2336,11 +2454,13 @@ absl_cc_library(
DEPS
absl::config
absl::inlined_vector
absl::nullability
absl::random_internal_pool_urbg
absl::random_internal_salted_seed_seq
absl::random_internal_seed_material
absl::random_seed_gen_exception
absl::span
absl::string_view
)
# Internal-only target, do not depend on directly.
@ -2399,6 +2519,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
$<$<BOOL:${MINGW}>:-lbcrypt>
DEPS
absl::core_headers
absl::optional
@ -2658,6 +2779,29 @@ absl_cc_library(
absl::config
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
random_internal_distribution_test_util
SRCS
"${DIR}/internal/chi_square.cc"
"${DIR}/internal/distribution_test_util.cc"
HDRS
"${DIR}/internal/chi_square.h"
"${DIR}/internal/distribution_test_util.h"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
absl::core_headers
absl::raw_logging_internal
absl::strings
absl::str_format
absl::span
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
@ -2699,6 +2843,8 @@ absl_cc_library(
absl::function_ref
absl::inlined_vector
absl::memory
absl::no_destructor
absl::nullability
absl::optional
absl::raw_logging_internal
absl::span
@ -2724,8 +2870,11 @@ absl_cc_library(
absl::base
absl::config
absl::core_headers
absl::has_ostream_operator
absl::nullability
absl::raw_logging_internal
absl::status
absl::str_format
absl::strings
absl::type_traits
absl::utility
@ -2748,6 +2897,7 @@ absl_cc_library(
absl::base
absl::config
absl::core_headers
absl::nullability
absl::throw_delegate
PUBLIC
)
@ -2762,6 +2912,7 @@ absl_cc_library(
"${DIR}/has_absl_stringify.h"
"${DIR}/internal/damerau_levenshtein_distance.h"
"${DIR}/internal/string_constant.h"
"${DIR}/internal/has_absl_stringify.h"
"${DIR}/match.h"
"${DIR}/numbers.h"
"${DIR}/str_cat.h"
@ -2805,6 +2956,7 @@ absl_cc_library(
absl::endian
absl::int128
absl::memory
absl::nullability
absl::raw_logging_internal
absl::throw_delegate
absl::type_traits
@ -2824,6 +2976,18 @@ absl_cc_library(
PUBLIC
)
absl_cc_library(
NAME
has_ostream_operator
HDRS
"${DIR}/has_ostream_operator.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
PUBLIC
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
@ -2855,7 +3019,12 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::core_headers
absl::nullability
absl::span
absl::str_format_internal
absl::string_view
PUBLIC
)
@ -2886,6 +3055,7 @@ absl_cc_library(
absl::strings
absl::config
absl::core_headers
absl::fixed_array
absl::inlined_vector
absl::numeric_representation
absl::type_traits
@ -2989,6 +3159,7 @@ absl_cc_library(
DEPS
absl::base
absl::config
absl::no_destructor
absl::raw_logging_internal
absl::synchronization
)
@ -3079,6 +3250,7 @@ absl_cc_library(
absl::endian
absl::function_ref
absl::inlined_vector
absl::nullability
absl::optional
absl::raw_logging_internal
absl::span
@ -3246,6 +3418,8 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
Threads::Threads
# TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
# minimum CMake version >= 3.24
$<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
)
@ -3286,8 +3460,8 @@ absl_cc_library(
NAME
bad_any_cast_impl
SRCS
"${DIR}/bad_any_cast.h"
"${DIR}/bad_any_cast.cc"
"${DIR}/bad_any_cast.h"
"${DIR}/bad_any_cast.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
@ -3307,6 +3481,7 @@ absl_cc_library(
DEPS
absl::algorithm
absl::core_headers
absl::nullability
absl::throw_delegate
absl::type_traits
PUBLIC
@ -3327,6 +3502,7 @@ absl_cc_library(
absl::config
absl::core_headers
absl::memory
absl::nullability
absl::type_traits
absl::utility
PUBLIC
@ -3389,6 +3565,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::core_headers
absl::type_traits
PUBLIC


@ -41,8 +41,7 @@
"docker/test/stateless": {
"name": "clickhouse/stateless-test",
"dependent": [
"docker/test/stateful",
"docker/test/unit"
"docker/test/stateful"
]
},
"docker/test/stateful": {
@ -122,15 +121,16 @@
"docker/test/base": {
"name": "clickhouse/test-base",
"dependent": [
"docker/test/clickbench",
"docker/test/fuzzer",
"docker/test/libfuzzer",
"docker/test/integration/base",
"docker/test/keeper-jepsen",
"docker/test/libfuzzer",
"docker/test/server-jepsen",
"docker/test/sqllogic",
"docker/test/sqltest",
"docker/test/clickbench",
"docker/test/stateless"
"docker/test/stateless",
"docker/test/unit"
]
},
"docker/test/integration/kerberized_hadoop": {


@ -1,9 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/unit-test .
ARG FROM_TAG=latest
FROM clickhouse/stateless-test:$FROM_TAG
RUN apt-get install gdb
FROM clickhouse/test-base:$FROM_TAG
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]


@ -974,10 +974,12 @@ Default value: false
- [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting
### allow_experimental_optimized_row_order
### optimize_row_order
Controls if the row order should be optimized during inserts to improve the compressibility of the newly inserted table part.
Only has an effect for ordinary MergeTree-engine tables. Does nothing for specialized MergeTree engine tables (e.g. CollapsingMergeTree).
MergeTree tables are (optionally) compressed using [compression codecs](../../sql-reference/statements/create/table.md#column_compression_codec).
Generic compression codecs such as LZ4 and ZSTD achieve maximum compression rates if the data exposes patterns.
Long runs of the same value typically compress very well.
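As a minimal sketch of how the renamed setting is enabled (mirroring a test added in this commit; the table and column names are illustrative):
```sql
-- Rows within each range of equal sorting-key values may be reshuffled at
-- insert and merge time to produce longer runs of equal values, which
-- generic codecs such as LZ4 and ZSTD compress better.
CREATE TABLE tab (
    name String,
    event Int8
) ENGINE = MergeTree
ORDER BY name
SETTINGS optimize_row_order = true;

INSERT INTO tab VALUES ('Igor', 3), ('Egor', 1), ('Egor', 2), ('Igor', 2), ('Igor', 1);
```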


@ -18,7 +18,7 @@ This tool works via HTTP, not via pipes, shared memory, or TCP because:
However, it can be used as a standalone tool from the command line with the following
parameters in the POST-request URL:
- `connection_string` -- ODBC connection string.
- `columns` -- columns in ClickHouse NamesAndTypesList format, name in backticks,
- `sample_block` -- columns description in ClickHouse NamesAndTypesList format, name in backticks,
type as string. Name and type are space separated, rows separated with
newline.
- `max_block_size` -- optional parameter, sets maximum size of single block.
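In practice the bridge is spawned by clickhouse-server rather than invoked by hand; below is a hedged sketch of a query that drives it through this HTTP interface (the DSN, schema, and table names are hypothetical):
```sql
-- clickhouse-server launches clickhouse-odbc-bridge and sends it a POST
-- request whose URL carries connection_string, sample_block, and
-- max_block_size as described above.
SELECT * FROM odbc('DSN=postgresql_dsn', 'public', 'some_table') LIMIT 10;
```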


@ -0,0 +1,95 @@
---
slug: /en/sql-reference/aggregate-functions/reference/flamegraph
sidebar_position: 110
---
# flameGraph
Aggregate function which builds a [flamegraph](https://www.brendangregg.com/flamegraphs.html) using the list of stacktraces. Outputs an array of strings which can be used by [flamegraph.pl utility](https://github.com/brendangregg/FlameGraph) to render an SVG of the flamegraph.
## Syntax
```sql
flameGraph(traces, [size], [ptr])
```
## Parameters
- `traces` — a stacktrace. [Array](../../data-types/array.md)([UInt64](../../data-types/int-uint.md)).
- `size` — an allocation size for memory profiling. (optional - default `1`). [UInt64](../../data-types/int-uint.md).
- `ptr` — an allocation address. (optional - default `0`). [UInt64](../../data-types/int-uint.md).
:::note
In the case where `ptr != 0`, a flameGraph will map allocations (size > 0) and deallocations (size < 0) with the same size and ptr.
Only allocations which were not freed are shown. Non-mapped deallocations are ignored.
:::
## Returned value
- An array of strings for use with [flamegraph.pl utility](https://github.com/brendangregg/FlameGraph). [Array](../../data-types/array.md)([String](../../data-types/string.md)).
## Examples
### Building a flamegraph based on a CPU query profiler
```sql
SET query_profiler_cpu_time_period_ns=10000000;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
```
```text
clickhouse client --allow_introspection_functions=1 -q "select arrayJoin(flameGraph(arrayReverse(trace))) from system.trace_log where trace_type = 'CPU' and query_id = 'xxx'" | ~/dev/FlameGraph/flamegraph.pl > flame_cpu.svg
```
### Building a flamegraph based on a memory query profiler, showing all allocations
```sql
SET memory_profiler_sample_probability=1, max_untracked_memory=1;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
```
```text
clickhouse client --allow_introspection_functions=1 -q "select arrayJoin(flameGraph(trace, size)) from system.trace_log where trace_type = 'MemorySample' and query_id = 'xxx'" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem.svg
```
### Building a flamegraph based on a memory query profiler, showing allocations which were not deallocated in query context
```sql
SET memory_profiler_sample_probability=1, max_untracked_memory=1, use_uncompressed_cache=1, merge_tree_max_rows_to_use_cache=100000000000, merge_tree_max_bytes_to_use_cache=1000000000000;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
```
```text
clickhouse client --allow_introspection_functions=1 -q "SELECT arrayJoin(flameGraph(trace, size, ptr)) FROM system.trace_log WHERE trace_type = 'MemorySample' AND query_id = 'xxx'" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_untracked.svg
```
### Building a flamegraph based on a memory query profiler, showing active allocations at a fixed point in time
```sql
SET memory_profiler_sample_probability=1, max_untracked_memory=1;
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
```
- 1 - Memory usage per second
```sql
SELECT event_time, m, formatReadableSize(max(s) as m) FROM (SELECT event_time, sum(size) OVER (ORDER BY event_time) AS s FROM system.trace_log WHERE query_id = 'xxx' AND trace_type = 'MemorySample') GROUP BY event_time ORDER BY event_time;
```
- 2 - Find a time point with maximal memory usage
```sql
SELECT argMax(event_time, s), max(s) FROM (SELECT event_time, sum(size) OVER (ORDER BY event_time) AS s FROM system.trace_log WHERE query_id = 'xxx' AND trace_type = 'MemorySample');
```
- 3 - Show active allocations at a fixed point in time
```text
clickhouse client --allow_introspection_functions=1 -q "SELECT arrayJoin(flameGraph(trace, size, ptr)) FROM (SELECT * FROM system.trace_log WHERE trace_type = 'MemorySample' AND query_id = 'xxx' AND event_time <= 'yyy' ORDER BY event_time)" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_pos.svg
```
- 4 - Find deallocations at a fixed point in time
```text
clickhouse client --allow_introspection_functions=1 -q "SELECT arrayJoin(flameGraph(trace, -size, ptr)) FROM (SELECT * FROM system.trace_log WHERE trace_type = 'MemorySample' AND query_id = 'xxx' AND event_time > 'yyy' ORDER BY event_time desc)" | ~/dev/FlameGraph/flamegraph.pl --countname=bytes --color=mem > flame_mem_time_point_neg.svg
```


@ -58,6 +58,7 @@ ClickHouse-specific aggregate functions:
- [topKWeighted](../reference/topkweighted.md)
- [deltaSum](../reference/deltasum.md)
- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
- [flameGraph](../reference/flame_graph.md)
- [groupArray](../reference/grouparray.md)
- [groupArrayLast](../reference/grouparraylast.md)
- [groupUniqArray](../reference/groupuniqarray.md)


@ -75,7 +75,12 @@ struct ScopeAliases
if (jt == transitive_aliases.end())
return {};
key = &(getKey(jt->second, find_option));
const auto & new_key = getKey(jt->second, find_option);
/// Ignore potential cyclic aliases.
if (new_key == *key)
return {};
key = &new_key;
it = alias_map.find(*key);
}


@ -54,9 +54,9 @@ namespace
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
settings.auth_settings.region,
context->getRemoteHostFilter(),
static_cast<unsigned>(local_settings.s3_max_redirects),
static_cast<unsigned>(local_settings.backup_restore_s3_retry_attempts),
local_settings.enable_s3_requests_logging,
static_cast<unsigned>(global_settings.s3_max_redirects),
static_cast<unsigned>(global_settings.s3_retry_attempts),
global_settings.enable_s3_requests_logging,
/* for_disk_s3 = */ false,
request_settings.get_request_throttler,
request_settings.put_request_throttler,


@ -188,6 +188,18 @@ NamesAndTypesList NamesAndTypesList::filter(const Names & names) const
return filter(NameSet(names.begin(), names.end()));
}
NamesAndTypesList NamesAndTypesList::eraseNames(const NameSet & names) const
{
NamesAndTypesList res;
for (const auto & column : *this)
{
if (!names.contains(column.name))
res.push_back(column);
}
return res;
}
NamesAndTypesList NamesAndTypesList::addTypes(const Names & names) const
{
/// NOTE: It's better to make a map in `IStorage` than to create it here every time again.


@ -111,6 +111,9 @@ public:
/// Leave only the columns whose names are in the `names`. In `names` there can be superfluous columns.
NamesAndTypesList filter(const Names & names) const;
/// Leave only the columns whose names are not in the `names`.
NamesAndTypesList eraseNames(const NameSet & names) const;
/// Unlike `filter`, returns columns in the order in which they go in `names`.
NamesAndTypesList addTypes(const Names & names) const;


@ -517,7 +517,6 @@ class IColumn;
M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, "Maximum size of batch for multi request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_s3_retry_attempts, 1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore.", 0) \
M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
\
M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \


@ -115,7 +115,6 @@ static const std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges
{"http_max_chunk_size", 0, 0, "Internal limitation"},
{"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."},
{"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"},
{"backup_restore_s3_retry_attempts", 0, 1000, "A new setting."},
{"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"},
{"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"},
{"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."},


@ -382,6 +382,7 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e
{
std::vector<Aws::S3::Model::ObjectIdentifier> current_chunk;
String keys;
size_t first_position = current_position;
for (; current_position < objects.size() && current_chunk.size() < chunk_size_limit; ++current_position)
{
Aws::S3::Model::ObjectIdentifier obj;
@ -407,9 +408,9 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e
{
const auto * outcome_error = outcome.IsSuccess() ? nullptr : &outcome.GetError();
auto time_now = std::chrono::system_clock::now();
for (const auto & object : objects)
for (size_t i = first_position; i < current_position; ++i)
blob_storage_log->addEvent(BlobStorageLogElement::EventType::Delete,
uri.bucket, object.remote_path, object.local_path, object.bytes_size,
uri.bucket, objects[i].remote_path, objects[i].local_path, objects[i].bytes_size,
outcome_error, time_now);
}


@ -162,7 +162,7 @@ public:
class RetryStrategy : public Aws::Client::RetryStrategy
{
public:
explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 5000);
explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 90000);
/// NOLINTNEXTLINE(google-runtime-int)
bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override;


@ -504,6 +504,10 @@ void SystemLog<LogElement>::flushImpl(const std::vector<LogElement> & to_flush,
Block block(std::move(log_element_columns));
MutableColumns columns = block.mutateColumns();
for (auto & column : columns)
column->reserve(to_flush.size());
for (const auto & elem : to_flush)
elem.appendToBlock(columns);
@ -532,7 +536,8 @@ void SystemLog<LogElement>::flushImpl(const std::vector<LogElement> & to_flush,
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
tryLogCurrentException(__PRETTY_FUNCTION__, fmt::format("Failed to flush system log {} with {} entries up to offset {}",
table_id.getNameForLogs(), to_flush.size(), to_flush_end));
}
queue->confirm(to_flush_end);


@ -19,18 +19,18 @@ public:
size_t sum_index_columns = 0;
size_t sum_ordinary_columns = 0;
ColumnSizeEstimator(ColumnToSize && map_, const Names & key_columns, const Names & ordinary_columns)
ColumnSizeEstimator(ColumnToSize && map_, const NamesAndTypesList & key_columns, const NamesAndTypesList & ordinary_columns)
: map(std::move(map_))
{
for (const auto & name : key_columns)
for (const auto & [name, _] : key_columns)
if (!map.contains(name)) map[name] = 0;
for (const auto & name : ordinary_columns)
for (const auto & [name, _] : ordinary_columns)
if (!map.contains(name)) map[name] = 0;
for (const auto & name : key_columns)
for (const auto & [name, _] : key_columns)
sum_index_columns += map.at(name);
for (const auto & name : ordinary_columns)
for (const auto & [name, _] : ordinary_columns)
sum_ordinary_columns += map.at(name);
sum_total = std::max(static_cast<decltype(sum_index_columns)>(1), sum_index_columns + sum_ordinary_columns);


@ -48,59 +48,23 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
}
/// PK columns are sorted and merged, ordinary columns are gathered using info from merge step
static void extractMergingAndGatheringColumns(
const NamesAndTypesList & storage_columns,
const ExpressionActionsPtr & sorting_key_expr,
const IndicesDescription & indexes,
const MergeTreeData::MergingParams & merging_params,
NamesAndTypesList & gathering_columns, Names & gathering_column_names,
NamesAndTypesList & merging_columns, Names & merging_column_names)
static ColumnsStatistics getStatisticsForColumns(
const NamesAndTypesList & columns_to_read,
const StorageMetadataPtr & metadata_snapshot)
{
Names sort_key_columns_vec = sorting_key_expr->getRequiredColumns();
std::set<String> key_columns(sort_key_columns_vec.cbegin(), sort_key_columns_vec.cend());
for (const auto & index : indexes)
ColumnsStatistics all_statistics;
const auto & all_columns = metadata_snapshot->getColumns();
for (const auto & column : columns_to_read)
{
Names index_columns_vec = index.expression->getRequiredColumns();
std::copy(index_columns_vec.cbegin(), index_columns_vec.cend(),
std::inserter(key_columns, key_columns.end()));
}
/// Force sign column for Collapsing mode
if (merging_params.mode == MergeTreeData::MergingParams::Collapsing)
key_columns.emplace(merging_params.sign_column);
/// Force version column for Replacing mode
if (merging_params.mode == MergeTreeData::MergingParams::Replacing)
{
key_columns.emplace(merging_params.is_deleted_column);
key_columns.emplace(merging_params.version_column);
}
/// Force sign column for VersionedCollapsing mode. Version is already in primary key.
if (merging_params.mode == MergeTreeData::MergingParams::VersionedCollapsing)
key_columns.emplace(merging_params.sign_column);
/// Force to merge at least one column in case of empty key
if (key_columns.empty())
key_columns.emplace(storage_columns.front().name);
/// TODO: also force "summing" and "aggregating" columns to make Horizontal merge only for such columns
for (const auto & column : storage_columns)
{
if (key_columns.contains(column.name))
const auto * desc = all_columns.tryGet(column.name);
if (desc && !desc->statistics.empty())
{
merging_columns.emplace_back(column);
merging_column_names.emplace_back(column.name);
}
else
{
gathering_columns.emplace_back(column);
gathering_column_names.emplace_back(column.name);
auto statistics = MergeTreeStatisticsFactory::instance().get(desc->statistics);
all_statistics.push_back(std::move(statistics));
}
}
return all_statistics;
}
static void addMissedColumnsToSerializationInfos(
@ -129,6 +93,77 @@ static void addMissedColumnsToSerializationInfos(
}
}
/// PK columns are sorted and merged, ordinary columns are gathered using info from merge step
void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColumns() const
{
const auto & sorting_key_expr = global_ctx->metadata_snapshot->getSortingKey().expression;
Names sort_key_columns_vec = sorting_key_expr->getRequiredColumns();
std::set<String> key_columns(sort_key_columns_vec.cbegin(), sort_key_columns_vec.cend());
/// Force sign column for Collapsing mode
if (ctx->merging_params.mode == MergeTreeData::MergingParams::Collapsing)
key_columns.emplace(ctx->merging_params.sign_column);
/// Force version column for Replacing mode
if (ctx->merging_params.mode == MergeTreeData::MergingParams::Replacing)
{
key_columns.emplace(ctx->merging_params.is_deleted_column);
key_columns.emplace(ctx->merging_params.version_column);
}
/// Force sign column for VersionedCollapsing mode. Version is already in primary key.
if (ctx->merging_params.mode == MergeTreeData::MergingParams::VersionedCollapsing)
key_columns.emplace(ctx->merging_params.sign_column);
/// Force to merge at least one column in case of empty key
if (key_columns.empty())
key_columns.emplace(global_ctx->storage_columns.front().name);
const auto & skip_indexes = global_ctx->metadata_snapshot->getSecondaryIndices();
for (const auto & index : skip_indexes)
{
auto index_columns = index.expression->getRequiredColumns();
/// Calculate indexes that depend only on one column on vertical
/// stage and other indexes on horizontal stage of merge.
if (index_columns.size() == 1)
{
const auto & column_name = index_columns.front();
global_ctx->skip_indexes_by_column[column_name].push_back(index);
}
else
{
std::ranges::copy(index_columns, std::inserter(key_columns, key_columns.end()));
global_ctx->merging_skip_indexes.push_back(index);
}
}
/// TODO: also force "summing" and "aggregating" columns to make Horizontal merge only for such columns
for (const auto & column : global_ctx->storage_columns)
{
if (key_columns.contains(column.name))
{
global_ctx->merging_columns.emplace_back(column);
/// If column is in horizontal stage we need to calculate its indexes on horizontal stage as well
auto it = global_ctx->skip_indexes_by_column.find(column.name);
if (it != global_ctx->skip_indexes_by_column.end())
{
for (auto & index : it->second)
global_ctx->merging_skip_indexes.push_back(std::move(index));
global_ctx->skip_indexes_by_column.erase(it);
}
}
else
{
global_ctx->gathering_columns.emplace_back(column);
}
}
}
bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
{
@ -196,27 +231,18 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
if (!global_ctx->parent_part)
global_ctx->temporary_directory_lock = global_ctx->data->getTemporaryPartDirectoryHolder(local_tmp_part_basename);
global_ctx->all_column_names = global_ctx->metadata_snapshot->getColumns().getNamesOfPhysical();
global_ctx->storage_columns = global_ctx->metadata_snapshot->getColumns().getAllPhysical();
auto object_columns = MergeTreeData::getConcreteObjectColumns(global_ctx->future_part->parts, global_ctx->metadata_snapshot->getColumns());
extendObjectColumns(global_ctx->storage_columns, object_columns, false);
global_ctx->storage_snapshot = std::make_shared<StorageSnapshot>(*global_ctx->data, global_ctx->metadata_snapshot, std::move(object_columns));
extractMergingAndGatheringColumns(
global_ctx->storage_columns,
global_ctx->metadata_snapshot->getSortingKey().expression,
global_ctx->metadata_snapshot->getSecondaryIndices(),
ctx->merging_params,
global_ctx->gathering_columns,
global_ctx->gathering_column_names,
global_ctx->merging_columns,
global_ctx->merging_column_names);
extractMergingAndGatheringColumns();
global_ctx->new_data_part->uuid = global_ctx->future_part->uuid;
global_ctx->new_data_part->partition.assign(global_ctx->future_part->getPartition());
global_ctx->new_data_part->is_temp = global_ctx->parent_part == nullptr;
/// In case of replicated merge tree with zero copy replication
/// Here ClickHouse claims that this new part can be deleted in temporary state without unlocking the blobs
/// The blobs have to be removed along with the part, this temporary part owns them and does not share them yet.
@ -278,6 +304,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
ctx->sum_input_rows_upper_bound = global_ctx->merge_list_element_ptr->total_rows_count;
ctx->sum_compressed_bytes_upper_bound = global_ctx->merge_list_element_ptr->total_size_bytes_compressed;
global_ctx->chosen_merge_algorithm = chooseMergeAlgorithm();
global_ctx->merge_list_element_ptr->merge_algorithm.store(global_ctx->chosen_merge_algorithm, std::memory_order_relaxed);
@ -298,9 +325,9 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
case MergeAlgorithm::Horizontal:
{
global_ctx->merging_columns = global_ctx->storage_columns;
global_ctx->merging_column_names = global_ctx->all_column_names;
global_ctx->merging_skip_indexes = global_ctx->metadata_snapshot->getSecondaryIndices();
global_ctx->gathering_columns.clear();
global_ctx->gathering_column_names.clear();
global_ctx->skip_indexes_by_column.clear();
break;
}
case MergeAlgorithm::Vertical:
@ -309,13 +336,13 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
ctx->rows_sources_write_buf = std::make_unique<CompressedWriteBuffer>(*ctx->rows_sources_uncompressed_write_buf);
std::map<String, UInt64> local_merged_column_to_size;
for (const MergeTreeData::DataPartPtr & part : global_ctx->future_part->parts)
for (const auto & part : global_ctx->future_part->parts)
part->accumulateColumnSizes(local_merged_column_to_size);
ctx->column_sizes = ColumnSizeEstimator(
std::move(local_merged_column_to_size),
global_ctx->merging_column_names,
global_ctx->gathering_column_names);
global_ctx->merging_columns,
global_ctx->gathering_columns);
break;
}
@ -323,9 +350,6 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
throw Exception(ErrorCodes::LOGICAL_ERROR, "Merge algorithm must be chosen");
}
assert(global_ctx->gathering_columns.size() == global_ctx->gathering_column_names.size());
assert(global_ctx->merging_columns.size() == global_ctx->merging_column_names.size());
/// If merge is vertical we cannot calculate it
ctx->blocks_are_granules_size = (global_ctx->chosen_merge_algorithm == MergeAlgorithm::Vertical);
@ -342,28 +366,25 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
/// resources for this).
if (!ctx->need_remove_expired_values)
{
size_t expired_columns = 0;
auto part_serialization_infos = global_ctx->new_data_part->getSerializationInfos();
NameSet columns_to_remove;
for (auto & [column_name, ttl] : global_ctx->new_data_part->ttl_infos.columns_ttl)
{
if (ttl.finished())
{
global_ctx->new_data_part->expired_columns.insert(column_name);
LOG_TRACE(ctx->log, "Adding expired column {} for part {}", column_name, global_ctx->new_data_part->name);
std::erase(global_ctx->gathering_column_names, column_name);
std::erase(global_ctx->merging_column_names, column_name);
std::erase(global_ctx->all_column_names, column_name);
columns_to_remove.insert(column_name);
part_serialization_infos.erase(column_name);
++expired_columns;
}
}
if (expired_columns)
if (!columns_to_remove.empty())
{
global_ctx->gathering_columns = global_ctx->gathering_columns.filter(global_ctx->gathering_column_names);
global_ctx->merging_columns = global_ctx->merging_columns.filter(global_ctx->merging_column_names);
global_ctx->storage_columns = global_ctx->storage_columns.filter(global_ctx->all_column_names);
global_ctx->gathering_columns = global_ctx->gathering_columns.eraseNames(columns_to_remove);
global_ctx->merging_columns = global_ctx->merging_columns.eraseNames(columns_to_remove);
global_ctx->storage_columns = global_ctx->storage_columns.eraseNames(columns_to_remove);
global_ctx->new_data_part->setColumns(
global_ctx->storage_columns,
@ -376,8 +397,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
global_ctx->new_data_part,
global_ctx->metadata_snapshot,
global_ctx->merging_columns,
MergeTreeIndexFactory::instance().getMany(global_ctx->metadata_snapshot->getSecondaryIndices()),
MergeTreeStatisticsFactory::instance().getMany(global_ctx->metadata_snapshot->getColumns()),
MergeTreeIndexFactory::instance().getMany(global_ctx->merging_skip_indexes),
getStatisticsForColumns(global_ctx->merging_columns, global_ctx->metadata_snapshot),
ctx->compression_codec,
global_ctx->txn ? global_ctx->txn->tid : Tx::PrehistoricTID,
/*reset_columns=*/ true,
@ -407,9 +428,7 @@ void MergeTask::addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const Str
return;
global_ctx->storage_columns.emplace_back(name, type);
global_ctx->all_column_names.emplace_back(name);
global_ctx->gathering_columns.emplace_back(name, type);
global_ctx->gathering_column_names.emplace_back(name);
}
@ -423,7 +442,6 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g
new_ctx->compression_codec = std::move(ctx->compression_codec);
new_ctx->tmp_disk = std::move(ctx->tmp_disk);
new_ctx->it_name_and_type = std::move(ctx->it_name_and_type);
new_ctx->column_num_for_vertical_merge = std::move(ctx->column_num_for_vertical_merge);
new_ctx->read_with_direct_io = std::move(ctx->read_with_direct_io);
new_ctx->need_sync = std::move(ctx->need_sync);
@ -510,7 +528,7 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const
size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read;
size_t input_rows_filtered = *global_ctx->input_rows_filtered;
global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_column_names.size();
global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size();
global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed);
ctx->rows_sources_write_buf->next();
@ -546,14 +564,12 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const
/// Move ownership from std::unique_ptr<ReadBuffer> to std::unique_ptr<ReadBufferFromFile> for CompressedReadBufferFromFile.
/// First, release ownership from unique_ptr to base type.
reread_buf.release(); /// NOLINT(bugprone-unused-return-value,hicpp-ignored-remove-result): we already have the pointer value in `reread_buffer_raw`
/// Then, move ownership to unique_ptr to concrete type.
std::unique_ptr<ReadBufferFromFile> reread_buffer_from_file(reread_buffer_raw);
/// CompressedReadBufferFromFile expects std::unique_ptr<ReadBufferFromFile> as argument.
ctx->rows_sources_read_buf = std::make_unique<CompressedReadBufferFromFile>(std::move(reread_buffer_from_file));
/// For external cycle
global_ctx->gathering_column_names_size = global_ctx->gathering_column_names.size();
ctx->column_num_for_vertical_merge = 0;
ctx->it_name_and_type = global_ctx->gathering_columns.cbegin();
const auto & settings = global_ctx->context->getSettingsRef();
@ -636,6 +652,21 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const
pipe.addTransform(std::move(transform));
MergeTreeIndices indexes_to_recalc;
auto indexes_it = global_ctx->skip_indexes_by_column.find(column_name);
if (indexes_it != global_ctx->skip_indexes_by_column.end())
{
indexes_to_recalc = MergeTreeIndexFactory::instance().getMany(indexes_it->second);
pipe.addTransform(std::make_shared<ExpressionTransform>(
pipe.getHeader(),
indexes_it->second.getSingleExpressionForIndices(global_ctx->metadata_snapshot->getColumns(),
global_ctx->data->getContext())));
pipe.addTransform(std::make_shared<MaterializingTransform>(pipe.getHeader()));
}
ctx->column_parts_pipeline = QueryPipeline(std::move(pipe));
/// Dereference unique_ptr
@ -646,19 +677,16 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const
/// Is calculated inside MergeProgressCallback.
ctx->column_parts_pipeline.disableProfileEventUpdate();
ctx->executor = std::make_unique<PullingPipelineExecutor>(ctx->column_parts_pipeline);
NamesAndTypesList columns_list = {*ctx->it_name_and_type};
ctx->column_to = std::make_unique<MergedColumnOnlyOutputStream>(
global_ctx->new_data_part,
global_ctx->metadata_snapshot,
ctx->executor->getHeader(),
columns_list,
ctx->compression_codec,
/// we don't need to recalc indices here
/// because all of them were already recalculated and written
/// as key part of vertical merge
std::vector<MergeTreeIndexPtr>{},
ColumnsStatistics{}, /// TODO(hanfei)
indexes_to_recalc,
getStatisticsForColumns(columns_list, global_ctx->metadata_snapshot),
&global_ctx->written_offset_columns,
global_ctx->to->getIndexGranularity());
@ -716,8 +744,7 @@ void MergeTask::VerticalMergeStage::finalizeVerticalMergeForOneColumn() const
global_ctx->merge_list_element_ptr->bytes_written_uncompressed += bytes;
global_ctx->merge_list_element_ptr->progress.store(ctx->progress_before + ctx->column_sizes->columnWeight(column_name), std::memory_order_relaxed);
/// This is the external cycle increment.
++ctx->column_num_for_vertical_merge;
/// This is the external loop increment.
++ctx->it_name_and_type;
}
@ -749,9 +776,9 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c
LOG_DEBUG(ctx->log,
"Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.",
global_ctx->merge_list_element_ptr->rows_read,
global_ctx->all_column_names.size(),
global_ctx->merging_column_names.size(),
global_ctx->gathering_column_names.size(),
global_ctx->storage_columns.size(),
global_ctx->merging_columns.size(),
global_ctx->gathering_columns.size(),
elapsed_seconds,
global_ctx->merge_list_element_ptr->rows_read / elapsed_seconds,
ReadableSize(global_ctx->merge_list_element_ptr->bytes_read_uncompressed / elapsed_seconds));
@ -888,7 +915,7 @@ bool MergeTask::VerticalMergeStage::executeVerticalMergeForAllColumns() const
return false;
/// This is the external cycle condition
if (ctx->column_num_for_vertical_merge >= global_ctx->gathering_column_names_size)
if (ctx->it_name_and_type == global_ctx->gathering_columns.end())
return false;
switch (ctx->vertical_merge_one_column_state)
@ -976,7 +1003,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream()
*global_ctx->data,
global_ctx->storage_snapshot,
part,
global_ctx->merging_column_names,
global_ctx->merging_columns.getNames(),
/*mark_ranges=*/ {},
global_ctx->input_rows_filtered,
/*apply_deleted_mask=*/ true,
@ -1115,12 +1142,12 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream()
/// If deduplicate_by_columns is empty, add all columns except virtuals.
if (global_ctx->deduplicate_by_columns.empty())
{
for (const auto & column_name : global_ctx->merging_column_names)
for (const auto & column : global_ctx->merging_columns)
{
if (virtuals.tryGet(column_name, VirtualsKind::Persistent))
if (virtuals.tryGet(column.name, VirtualsKind::Persistent))
continue;
global_ctx->deduplicate_by_columns.emplace_back(column_name);
global_ctx->deduplicate_by_columns.emplace_back(column.name);
}
}
@ -1141,11 +1168,13 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream()
builder->addTransform(std::move(transform));
}
if (global_ctx->metadata_snapshot->hasSecondaryIndices())
if (!global_ctx->merging_skip_indexes.empty())
{
const auto & indices = global_ctx->metadata_snapshot->getSecondaryIndices();
builder->addTransform(std::make_shared<ExpressionTransform>(
builder->getHeader(), indices.getSingleExpressionForIndices(global_ctx->metadata_snapshot->getColumns(), global_ctx->data->getContext())));
builder->getHeader(),
global_ctx->merging_skip_indexes.getSingleExpressionForIndices(global_ctx->metadata_snapshot->getColumns(),
global_ctx->data->getContext())));
builder->addTransform(std::make_shared<MaterializingTransform>(builder->getHeader()));
}


@ -24,6 +24,7 @@
#include <Storages/MergeTree/MergedColumnOnlyOutputStream.h>
#include <Storages/MergeTree/MergeProgress.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/MergeTreeIndices.h>
namespace DB
@ -164,14 +165,13 @@ private:
NamesAndTypesList gathering_columns{};
NamesAndTypesList merging_columns{};
Names gathering_column_names{};
Names merging_column_names{};
NamesAndTypesList storage_columns{};
Names all_column_names{};
MergeTreeData::DataPart::Checksums checksums_gathered_columns{};
IndicesDescription merging_skip_indexes;
std::unordered_map<String, IndicesDescription> skip_indexes_by_column;
MergeAlgorithm chosen_merge_algorithm{MergeAlgorithm::Undecided};
size_t gathering_column_names_size{0};
std::unique_ptr<MergeStageProgress> horizontal_stage_progress{nullptr};
std::unique_ptr<MergeStageProgress> column_progress{nullptr};
@ -232,7 +232,6 @@ private:
/// Dependencies for next stages
std::list<DB::NameAndTypePair>::const_iterator it_name_and_type;
size_t column_num_for_vertical_merge{0};
bool need_sync{false};
};
@ -260,12 +259,14 @@ private:
MergeAlgorithm chooseMergeAlgorithm() const;
void createMergedStream();
void extractMergingAndGatheringColumns() const;
void setRuntimeContext(StageRuntimeContextPtr local, StageRuntimeContextPtr global) override
{
ctx = static_pointer_cast<ExecuteAndFinalizeHorizontalPartRuntimeContext>(local);
global_ctx = static_pointer_cast<GlobalRuntimeContext>(global);
}
StageRuntimeContextPtr getContextForNextStage() override;
ExecuteAndFinalizeHorizontalPartRuntimeContextPtr ctx;
@ -284,7 +285,6 @@ private:
CompressionCodecPtr compression_codec;
TemporaryDataOnDiskPtr tmp_disk{nullptr};
std::list<DB::NameAndTypePair>::const_iterator it_name_and_type;
size_t column_num_for_vertical_merge{0};
bool read_with_direct_io{false};
bool need_sync{false};
/// End dependencies from previous stages


@ -503,7 +503,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl(
ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterBlocksAlreadySorted);
}
if (data.getSettings()->allow_experimental_optimized_row_order)
if (data.getSettings()->optimize_row_order
&& data.merging_params.mode == MergeTreeData::MergingParams::Mode::Ordinary) /// Nobody knows if this optimization messes up specialized MergeTree engines.
{
RowOrderOptimizer::optimize(block, sort_description, perm);
perm_ptr = &perm;
@ -730,7 +731,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl(
ProfileEvents::increment(ProfileEvents::MergeTreeDataProjectionWriterBlocksAlreadySorted);
}
if (data.getSettings()->allow_experimental_optimized_row_order)
if (data.getSettings()->optimize_row_order
&& data.merging_params.mode == MergeTreeData::MergingParams::Mode::Ordinary) /// Nobody knows if this optimization messes up specialized MergeTree engines.
{
RowOrderOptimizer::optimize(block, sort_description, perm);
perm_ptr = &perm;


@ -94,6 +94,7 @@ struct Settings;
M(Bool, async_insert, false, "If true, data from INSERT query is stored in queue and later flushed to table in background.", 0) \
M(Bool, add_implicit_sign_column_constraint_for_collapsing_engine, false, "If true, add implicit constraint for sign column for CollapsingMergeTree engine.", 0) \
M(Milliseconds, sleep_before_commit_local_part_in_replicated_table_ms, 0, "For testing. Do not change it.", 0) \
M(Bool, optimize_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \
\
/* Part removal settings. */ \
M(UInt64, simultaneous_parts_removal_limit, 0, "Maximum number of parts to remove during one CleanupThread iteration (0 means unlimited).", 0) \
@ -199,7 +200,6 @@ struct Settings;
M(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", 0) \
M(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \
M(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", 0) \
M(Bool, allow_experimental_optimized_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \
\
/** Compress marks and primary key. */ \
M(Bool, compress_marks, true, "Marks support compression, reduce mark file size and speed up network transmission.", 0) \


@ -13,15 +13,14 @@ namespace ErrorCodes
MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream(
const MergeTreeMutableDataPartPtr & data_part,
const StorageMetadataPtr & metadata_snapshot_,
const Block & header_,
const NamesAndTypesList & columns_list_,
CompressionCodecPtr default_codec,
const MergeTreeIndices & indices_to_recalc,
const ColumnsStatistics & stats_to_recalc_,
WrittenOffsetColumns * offset_columns_,
const MergeTreeIndexGranularity & index_granularity,
const MergeTreeIndexGranularityInfo * index_granularity_info)
: IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, header_.getNamesAndTypesList(), /*reset_columns=*/ true)
, header(header_)
: IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, columns_list_, /*reset_columns=*/ true)
{
const auto & global_settings = data_part->storage.getContext()->getSettings();
@ -37,7 +36,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream(
data_part->name, data_part->storage.getLogName(), data_part->getSerializations(),
data_part_storage, data_part->index_granularity_info,
storage_settings,
header.getNamesAndTypesList(),
columns_list_,
data_part->getColumnPositions(),
metadata_snapshot_,
data_part->storage.getVirtualsPtr(),


@ -17,7 +17,7 @@ public:
MergedColumnOnlyOutputStream(
const MergeTreeMutableDataPartPtr & data_part,
const StorageMetadataPtr & metadata_snapshot_,
const Block & header_,
const NamesAndTypesList & columns_list_,
CompressionCodecPtr default_codec_,
const MergeTreeIndices & indices_to_recalc_,
const ColumnsStatistics & stats_to_recalc_,
@ -25,16 +25,12 @@ public:
const MergeTreeIndexGranularity & index_granularity = {},
const MergeTreeIndexGranularityInfo * index_granularity_info_ = nullptr);
Block getHeader() const { return header; }
void write(const Block & block) override;
MergeTreeData::DataPart::Checksums
fillChecksums(MergeTreeData::MutableDataPartPtr & new_part, MergeTreeData::DataPart::Checksums & all_checksums);
void finish(bool sync);
private:
Block header;
};
using MergedColumnOnlyOutputStreamPtr = std::shared_ptr<MergedColumnOnlyOutputStream>;


@ -1901,7 +1901,7 @@ private:
ctx->out = std::make_shared<MergedColumnOnlyOutputStream>(
ctx->new_data_part,
ctx->metadata_snapshot,
ctx->updated_header,
ctx->updated_header.getNamesAndTypesList(),
ctx->compression_codec,
std::vector<MergeTreeIndexPtr>(ctx->indices_to_recalc.begin(), ctx->indices_to_recalc.end()),
ColumnsStatistics(ctx->stats_to_recalc.begin(), ctx->stats_to_recalc.end()),


@ -78,9 +78,8 @@ std::vector<size_t> getOtherColumnIndexes(const Block & block, const SortDescrip
/// --------
/// 2 1 a 3
/// ----------------------
EqualRanges getEqualRanges(const Block & block, const SortDescription & sort_description, const IColumn::Permutation & permutation, const LoggerPtr & log)
EqualRanges getEqualRanges(const Block & block, const SortDescription & sort_description, const IColumn::Permutation & permutation)
{
LOG_TRACE(log, "Finding equal ranges");
EqualRanges ranges;
const size_t rows = block.rows();
if (sort_description.empty())
@ -122,11 +121,10 @@ void updatePermutationInEqualRange(
const std::vector<size_t> & other_column_indexes,
IColumn::Permutation & permutation,
const EqualRange & equal_range,
const std::vector<size_t> & cardinalities)
const std::vector<size_t> & cardinalities,
const LoggerPtr & log)
{
LoggerPtr log = getLogger("RowOrderOptimizer");
LOG_TRACE(log, "Starting optimization in equal range");
LOG_TEST(log, "Starting optimization in equal range");
std::vector<size_t> column_order(other_column_indexes.size());
iota(column_order.begin(), column_order.end(), 0);
@ -134,17 +132,17 @@ void updatePermutationInEqualRange(
stable_sort(column_order.begin(), column_order.end(), cmp);
std::vector<EqualRange> ranges = {equal_range};
LOG_TRACE(log, "equal_range: .from: {}, .to: {}", equal_range.from, equal_range.to);
LOG_TEST(log, "equal_range: .from: {}, .to: {}", equal_range.from, equal_range.to);
for (size_t i : column_order)
{
const size_t column_id = other_column_indexes[i];
const ColumnPtr & column = block.getByPosition(column_id).column;
LOG_TRACE(log, "i: {}, column_id: {}, column->getName(): {}, cardinality: {}", i, column_id, column->getName(), cardinalities[i]);
LOG_TEST(log, "i: {}, column_id: {}, column type: {}, cardinality: {}", i, column_id, column->getName(), cardinalities[i]);
column->updatePermutation(
IColumn::PermutationSortDirection::Ascending, IColumn::PermutationSortStability::Stable, 0, 1, permutation, ranges);
}
LOG_TRACE(log, "Finish optimization in equal range");
LOG_TEST(log, "Finish optimization in equal range");
}
}
@ -156,7 +154,10 @@ void RowOrderOptimizer::optimize(const Block & block, const SortDescription & so
LOG_TRACE(log, "Starting optimization");
if (block.columns() == 0)
{
LOG_TRACE(log, "Finished optimization (block has no columns)");
return; /// a table without columns, this should not happen in the first place ...
}
if (permutation.empty())
{
@ -165,17 +166,17 @@ void RowOrderOptimizer::optimize(const Block & block, const SortDescription & so
        iota(permutation.data(), rows, IColumn::Permutation::value_type(0));
    }
    const EqualRanges equal_ranges = getEqualRanges(block, sort_description, permutation, log);
    const EqualRanges equal_ranges = getEqualRanges(block, sort_description, permutation);
    const std::vector<size_t> other_columns_indexes = getOtherColumnIndexes(block, sort_description);
    LOG_TRACE(log, "block.columns(): {}, block.rows(): {}, sort_description.size(): {}, equal_ranges.size(): {}", block.columns(), block.rows(), sort_description.size(), equal_ranges.size());
    LOG_TRACE(log, "columns: {}, sorting key columns: {}, rows: {}, equal ranges: {}", block.columns(), sort_description.size(), block.rows(), equal_ranges.size());
    for (const auto & equal_range : equal_ranges)
    {
        if (equal_range.size() <= 1)
            continue;
        const std::vector<size_t> cardinalities = getCardinalitiesInPermutedRange(block, other_columns_indexes, permutation, equal_range);
        updatePermutationInEqualRange(block, other_columns_indexes, permutation, equal_range, cardinalities);
        updatePermutationInEqualRange(block, other_columns_indexes, permutation, equal_range, cardinalities, log);
    }
    LOG_TRACE(log, "Finished optimization");

View File

@ -2,7 +2,6 @@
    <profiles>
        <default>
            <s3_retry_attempts>5</s3_retry_attempts>
            <backup_restore_s3_retry_attempts>5</backup_restore_s3_retry_attempts>
        </default>
    </profiles>
    <users>

View File

@ -1,2 +1,3 @@
1 2 3
1 5
300

View File

@ -30,3 +30,7 @@ WHERE (time_stamp_utc >= toDateTime('2024-04-25 00:00:00')) AND (time_stamp_utc
GROUP BY time_stamp_utc
ORDER BY Impressions DESC
LIMIT 1000;
drop table test_table;
create table test_table engine MergeTree order by sum as select 100 as sum union all select 200 as sum;
select sum as sum from (select sum(sum) as sum from test_table);

View File

@ -14,7 +14,7 @@ CREATE TABLE tab (
event Int8
) ENGINE = MergeTree
ORDER BY name
SETTINGS allow_experimental_optimized_row_order = true;
SETTINGS optimize_row_order = true;
INSERT INTO tab VALUES ('Igor', 3), ('Egor', 1), ('Egor', 2), ('Igor', 2), ('Igor', 1);
SELECT * FROM tab ORDER BY name SETTINGS max_threads=1;
@ -34,7 +34,7 @@ CREATE TABLE tab (
flag String
) ENGINE = MergeTree
ORDER BY ()
SETTINGS allow_experimental_optimized_row_order = True;
SETTINGS optimize_row_order = True;
INSERT INTO tab VALUES ('Bob', 4, 100, '1'), ('Nikita', 2, 54, '1'), ('Nikita', 1, 228, '1'), ('Alex', 4, 83, '1'), ('Alex', 4, 134, '1'), ('Alex', 1, 65, '0'), ('Alex', 4, 134, '1'), ('Bob', 2, 53, '0'), ('Alex', 4, 83, '0'), ('Alex', 1, 63, '1'), ('Bob', 2, 53, '1'), ('Alex', 4, 192, '1'), ('Alex', 2, 128, '1'), ('Nikita', 2, 148, '0'), ('Bob', 4, 177, '0'), ('Nikita', 1, 173, '0'), ('Alex', 1, 239, '0'), ('Alex', 1, 63, '0'), ('Alex', 2, 224, '1'), ('Bob', 4, 177, '0'), ('Alex', 2, 128, '1'), ('Alex', 4, 134, '0'), ('Alex', 4, 83, '1'), ('Bob', 4, 100, '0'), ('Nikita', 2, 54, '1'), ('Alex', 1, 239, '1'), ('Bob', 2, 187, '1'), ('Alex', 1, 65, '1'), ('Bob', 2, 53, '1'), ('Alex', 2, 224, '0'), ('Alex', 4, 192, '0'), ('Nikita', 1, 173, '1'), ('Nikita', 2, 148, '1'), ('Bob', 2, 187, '1'), ('Nikita', 2, 208, '1'), ('Nikita', 2, 208, '0'), ('Nikita', 1, 228, '0'), ('Nikita', 2, 148, '0');
SELECT * FROM tab SETTINGS max_threads=1;
@ -58,7 +58,7 @@ CREATE TABLE tab (
flag Nullable(Int32)
) ENGINE = MergeTree
ORDER BY (flag, money)
SETTINGS allow_experimental_optimized_row_order = True, allow_nullable_key = True;
SETTINGS optimize_row_order = True, allow_nullable_key = True;
INSERT INTO tab VALUES ('AB', 0, 42, Null), ('AB', 0, 42, Null), ('A', 1, 42, Null), ('AB', 1, 9.81, 0), ('B', 0, 42, Null), ('B', -1, 3.14, Null), ('B', 1, 2.7, 1), ('B', 0, 42, 1), ('A', 1, 42, 1), ('B', 1, 42, Null), ('B', 0, 2.7, 1), ('A', 0, 2.7, 1), ('B', 2, 3.14, Null), ('A', 0, 3.14, Null), ('A', 1, 2.7, 1), ('A', 1, 42, Null);
SELECT * FROM tab ORDER BY (flag, money) SETTINGS max_threads=1;
@ -89,7 +89,7 @@ CREATE TABLE tab (
tuple_column Tuple(UInt256)
) ENGINE = MergeTree()
ORDER BY (fixed_str, event_date)
SETTINGS allow_experimental_optimized_row_order = True;
SETTINGS optimize_row_order = True;
INSERT INTO tab VALUES ('A', '2020-01-01', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('A', '2020-01-01', [0.0, 1.1], NULL, 'example', {}, (26)), ('A', '2020-01-01', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)), ('A', '2020-01-02', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('A', '2020-01-02', [0.0, 2.2], 10, 'example', {}, (26)), ('A', '2020-01-02', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)), ('B', '2020-01-04', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('B', '2020-01-04', [0.0, 2.2], Null, 'example', {}, (26)), ('B', '2020-01-04', [2.2, 1.1], 1, 'some string', {'key2':'value2'}, (5)), ('B', '2020-01-05', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('B', '2020-01-05', [0.0, 2.2], Null, 'example', {}, (26)), ('B', '2020-01-05', [2.2, 1.1], 1, 'some other string', {'key':'value'}, (5)), ('C', '2020-01-04', [0.0, 1.1], 10, 'some string', {'key':'value'}, (5)), ('C', '2020-01-04', [0.0, 2.2], Null, 'example', {}, (26)), ('C', '2020-01-04', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5));

View File

@ -0,0 +1,33 @@
200
Expression ((Project names + Projection))
  Aggregating
    Expression (Before GROUP BY)
      Filter ((WHERE + Change column names to column identifiers))
        ReadFromMergeTree (default.t_ind_merge_1)
        Indexes:
          PrimaryKey
            Condition: true
            Parts: 2/2
            Granules: 32/32
          Skip
            Name: idx_b
            Description: minmax GRANULARITY 1
            Parts: 2/2
            Granules: 4/32
200
Expression ((Project names + Projection))
  Aggregating
    Expression (Before GROUP BY)
      Filter ((WHERE + Change column names to column identifiers))
        ReadFromMergeTree (default.t_ind_merge_1)
        Indexes:
          PrimaryKey
            Condition: true
            Parts: 1/1
            Granules: 32/32
          Skip
            Name: idx_b
            Description: minmax GRANULARITY 1
            Parts: 1/1
            Granules: 4/32
4 1 3
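
Worth noting, as an inference from the two plans above: both EXPLAIN outputs prune to 4/32 granules via idx_b, the first across 2 parts and the second on the single part left after the merge, so the skip index stays effective once the parts are merged; the test in the next file triggers that merge with OPTIMIZE TABLE t_ind_merge_1 FINAL.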

View File

@ -0,0 +1,39 @@
DROP TABLE IF EXISTS t_ind_merge_1;
SET allow_experimental_analyzer = 1;
CREATE TABLE t_ind_merge_1 (a UInt64, b UInt64, c UInt64, d UInt64, INDEX idx_b b TYPE minmax)
ENGINE = MergeTree
ORDER BY a SETTINGS
    index_granularity = 64,
    merge_max_block_size = 8192,
    vertical_merge_algorithm_min_rows_to_activate = 1,
    vertical_merge_algorithm_min_columns_to_activate = 1,
    min_bytes_for_wide_part = 0,
    min_bytes_for_full_part_storage = 0;
INSERT INTO t_ind_merge_1 SELECT number, number, rand(), rand() FROM numbers(1000);
INSERT INTO t_ind_merge_1 SELECT number, number, rand(), rand() FROM numbers(1000);
SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_indices = 'idx_b';
EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100;
OPTIMIZE TABLE t_ind_merge_1 FINAL;
SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_indices = 'idx_b';
EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100;
SYSTEM FLUSH LOGS;
WITH
    (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_1') AS uuid,
    extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups
SELECT
    groups[1] AS total,
    groups[2] AS merged,
    groups[3] AS gathered
FROM system.text_log
WHERE ((query_id = uuid || '::all_1_2_1') OR (query_id = currentDatabase() || '.t_ind_merge_1::all_1_2_1')) AND notEmpty(groups)
ORDER BY event_time_microseconds;
DROP TABLE t_ind_merge_1;
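
For orientation: the extractAllGroupsVertical pattern above pulls the column counts out of the vertical-merge summary logged to system.text_log. Given the expected reference output "4 1 3", the matched fragment of the message reads "containing 4 columns (1 merged, 3 gathered)"; presumably only the sorting key column a is merged in the horizontal stage, while b, c and d are gathered.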

View File

@ -0,0 +1,42 @@
DROP TABLE IF EXISTS t_ind_merge_2;
CREATE TABLE t_ind_merge_2 (
    a UInt64,
    b UInt64,
    c UInt64,
    d UInt64,
    e UInt64,
    f UInt64,
    INDEX idx_a a TYPE minmax,
    INDEX idx_b b TYPE minmax,
    INDEX idx_cd c * d TYPE minmax,
    INDEX idx_d1 d TYPE minmax,
    INDEX idx_d2 d + 7 TYPE set(3),
    INDEX idx_e e * 3 TYPE set(3))
ENGINE = MergeTree
ORDER BY a SETTINGS
    index_granularity = 64,
    vertical_merge_algorithm_min_rows_to_activate = 1,
    vertical_merge_algorithm_min_columns_to_activate = 1,
    min_bytes_for_wide_part = 0,
    min_bytes_for_full_part_storage = 0;
INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand() FROM numbers(1000);
INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand() FROM numbers(1000);
OPTIMIZE TABLE t_ind_merge_2 FINAL;
SYSTEM FLUSH LOGS;
--- merged: a, c, d; gathered: b, e, f
WITH
    (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_2') AS uuid,
    extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups
SELECT
    groups[1] AS total,
    groups[2] AS merged,
    groups[3] AS gathered
FROM system.text_log
WHERE ((query_id = uuid || '::all_1_2_1') OR (query_id = currentDatabase() || '.t_ind_merge_2::all_1_2_1')) AND notEmpty(groups)
ORDER BY event_time_microseconds;
DROP TABLE t_ind_merge_2;
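
A plausible reading of the expected split in the comment above (inferred from the schema, not stated by the test): a must be merged as the sorting key, and c and d take part in the multi-column index expression idx_cd (c * d), which cannot be computed while gathering one column at a time; b, e and f appear only in single-column index expressions or in none, so they can be gathered.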

View File

@ -222,6 +222,7 @@ DatabaseOrdinaryThreadsActive
DateTime
DateTimes
DbCL
deallocated
Decrypted
Deduplicate
Deduplication
@ -293,6 +294,7 @@ FilesystemMainPathUsedBytes
FilesystemMainPathUsedINodes
FixedString
FlameGraph
flameGraph
Flink
ForEach
FreeBSD