diff --git a/.gitmodules b/.gitmodules index bbc8fc7d06c..a3b6450032a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -332,7 +332,7 @@ url = https://github.com/ClickHouse/usearch.git [submodule "contrib/SimSIMD"] path = contrib/SimSIMD - url = https://github.com/ashvardanian/SimSIMD.git + url = https://github.com/ClickHouse/SimSIMD.git [submodule "contrib/FP16"] path = contrib/FP16 url = https://github.com/Maratyszcza/FP16.git diff --git a/CHANGELOG.md b/CHANGELOG.md index 90285582b4e..dacee73440f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -488,6 +488,7 @@ * Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). * Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). +* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)). #### New Feature * Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). @@ -599,7 +600,6 @@ * Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)). * Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)). * Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). -* Functions `bitShiftLeft` and `bitShitfRight` return an error for out of bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)). * Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)). * Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).
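As a quick illustration of the relocated `bitShiftLeft`/`bitShiftRight` changelog entry above, here is a minimal SQL sketch of the new out-of-bounds behavior (indicative only: the exact boundary condition and error message are not spelled out in the entry and may differ by version):

SELECT bitShiftLeft(toUInt8(1), 7);        -- 128: shifting within the 8-bit width of the value works as before
SELECT bitShiftLeft(toUInt8(1), 9);        -- now throws an exception instead of silently returning an arbitrary value
SELECT bitShiftRight(toUInt16(1024), 20);  -- likewise throws, because the shift position 20 exceeds the 16-bit width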
diff --git a/CMakeLists.txt b/CMakeLists.txt index f0965530739..a165be799c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -88,6 +88,7 @@ string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES) option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF) +option (ENABLE_FUZZER_TEST "Build testing fuzzers in order to test libFuzzer functionality" OFF) if (ENABLE_FUZZING) # Also set WITH_COVERAGE=1 for better fuzzing process diff --git a/SECURITY.md b/SECURITY.md index db302da8ecd..1b0648dc489 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit | Version | Supported | |:-|:-| +| 24.10 | ✔️ | | 24.9 | ✔️ | | 24.8 | ✔️ | -| 24.7 | ✔️ | +| 24.7 | ❌ | | 24.6 | ❌ | | 24.5 | ❌ | | 24.4 | ❌ | diff --git a/base/base/StringRef.h b/base/base/StringRef.h index aa2bce71032..74878b50545 100644 --- a/base/base/StringRef.h +++ b/base/base/StringRef.h @@ -86,7 +86,7 @@ using StringRefs = std::vector; * For more information, see hash_map_string_2.cpp */ -inline bool compare8(const char * p1, const char * p2) +inline bool compare16(const char * p1, const char * p2) { return 0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8( _mm_loadu_si128(reinterpret_cast(p1)), @@ -115,7 +115,7 @@ inline bool compare64(const char * p1, const char * p2) #elif defined(__aarch64__) && defined(__ARM_NEON) -inline bool compare8(const char * p1, const char * p2) +inline bool compare16(const char * p1, const char * p2) { uint64_t mask = getNibbleMask(vceqq_u8( vld1q_u8(reinterpret_cast(p1)), vld1q_u8(reinterpret_cast(p2)))); @@ -185,13 +185,22 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size) switch (size / 16) // NOLINT(bugprone-switch-missing-default-case) { - case 3: if (!compare8(p1 + 32, p2 + 32)) return false; [[fallthrough]]; - case 2: if (!compare8(p1 + 16, p2 + 16)) return false; [[fallthrough]]; - case 1: if (!compare8(p1, p2)) return false; [[fallthrough]]; + case 3: + if (!compare16(p1 + 32, p2 + 32)) + return false; + [[fallthrough]]; + case 2: + if (!compare16(p1 + 16, p2 + 16)) + return false; + [[fallthrough]]; + case 1: + if (!compare16(p1, p2)) + return false; + [[fallthrough]]; default: ; } - return compare8(p1 + size - 16, p2 + size - 16); + return compare16(p1 + size - 16, p2 + size - 16); } #endif diff --git a/base/base/chrono_io.h b/base/base/chrono_io.h index 4ee8dec6634..d55aa11bc1d 100644 --- a/base/base/chrono_io.h +++ b/base/base/chrono_io.h @@ -4,6 +4,7 @@ #include #include #include +#include inline std::string to_string(const std::time_t & time) @@ -11,18 +12,6 @@ inline std::string to_string(const std::time_t & time) return cctz::format("%Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(time), cctz::local_time_zone()); } -template -std::string to_string(const std::chrono::time_point & tp) -{ - // Don't use DateLUT because it shows weird characters for - // TimePoint::max(). I wish we could use C++20 format, but it's not - // there yet. 
- // return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp)); - - auto in_time_t = std::chrono::system_clock::to_time_t(tp); - return to_string(in_time_t); -} - template > std::string to_string(const std::chrono::duration & duration) { @@ -33,6 +22,20 @@ std::string to_string(const std::chrono::duration & duration) return std::to_string(seconds_as_double.count()) + "s"; } +template +std::string to_string(const std::chrono::time_point & tp) +{ + // Don't use DateLUT because it shows weird characters for + // TimePoint::max(). I wish we could use C++20 format, but it's not + // there yet. + // return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp)); + + if constexpr (std::is_same_v) + return to_string(std::chrono::system_clock::to_time_t(tp)); + else + return to_string(tp.time_since_epoch()); +} + template std::ostream & operator<<(std::ostream & o, const std::chrono::time_point & tp) { @@ -44,3 +47,23 @@ std::ostream & operator<<(std::ostream & o, const std::chrono::duration +struct fmt::formatter> : fmt::formatter +{ + template + auto format(const std::chrono::time_point & tp, FormatCtx & ctx) const + { + return fmt::formatter::format(::to_string(tp), ctx); + } +}; + +template +struct fmt::formatter> : fmt::formatter +{ + template + auto format(const std::chrono::duration & duration, FormatCtx & ctx) const + { + return fmt::formatter::format(::to_string(duration), ctx); + } +}; diff --git a/contrib/SimSIMD b/contrib/SimSIMD index ff51434d90c..ee3c9c9c00b 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit ff51434d90c66f916e94ff05b24530b127aa4cff +Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21 diff --git a/contrib/SimSIMD-cmake/CMakeLists.txt b/contrib/SimSIMD-cmake/CMakeLists.txt index f5dc4d63604..8350417479a 100644 --- a/contrib/SimSIMD-cmake/CMakeLists.txt +++ b/contrib/SimSIMD-cmake/CMakeLists.txt @@ -1,4 +1,8 @@ -set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") - -add_library(_simsimd INTERFACE) -target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include") +# See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86 +if (ARCH_AMD64) + set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") + set(SIMSIMD_SRCS ${SIMSIMD_PROJECT_DIR}/c/lib.c) + add_library(_simsimd ${SIMSIMD_SRCS}) + target_include_directories(_simsimd SYSTEM PUBLIC "${SIMSIMD_PROJECT_DIR}/include") + target_compile_definitions(_simsimd PUBLIC SIMSIMD_DYNAMIC_DISPATCH) +endif() diff --git a/contrib/arrow b/contrib/arrow index 5cfccd8ea65..6e2574f5013 160000 --- a/contrib/arrow +++ b/contrib/arrow @@ -1 +1 @@ -Subproject commit 5cfccd8ea65f33d4517e7409815d761c7650b45d +Subproject commit 6e2574f5013a005c050c9a7787d341aef09d0063 diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 96d1f4adda7..208d48df178 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -213,13 +213,19 @@ target_include_directories(_orc SYSTEM PRIVATE set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow") # arrow/cpp/src/arrow/CMakeLists.txt (ARROW_SRCS + ARROW_COMPUTE + ARROW_IPC) +# find . 
\( -iname \*.cc -o -iname \*.cpp -o -iname \*.c \) | sort | awk '{print "\"${LIBRARY_DIR}" substr($1,2) "\"" }' | grep -v 'test.cc' | grep -v 'json' | grep -v 'flight' \| +# grep -v 'csv' | grep -v 'acero' | grep -v 'dataset' | grep -v 'testing' | grep -v 'gpu' | grep -v 'engine' | grep -v 'filesystem' | grep -v 'benchmark.cc' set(ARROW_SRCS + "${LIBRARY_DIR}/adapters/orc/adapter.cc" + "${LIBRARY_DIR}/adapters/orc/options.cc" + "${LIBRARY_DIR}/adapters/orc/util.cc" "${LIBRARY_DIR}/array/array_base.cc" "${LIBRARY_DIR}/array/array_binary.cc" "${LIBRARY_DIR}/array/array_decimal.cc" "${LIBRARY_DIR}/array/array_dict.cc" "${LIBRARY_DIR}/array/array_nested.cc" "${LIBRARY_DIR}/array/array_primitive.cc" + "${LIBRARY_DIR}/array/array_run_end.cc" "${LIBRARY_DIR}/array/builder_adaptive.cc" "${LIBRARY_DIR}/array/builder_base.cc" "${LIBRARY_DIR}/array/builder_binary.cc" @@ -227,124 +233,26 @@ set(ARROW_SRCS "${LIBRARY_DIR}/array/builder_dict.cc" "${LIBRARY_DIR}/array/builder_nested.cc" "${LIBRARY_DIR}/array/builder_primitive.cc" - "${LIBRARY_DIR}/array/builder_union.cc" "${LIBRARY_DIR}/array/builder_run_end.cc" - "${LIBRARY_DIR}/array/array_run_end.cc" + "${LIBRARY_DIR}/array/builder_union.cc" "${LIBRARY_DIR}/array/concatenate.cc" "${LIBRARY_DIR}/array/data.cc" "${LIBRARY_DIR}/array/diff.cc" "${LIBRARY_DIR}/array/util.cc" "${LIBRARY_DIR}/array/validate.cc" - "${LIBRARY_DIR}/builder.cc" "${LIBRARY_DIR}/buffer.cc" - "${LIBRARY_DIR}/chunked_array.cc" - "${LIBRARY_DIR}/chunk_resolver.cc" - "${LIBRARY_DIR}/compare.cc" - "${LIBRARY_DIR}/config.cc" - "${LIBRARY_DIR}/datum.cc" - "${LIBRARY_DIR}/device.cc" - "${LIBRARY_DIR}/extension_type.cc" - "${LIBRARY_DIR}/memory_pool.cc" - "${LIBRARY_DIR}/pretty_print.cc" - "${LIBRARY_DIR}/record_batch.cc" - "${LIBRARY_DIR}/result.cc" - "${LIBRARY_DIR}/scalar.cc" - "${LIBRARY_DIR}/sparse_tensor.cc" - "${LIBRARY_DIR}/status.cc" - "${LIBRARY_DIR}/table.cc" - "${LIBRARY_DIR}/table_builder.cc" - "${LIBRARY_DIR}/tensor.cc" - "${LIBRARY_DIR}/tensor/coo_converter.cc" - "${LIBRARY_DIR}/tensor/csf_converter.cc" - "${LIBRARY_DIR}/tensor/csx_converter.cc" - "${LIBRARY_DIR}/type.cc" - "${LIBRARY_DIR}/visitor.cc" + "${LIBRARY_DIR}/builder.cc" "${LIBRARY_DIR}/c/bridge.cc" - "${LIBRARY_DIR}/io/buffered.cc" - "${LIBRARY_DIR}/io/caching.cc" - "${LIBRARY_DIR}/io/compressed.cc" - "${LIBRARY_DIR}/io/file.cc" - "${LIBRARY_DIR}/io/hdfs.cc" - "${LIBRARY_DIR}/io/hdfs_internal.cc" - "${LIBRARY_DIR}/io/interfaces.cc" - "${LIBRARY_DIR}/io/memory.cc" - "${LIBRARY_DIR}/io/slow.cc" - "${LIBRARY_DIR}/io/stdio.cc" - "${LIBRARY_DIR}/io/transform.cc" - "${LIBRARY_DIR}/util/async_util.cc" - "${LIBRARY_DIR}/util/basic_decimal.cc" - "${LIBRARY_DIR}/util/bit_block_counter.cc" - "${LIBRARY_DIR}/util/bit_run_reader.cc" - "${LIBRARY_DIR}/util/bit_util.cc" - "${LIBRARY_DIR}/util/bitmap.cc" - "${LIBRARY_DIR}/util/bitmap_builders.cc" - "${LIBRARY_DIR}/util/bitmap_ops.cc" - "${LIBRARY_DIR}/util/bpacking.cc" - "${LIBRARY_DIR}/util/cancel.cc" - "${LIBRARY_DIR}/util/compression.cc" - "${LIBRARY_DIR}/util/counting_semaphore.cc" - "${LIBRARY_DIR}/util/cpu_info.cc" - "${LIBRARY_DIR}/util/decimal.cc" - "${LIBRARY_DIR}/util/delimiting.cc" - "${LIBRARY_DIR}/util/formatting.cc" - "${LIBRARY_DIR}/util/future.cc" - "${LIBRARY_DIR}/util/int_util.cc" - "${LIBRARY_DIR}/util/io_util.cc" - "${LIBRARY_DIR}/util/logging.cc" - "${LIBRARY_DIR}/util/key_value_metadata.cc" - "${LIBRARY_DIR}/util/memory.cc" - "${LIBRARY_DIR}/util/mutex.cc" - "${LIBRARY_DIR}/util/string.cc" - "${LIBRARY_DIR}/util/string_builder.cc" - 
"${LIBRARY_DIR}/util/task_group.cc" - "${LIBRARY_DIR}/util/tdigest.cc" - "${LIBRARY_DIR}/util/thread_pool.cc" - "${LIBRARY_DIR}/util/time.cc" - "${LIBRARY_DIR}/util/trie.cc" - "${LIBRARY_DIR}/util/unreachable.cc" - "${LIBRARY_DIR}/util/uri.cc" - "${LIBRARY_DIR}/util/utf8.cc" - "${LIBRARY_DIR}/util/value_parsing.cc" - "${LIBRARY_DIR}/util/byte_size.cc" - "${LIBRARY_DIR}/util/debug.cc" - "${LIBRARY_DIR}/util/tracing.cc" - "${LIBRARY_DIR}/util/atfork_internal.cc" - "${LIBRARY_DIR}/util/crc32.cc" - "${LIBRARY_DIR}/util/hashing.cc" - "${LIBRARY_DIR}/util/ree_util.cc" - "${LIBRARY_DIR}/util/union_util.cc" - "${LIBRARY_DIR}/vendored/base64.cpp" - "${LIBRARY_DIR}/vendored/datetime/tz.cpp" - "${LIBRARY_DIR}/vendored/musl/strptime.c" - "${LIBRARY_DIR}/vendored/uriparser/UriCommon.c" - "${LIBRARY_DIR}/vendored/uriparser/UriCompare.c" - "${LIBRARY_DIR}/vendored/uriparser/UriEscape.c" - "${LIBRARY_DIR}/vendored/uriparser/UriFile.c" - "${LIBRARY_DIR}/vendored/uriparser/UriIp4Base.c" - "${LIBRARY_DIR}/vendored/uriparser/UriIp4.c" - "${LIBRARY_DIR}/vendored/uriparser/UriMemory.c" - "${LIBRARY_DIR}/vendored/uriparser/UriNormalizeBase.c" - "${LIBRARY_DIR}/vendored/uriparser/UriNormalize.c" - "${LIBRARY_DIR}/vendored/uriparser/UriParseBase.c" - "${LIBRARY_DIR}/vendored/uriparser/UriParse.c" - "${LIBRARY_DIR}/vendored/uriparser/UriQuery.c" - "${LIBRARY_DIR}/vendored/uriparser/UriRecompose.c" - "${LIBRARY_DIR}/vendored/uriparser/UriResolve.c" - "${LIBRARY_DIR}/vendored/uriparser/UriShorten.c" - "${LIBRARY_DIR}/vendored/double-conversion/bignum.cc" - "${LIBRARY_DIR}/vendored/double-conversion/bignum-dtoa.cc" - "${LIBRARY_DIR}/vendored/double-conversion/cached-powers.cc" - "${LIBRARY_DIR}/vendored/double-conversion/double-to-string.cc" - "${LIBRARY_DIR}/vendored/double-conversion/fast-dtoa.cc" - "${LIBRARY_DIR}/vendored/double-conversion/fixed-dtoa.cc" - "${LIBRARY_DIR}/vendored/double-conversion/string-to-double.cc" - "${LIBRARY_DIR}/vendored/double-conversion/strtod.cc" - + "${LIBRARY_DIR}/c/dlpack.cc" + "${LIBRARY_DIR}/chunk_resolver.cc" + "${LIBRARY_DIR}/chunked_array.cc" + "${LIBRARY_DIR}/compare.cc" "${LIBRARY_DIR}/compute/api_aggregate.cc" "${LIBRARY_DIR}/compute/api_scalar.cc" "${LIBRARY_DIR}/compute/api_vector.cc" "${LIBRARY_DIR}/compute/cast.cc" "${LIBRARY_DIR}/compute/exec.cc" + "${LIBRARY_DIR}/compute/expression.cc" "${LIBRARY_DIR}/compute/function.cc" "${LIBRARY_DIR}/compute/function_internal.cc" "${LIBRARY_DIR}/compute/kernel.cc" @@ -355,6 +263,7 @@ set(ARROW_SRCS "${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc" "${LIBRARY_DIR}/compute/kernels/codegen_internal.cc" "${LIBRARY_DIR}/compute/kernels/hash_aggregate.cc" + "${LIBRARY_DIR}/compute/kernels/ree_util_internal.cc" "${LIBRARY_DIR}/compute/kernels/row_encoder.cc" "${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc" "${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc" @@ -382,30 +291,139 @@ set(ARROW_SRCS "${LIBRARY_DIR}/compute/kernels/vector_cumulative_ops.cc" "${LIBRARY_DIR}/compute/kernels/vector_hash.cc" "${LIBRARY_DIR}/compute/kernels/vector_nested.cc" + "${LIBRARY_DIR}/compute/kernels/vector_pairwise.cc" "${LIBRARY_DIR}/compute/kernels/vector_rank.cc" "${LIBRARY_DIR}/compute/kernels/vector_replace.cc" + "${LIBRARY_DIR}/compute/kernels/vector_run_end_encode.cc" "${LIBRARY_DIR}/compute/kernels/vector_select_k.cc" "${LIBRARY_DIR}/compute/kernels/vector_selection.cc" - "${LIBRARY_DIR}/compute/kernels/vector_sort.cc" - "${LIBRARY_DIR}/compute/kernels/vector_selection_internal.cc" 
"${LIBRARY_DIR}/compute/kernels/vector_selection_filter_internal.cc" + "${LIBRARY_DIR}/compute/kernels/vector_selection_internal.cc" "${LIBRARY_DIR}/compute/kernels/vector_selection_take_internal.cc" - "${LIBRARY_DIR}/compute/light_array.cc" - "${LIBRARY_DIR}/compute/registry.cc" - "${LIBRARY_DIR}/compute/expression.cc" + "${LIBRARY_DIR}/compute/kernels/vector_sort.cc" + "${LIBRARY_DIR}/compute/key_hash_internal.cc" + "${LIBRARY_DIR}/compute/key_map_internal.cc" + "${LIBRARY_DIR}/compute/light_array_internal.cc" "${LIBRARY_DIR}/compute/ordering.cc" + "${LIBRARY_DIR}/compute/registry.cc" "${LIBRARY_DIR}/compute/row/compare_internal.cc" "${LIBRARY_DIR}/compute/row/encode_internal.cc" "${LIBRARY_DIR}/compute/row/grouper.cc" "${LIBRARY_DIR}/compute/row/row_internal.cc" - + "${LIBRARY_DIR}/compute/util.cc" + "${LIBRARY_DIR}/config.cc" + "${LIBRARY_DIR}/datum.cc" + "${LIBRARY_DIR}/device.cc" + "${LIBRARY_DIR}/extension_type.cc" + "${LIBRARY_DIR}/integration/c_data_integration_internal.cc" + "${LIBRARY_DIR}/io/buffered.cc" + "${LIBRARY_DIR}/io/caching.cc" + "${LIBRARY_DIR}/io/compressed.cc" + "${LIBRARY_DIR}/io/file.cc" + "${LIBRARY_DIR}/io/hdfs.cc" + "${LIBRARY_DIR}/io/hdfs_internal.cc" + "${LIBRARY_DIR}/io/interfaces.cc" + "${LIBRARY_DIR}/io/memory.cc" + "${LIBRARY_DIR}/io/slow.cc" + "${LIBRARY_DIR}/io/stdio.cc" + "${LIBRARY_DIR}/io/transform.cc" "${LIBRARY_DIR}/ipc/dictionary.cc" "${LIBRARY_DIR}/ipc/feather.cc" + "${LIBRARY_DIR}/ipc/file_to_stream.cc" "${LIBRARY_DIR}/ipc/message.cc" "${LIBRARY_DIR}/ipc/metadata_internal.cc" "${LIBRARY_DIR}/ipc/options.cc" "${LIBRARY_DIR}/ipc/reader.cc" + "${LIBRARY_DIR}/ipc/stream_to_file.cc" "${LIBRARY_DIR}/ipc/writer.cc" + "${LIBRARY_DIR}/memory_pool.cc" + "${LIBRARY_DIR}/pretty_print.cc" + "${LIBRARY_DIR}/record_batch.cc" + "${LIBRARY_DIR}/result.cc" + "${LIBRARY_DIR}/scalar.cc" + "${LIBRARY_DIR}/sparse_tensor.cc" + "${LIBRARY_DIR}/status.cc" + "${LIBRARY_DIR}/table.cc" + "${LIBRARY_DIR}/table_builder.cc" + "${LIBRARY_DIR}/tensor.cc" + "${LIBRARY_DIR}/tensor/coo_converter.cc" + "${LIBRARY_DIR}/tensor/csf_converter.cc" + "${LIBRARY_DIR}/tensor/csx_converter.cc" + "${LIBRARY_DIR}/type.cc" + "${LIBRARY_DIR}/type_traits.cc" + "${LIBRARY_DIR}/util/align_util.cc" + "${LIBRARY_DIR}/util/async_util.cc" + "${LIBRARY_DIR}/util/atfork_internal.cc" + "${LIBRARY_DIR}/util/basic_decimal.cc" + "${LIBRARY_DIR}/util/bit_block_counter.cc" + "${LIBRARY_DIR}/util/bit_run_reader.cc" + "${LIBRARY_DIR}/util/bit_util.cc" + "${LIBRARY_DIR}/util/bitmap.cc" + "${LIBRARY_DIR}/util/bitmap_builders.cc" + "${LIBRARY_DIR}/util/bitmap_ops.cc" + "${LIBRARY_DIR}/util/bpacking.cc" + "${LIBRARY_DIR}/util/byte_size.cc" + "${LIBRARY_DIR}/util/cancel.cc" + "${LIBRARY_DIR}/util/compression.cc" + "${LIBRARY_DIR}/util/counting_semaphore.cc" + "${LIBRARY_DIR}/util/cpu_info.cc" + "${LIBRARY_DIR}/util/crc32.cc" + "${LIBRARY_DIR}/util/debug.cc" + "${LIBRARY_DIR}/util/decimal.cc" + "${LIBRARY_DIR}/util/delimiting.cc" + "${LIBRARY_DIR}/util/dict_util.cc" + "${LIBRARY_DIR}/util/float16.cc" + "${LIBRARY_DIR}/util/formatting.cc" + "${LIBRARY_DIR}/util/future.cc" + "${LIBRARY_DIR}/util/hashing.cc" + "${LIBRARY_DIR}/util/int_util.cc" + "${LIBRARY_DIR}/util/io_util.cc" + "${LIBRARY_DIR}/util/key_value_metadata.cc" + "${LIBRARY_DIR}/util/list_util.cc" + "${LIBRARY_DIR}/util/logging.cc" + "${LIBRARY_DIR}/util/memory.cc" + "${LIBRARY_DIR}/util/mutex.cc" + "${LIBRARY_DIR}/util/ree_util.cc" + "${LIBRARY_DIR}/util/string.cc" + "${LIBRARY_DIR}/util/string_builder.cc" + "${LIBRARY_DIR}/util/task_group.cc" + 
"${LIBRARY_DIR}/util/tdigest.cc" + "${LIBRARY_DIR}/util/thread_pool.cc" + "${LIBRARY_DIR}/util/time.cc" + "${LIBRARY_DIR}/util/tracing.cc" + "${LIBRARY_DIR}/util/trie.cc" + "${LIBRARY_DIR}/util/union_util.cc" + "${LIBRARY_DIR}/util/unreachable.cc" + "${LIBRARY_DIR}/util/uri.cc" + "${LIBRARY_DIR}/util/utf8.cc" + "${LIBRARY_DIR}/util/value_parsing.cc" + "${LIBRARY_DIR}/vendored/base64.cpp" + "${LIBRARY_DIR}/vendored/datetime/tz.cpp" + "${LIBRARY_DIR}/vendored/double-conversion/bignum-dtoa.cc" + "${LIBRARY_DIR}/vendored/double-conversion/bignum.cc" + "${LIBRARY_DIR}/vendored/double-conversion/cached-powers.cc" + "${LIBRARY_DIR}/vendored/double-conversion/double-to-string.cc" + "${LIBRARY_DIR}/vendored/double-conversion/fast-dtoa.cc" + "${LIBRARY_DIR}/vendored/double-conversion/fixed-dtoa.cc" + "${LIBRARY_DIR}/vendored/double-conversion/string-to-double.cc" + "${LIBRARY_DIR}/vendored/double-conversion/strtod.cc" + "${LIBRARY_DIR}/vendored/musl/strptime.c" + "${LIBRARY_DIR}/vendored/uriparser/UriCommon.c" + "${LIBRARY_DIR}/vendored/uriparser/UriCompare.c" + "${LIBRARY_DIR}/vendored/uriparser/UriEscape.c" + "${LIBRARY_DIR}/vendored/uriparser/UriFile.c" + "${LIBRARY_DIR}/vendored/uriparser/UriIp4.c" + "${LIBRARY_DIR}/vendored/uriparser/UriIp4Base.c" + "${LIBRARY_DIR}/vendored/uriparser/UriMemory.c" + "${LIBRARY_DIR}/vendored/uriparser/UriNormalize.c" + "${LIBRARY_DIR}/vendored/uriparser/UriNormalizeBase.c" + "${LIBRARY_DIR}/vendored/uriparser/UriParse.c" + "${LIBRARY_DIR}/vendored/uriparser/UriParseBase.c" + "${LIBRARY_DIR}/vendored/uriparser/UriQuery.c" + "${LIBRARY_DIR}/vendored/uriparser/UriRecompose.c" + "${LIBRARY_DIR}/vendored/uriparser/UriResolve.c" + "${LIBRARY_DIR}/vendored/uriparser/UriShorten.c" + "${LIBRARY_DIR}/visitor.cc" "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc" "${ARROW_SRC_DIR}/arrow/adapters/orc/util.cc" @@ -465,22 +483,38 @@ set(PARQUET_SRCS "${LIBRARY_DIR}/arrow/schema.cc" "${LIBRARY_DIR}/arrow/schema_internal.cc" "${LIBRARY_DIR}/arrow/writer.cc" + "${LIBRARY_DIR}/benchmark_util.cc" "${LIBRARY_DIR}/bloom_filter.cc" + "${LIBRARY_DIR}/bloom_filter_reader.cc" "${LIBRARY_DIR}/column_reader.cc" "${LIBRARY_DIR}/column_scanner.cc" "${LIBRARY_DIR}/column_writer.cc" "${LIBRARY_DIR}/encoding.cc" + "${LIBRARY_DIR}/encryption/crypto_factory.cc" "${LIBRARY_DIR}/encryption/encryption.cc" "${LIBRARY_DIR}/encryption/encryption_internal.cc" + "${LIBRARY_DIR}/encryption/encryption_internal_nossl.cc" + "${LIBRARY_DIR}/encryption/file_key_unwrapper.cc" + "${LIBRARY_DIR}/encryption/file_key_wrapper.cc" + "${LIBRARY_DIR}/encryption/file_system_key_material_store.cc" "${LIBRARY_DIR}/encryption/internal_file_decryptor.cc" "${LIBRARY_DIR}/encryption/internal_file_encryptor.cc" + "${LIBRARY_DIR}/encryption/key_material.cc" + "${LIBRARY_DIR}/encryption/key_metadata.cc" + "${LIBRARY_DIR}/encryption/key_toolkit.cc" + "${LIBRARY_DIR}/encryption/key_toolkit_internal.cc" + "${LIBRARY_DIR}/encryption/kms_client.cc" + "${LIBRARY_DIR}/encryption/local_wrap_kms_client.cc" + "${LIBRARY_DIR}/encryption/openssl_internal.cc" "${LIBRARY_DIR}/exception.cc" "${LIBRARY_DIR}/file_reader.cc" "${LIBRARY_DIR}/file_writer.cc" - "${LIBRARY_DIR}/page_index.cc" - "${LIBRARY_DIR}/level_conversion.cc" "${LIBRARY_DIR}/level_comparison.cc" + "${LIBRARY_DIR}/level_comparison_avx2.cc" + "${LIBRARY_DIR}/level_conversion.cc" + "${LIBRARY_DIR}/level_conversion_bmi2.cc" "${LIBRARY_DIR}/metadata.cc" + "${LIBRARY_DIR}/page_index.cc" "${LIBRARY_DIR}/platform.cc" "${LIBRARY_DIR}/printer.cc" "${LIBRARY_DIR}/properties.cc" @@ -489,7 
+523,6 @@ set(PARQUET_SRCS "${LIBRARY_DIR}/stream_reader.cc" "${LIBRARY_DIR}/stream_writer.cc" "${LIBRARY_DIR}/types.cc" - "${LIBRARY_DIR}/bloom_filter_reader.cc" "${LIBRARY_DIR}/xxhasher.cc" "${GEN_LIBRARY_DIR}/parquet_constants.cpp" @@ -520,6 +553,9 @@ endif () add_definitions(-DPARQUET_THRIFT_VERSION_MAJOR=0) add_definitions(-DPARQUET_THRIFT_VERSION_MINOR=16) +# As per https://github.com/apache/arrow/pull/35672 you need to enable it explicitly. +add_definitions(-DARROW_ENABLE_THREADING) + # === tools set(TOOLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/tools/parquet") diff --git a/contrib/flatbuffers b/contrib/flatbuffers index eb3f8279482..0100f6a5779 160000 --- a/contrib/flatbuffers +++ b/contrib/flatbuffers @@ -1 +1 @@ -Subproject commit eb3f827948241ce0e701516f16cd67324802bce9 +Subproject commit 0100f6a5779831fa7a651e4b67ef389a8752bd9b diff --git a/contrib/krb5 b/contrib/krb5 index 71b06c22760..c5b4b994c18 160000 --- a/contrib/krb5 +++ b/contrib/krb5 @@ -1 +1 @@ -Subproject commit 71b06c2276009ae649c7703019f3b4605f66fd3d +Subproject commit c5b4b994c18db86933255907a97eee5993fd18fe diff --git a/contrib/usearch b/contrib/usearch index 1706420acaf..7efe8b710c9 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 1706420acafbd83d852c512dcf343af0a4059e48 +Subproject commit 7efe8b710c9831bfe06573b1df0fad001b04a2b5 diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 25f6ca82a74..fda061bf467 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -6,12 +6,63 @@ target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/incl target_link_libraries(_usearch INTERFACE _fp16) target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB) -# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD) -# ^^ simsimd is not enabled at the moment. Reasons: -# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW. -# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD -# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of -# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86 -# and ARM machines ... certain conbinations of quantizations / distance functions / SIMD instructions are not implemented at the moment. +# Only x86 for now. On ARM, the linker goes down in flames. To make SimSIMD compile, I had to remove a macro checks in SimSIMD +# for AVX512 (x86, worked nicely) and __ARM_BF16_FORMAT_ALTERNATIVE. It is probably because of that. +if (ARCH_AMD64) + target_link_libraries(_usearch INTERFACE _simsimd) + target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD) + + target_compile_definitions(_usearch INTERFACE USEARCH_CAN_COMPILE_FLOAT16) + target_compile_definitions(_usearch INTERFACE USEARCH_CAN_COMPILE_BF16) +endif () add_library(ch_contrib::usearch ALIAS _usearch) + + +# Cf. 
https://github.com/llvm/llvm-project/issues/107810 (though it is not 100% the same stack) +# +# LLVM ERROR: Cannot select: 0x7996e7a73150: f32,ch = load<(load (s16) from %ir.22, !tbaa !54231), anyext from bf16> 0x79961cb737c0, 0x7996e7a1a500, undef:i64, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 +# 0x7996e7a1a500: i64 = add 0x79961e770d00, Constant:i64<-16>, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 +# 0x79961e770d00: i64,ch = CopyFromReg 0x79961cb737c0, Register:i64 %4, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 +# 0x7996e7a1ae10: i64 = Register %4 +# 0x7996e7a1b5f0: i64 = Constant<-16> +# 0x7996e7a1a730: i64 = undef +# In function: _ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd +# PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace. +# Stack dump: +# 0. Running pass 'Function Pass Manager' on module 'src/libdbms.a(MergeTreeIndexVectorSimilarity.cpp.o at 2312737440)'. +# 1. Running pass 'AArch64 Instruction Selection' on function '@_ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd' +# #0 0x00007999e83a63bf llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda63bf) +# #1 0x00007999e83a44f9 llvm::sys::RunSignalHandlers() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda44f9) +# #2 0x00007999e83a6b00 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda6b00) +# #3 0x00007999e6e45320 (/lib/x86_64-linux-gnu/libc.so.6+0x45320) +# #4 0x00007999e6e9eb1c pthread_kill (/lib/x86_64-linux-gnu/libc.so.6+0x9eb1c) +# #5 0x00007999e6e4526e raise (/lib/x86_64-linux-gnu/libc.so.6+0x4526e) +# #6 0x00007999e6e288ff abort (/lib/x86_64-linux-gnu/libc.so.6+0x288ff) +# #7 0x00007999e82fe0c2 llvm::report_fatal_error(llvm::Twine const&, bool) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcfe0c2) +# #8 0x00007999e8c2f8e3 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162f8e3) +# #9 0x00007999e8c2ed76 llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162ed76) +# #10 0x00007999ea1adbcb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x2badbcb) +# #11 0x00007999e8c2611f llvm::SelectionDAGISel::DoInstructionSelection() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162611f) +# #12 0x00007999e8c25790 llvm::SelectionDAGISel::CodeGenAndEmitDAG() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1625790) +# #13 0x00007999e8c248de llvm::SelectionDAGISel::SelectAllBasicBlocks(llvm::Function const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x16248de) +# #14 0x00007999e8c22934 llvm::SelectionDAGISel::runOnMachineFunction(llvm::MachineFunction&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1622934) +# #15 0x00007999e87826b9 llvm::MachineFunctionPass::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x11826b9) +# #16 0x00007999e84f7772 llvm::FPPassManager::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7772) +# #17 0x00007999e84fd2f4 llvm::FPPassManager::runOnModule(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xefd2f4) +# #18 0x00007999e84f7e9f llvm::legacy::PassManagerImpl::run(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7e9f) +# #19 0x00007999e99f7d61 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f7d61) +# #20 0x00007999e99f8c91 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8c91) +# #21 0x00007999e99f8b10 llvm::lto::thinBackend(llvm::lto::Config const&, unsigned int, std::function>> (unsigned int, llvm::Twine const&)>, llvm::Module&, 
llvm::ModuleSummaryIndex const&, llvm::DenseMap, std::equal_to, std::allocator>, llvm::DenseMapInfo, llvm::detail::DenseMapPair, std::equal_to, std::allocator>>> const&, llvm::DenseMap, llvm::detail::DenseMapPair> const&, llvm::MapVector, llvm::detail::DenseMapPair>, llvm::SmallVector, 0u>>*, std::vector> const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8b10) +# #22 0x00007999e99f248d (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f248d) +# #23 0x00007999e99f1cd6 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f1cd6) +# #24 0x00007999e82c9beb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcc9beb) +# #25 0x00007999e834ebe3 llvm::ThreadPool::processTasks(llvm::ThreadPoolTaskGroup*) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4ebe3) +# #26 0x00007999e834f704 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4f704) +# #27 0x00007999e6e9ca94 (/lib/x86_64-linux-gnu/libc.so.6+0x9ca94) +# #28 0x00007999e6f29c3c (/lib/x86_64-linux-gnu/libc.so.6+0x129c3c) +# clang++-18: error: unable to execute command: Aborted (core dumped) +# clang++-18: error: linker command failed due to signal (use -v to see invocation) +# ^[[A^Cninja: build stopped: interrupted by user. diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index dfe6a420260..4ecc087afb4 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -1,7 +1,7 @@ # The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script # If the image is built from Dockerfile.alpine, then the `-alpine` suffix is added automatically, # so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes -FROM ubuntu:20.04 AS glibc-donor +FROM ubuntu:22.04 AS glibc-donor ARG TARGETARCH RUN arch=${TARGETARCH:-amd64} \ @@ -9,7 +9,11 @@ RUN arch=${TARGETARCH:-amd64} \ amd64) rarch=x86_64 ;; \ arm64) rarch=aarch64 ;; \ esac \ - && ln -s "${rarch}-linux-gnu" /lib/linux-gnu + && ln -s "${rarch}-linux-gnu" /lib/linux-gnu \ + && case $arch in \ + amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \ + arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \ + esac FROM alpine @@ -20,7 +24,7 @@ ENV LANG=en_US.UTF-8 \ TZ=UTC \ CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml -COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ +COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/ COPY --from=glibc-donor /etc/nsswitch.conf /etc/ COPY entrypoint.sh /entrypoint.sh @@ -34,7 +38,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 991c25ad142..93acf1a5773 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG 
REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 5dc88b49e31..0d5c983f5e6 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:22.04 # see https://github.com/moby/moby/issues/4032#issuecomment-192327844 # It could be removed after we move on a version 23:04+ @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docker/server/README.md b/docker/server/README.md index 65239126790..1dc636414ac 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -20,6 +20,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). +- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. It requires docker version >= `20.10.10` containing [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead, however that has security implications. ## How to use this image diff --git a/docker/test/libfuzzer/Dockerfile b/docker/test/libfuzzer/Dockerfile index 3ffae0cd921..46e305c90ab 100644 --- a/docker/test/libfuzzer/Dockerfile +++ b/docker/test/libfuzzer/Dockerfile @@ -33,8 +33,6 @@ RUN apt-get update \ COPY requirements.txt / RUN pip3 install --no-cache-dir -r /requirements.txt -ENV FUZZER_ARGS="-max_total_time=60" - SHELL ["/bin/bash", "-c"] # docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer diff --git a/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile b/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile deleted file mode 100644 index a9802f6f1da..00000000000 --- a/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Since right now we can't set volumes to the docker during build, we split building container in stages: -# 1. build base container -# 2. run base conatiner with mounted volumes -# 3. commit container as image -FROM ubuntu:20.04 as clickhouse-test-runner-base - -# A volume where directory with clickhouse packages to be mounted, -# for later installing. 
-VOLUME /packages - -CMD apt-get update ;\ - DEBIAN_FRONTEND=noninteractive \ - apt install -y /packages/clickhouse-common-static_*.deb \ - /packages/clickhouse-client_*.deb \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* diff --git a/docs/changelogs/v24.10.1.2812-stable.md b/docs/changelogs/v24.10.1.2812-stable.md new file mode 100644 index 00000000000..c26bbf706ff --- /dev/null +++ b/docs/changelogs/v24.10.1.2812-stable.md @@ -0,0 +1,412 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.10.1.2812-stable (9cd0a3738d5) FIXME as compared to v24.10.1.1-new (b12a3677418) + +#### Backward Incompatible Change +* Allow to write `SETTINGS` before `FORMAT` in a chain of queries with `UNION` when subqueries are inside parentheses. This closes [#39712](https://github.com/ClickHouse/ClickHouse/issues/39712). Change the behavior when a query has the SETTINGS clause specified twice in a sequence. The closest SETTINGS clause will have a preference for the corresponding subquery. In the previous versions, the outermost SETTINGS clause could take a preference over the inner one. [#68614](https://github.com/ClickHouse/ClickHouse/pull/68614) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Reordering of filter conditions from `[PRE]WHERE` clause is now allowed by default. It could be disabled by setting `allow_reorder_prewhere_conditions` to `false`. [#70657](https://github.com/ClickHouse/ClickHouse/pull/70657) ([Nikita Taranov](https://github.com/nickitat)). +* Fix `optimize_functions_to_subcolumns` optimization (previously could lead to `Invalid column type for ColumnUnique::insertRangeFrom. Expected String, got LowCardinality(String)` error), by preserving `LowCardinality` type in `mapKeys`/`mapValues`. [#70716](https://github.com/ClickHouse/ClickHouse/pull/70716) ([Azat Khuzhin](https://github.com/azat)). +* Remove the `idxd-config` library, which has an incompatible license. This also removes the experimental Intel DeflateQPL codec. [#70987](https://github.com/ClickHouse/ClickHouse/pull/70987) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* MongoDB integration refactored: migration to new driver mongocxx from deprecated Poco::MongoDB, remove support for deprecated old protocol, support for connection by URI, support for all MongoDB types, support for WHERE and ORDER BY statements on MongoDB side, restriction for expression unsupported by MongoDB. [#63279](https://github.com/ClickHouse/ClickHouse/pull/63279) ([Kirill Nikiforov](https://github.com/allmazz)). +* A new `--progress-table` option in clickhouse-client prints a table with metrics changing during query execution; a new `--enable-progress-table-toggle` is associated with the `--progress-table` option, and toggles the rendering of the progress table by pressing the control key (Space). [#63689](https://github.com/ClickHouse/ClickHouse/pull/63689) ([Maria Khristenko](https://github.com/mariaKhr)). +* This allows to grant access to the wildcard prefixes. `GRANT SELECT ON db.table_pefix_* TO user`. [#65311](https://github.com/ClickHouse/ClickHouse/pull/65311) ([pufit](https://github.com/pufit)). +* Add system.query_metric_log which contains history of memory and metric values from table system.events for individual queries, periodically flushed to disk. [#66532](https://github.com/ClickHouse/ClickHouse/pull/66532) ([Pablo Marcos](https://github.com/pamarcos)). 
+* A simple SELECT query can be written with implicit SELECT to enable calculator-style expressions, e.g., `ch "1 + 2"`. This is controlled by a new setting, `implicit_select`. [#68502](https://github.com/ClickHouse/ClickHouse/pull/68502) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Support `--copy` mode for clickhouse-local as a shortcut for format conversion [#68503](https://github.com/ClickHouse/ClickHouse/issues/68503). [#68583](https://github.com/ClickHouse/ClickHouse/pull/68583) ([Denis Hananein](https://github.com/denis-hananein)). +* Add support for `arrayUnion` function (see the usage sketch after this feature list). [#68989](https://github.com/ClickHouse/ClickHouse/pull/68989) ([Peter Nguyen](https://github.com/petern48)). +* Support aggregate function `quantileExactWeightedInterpolated`, which is an interpolated version based on `quantileExactWeighted`. Some people may wonder why we need a new `quantileExactWeightedInterpolated` since we already have `quantileExactInterpolatedWeighted`. The reason is that the new one is more accurate than the old one. It is needed for Spark compatibility in Apache Gluten. [#69619](https://github.com/ClickHouse/ClickHouse/pull/69619) ([李扬](https://github.com/taiyang-li)). +* Support function `arrayElementOrNull`. It returns NULL if the array index is out of range or the map key is not found. [#69646](https://github.com/ClickHouse/ClickHouse/pull/69646) ([李扬](https://github.com/taiyang-li)). +* Allow users to specify regular expressions through new `message_regexp` and `message_regexp_negative` fields in the `config.xml` file to filter out logging. The filtering is applied to the formatted un-colored text for the most intuitive developer experience. [#69657](https://github.com/ClickHouse/ClickHouse/pull/69657) ([Peter Nguyen](https://github.com/petern48)). +* Support Dynamic type in most functions by executing them on internal types inside Dynamic. [#69691](https://github.com/ClickHouse/ClickHouse/pull/69691) ([Pavel Kruglov](https://github.com/Avogar)). +* Re-added `RIPEMD160` function, which computes the RIPEMD-160 cryptographic hash of a string. Example: `SELECT HEX(RIPEMD160('The quick brown fox jumps over the lazy dog'))` returns `37F332F68DB77BD9D7EDD4969571AD671CF9DD3B`. [#70087](https://github.com/ClickHouse/ClickHouse/pull/70087) ([Dergousov Maxim](https://github.com/m7kss1)). +* Allow to cache read files for object storage table engines and data lakes using hash from ETag + file path as cache key. [#70135](https://github.com/ClickHouse/ClickHouse/pull/70135) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Support reading Iceberg tables on HDFS. [#70268](https://github.com/ClickHouse/ClickHouse/pull/70268) ([flynn](https://github.com/ucasfl)). +* Allow to read/write JSON type as binary string in RowBinary format under settings `input_format_binary_read_json_as_string/output_format_binary_write_json_as_string`. [#70288](https://github.com/ClickHouse/ClickHouse/pull/70288) ([Pavel Kruglov](https://github.com/Avogar)). +* Allow to serialize/deserialize JSON column as single String column in Native format. For output use setting `output_format_native_write_json_as_string`. For input, use serialization version `1` before the column data. [#70312](https://github.com/ClickHouse/ClickHouse/pull/70312) ([Pavel Kruglov](https://github.com/Avogar)). +* Support the standard CTE form `WITH ... INSERT ...`; previously only `INSERT ... WITH ...` was supported. [#70593](https://github.com/ClickHouse/ClickHouse/pull/70593) ([Shichao Jin](https://github.com/jsc0218)).
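A short, hedged SQL sketch of two of the new functions listed above, `arrayUnion` and `arrayElementOrNull` (the literal values and shown results are illustrative assumptions; element order and NULL rendering may differ):

SELECT arrayUnion([1, 2], [2, 3]);            -- [1, 2, 3]: union with duplicates removed; element order is not guaranteed
SELECT arrayElementOrNull([10, 20, 30], 5);   -- NULL for an out-of-range index, instead of a type default or an exception
SELECT arrayElementOrNull(map('a', 1), 'b');  -- NULL when the map key is not found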
+ +#### Performance Improvement +* Support minmax index for `pointInPolygon`. [#62085](https://github.com/ClickHouse/ClickHouse/pull/62085) ([JackyWoo](https://github.com/JackyWoo)). +* Add support for parquet bloom filters. [#62966](https://github.com/ClickHouse/ClickHouse/pull/62966) ([Arthur Passos](https://github.com/arthurpassos)). +* Lock-free parts rename to avoid INSERT affecting SELECT (due to parts lock) (under normal circumstances with `fsync_part_directory`, QPS of SELECT with INSERT in parallel increased 2x; under heavy load the effect is even bigger). Note, this only includes `ReplicatedMergeTree` for now. [#64955](https://github.com/ClickHouse/ClickHouse/pull/64955) ([Azat Khuzhin](https://github.com/azat)). +* Respect `ttl_only_drop_parts` on `materialize ttl`; only read necessary columns to recalculate TTL and drop parts by replacing them with an empty one. [#65488](https://github.com/ClickHouse/ClickHouse/pull/65488) ([Andrey Zvonov](https://github.com/zvonand)). +* Refactor `IDisk` and `IObjectStorage` for better performance. Tables from `plain` and `plain_rewritable` object storages will initialize faster. [#68146](https://github.com/ClickHouse/ClickHouse/pull/68146) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Optimized thread creation in the ThreadPool to minimize lock contention. Thread creation is now performed outside of the critical section to avoid delays in job scheduling and thread management under high load conditions. This leads to a much more responsive ClickHouse under heavy concurrent load. [#68694](https://github.com/ClickHouse/ClickHouse/pull/68694) ([filimonov](https://github.com/filimonov)). +* Enable reading LowCardinality string columns from ORC. [#69481](https://github.com/ClickHouse/ClickHouse/pull/69481) ([李扬](https://github.com/taiyang-li)). +* Added an ability to parse data directly into sparse columns. [#69828](https://github.com/ClickHouse/ClickHouse/pull/69828) ([Anton Popov](https://github.com/CurtizJ)). +* Supports parallel reading of parquet row groups and prefetching of row groups in single-threaded mode. [#69862](https://github.com/ClickHouse/ClickHouse/pull/69862) ([LiuNeng](https://github.com/liuneng1994)). +* Improved performance of parsing formats with a high number of missed values (e.g. `JSONEachRow`). [#69875](https://github.com/ClickHouse/ClickHouse/pull/69875) ([Anton Popov](https://github.com/CurtizJ)). +* Use `LowCardinality` for `ProfileEvents` in system logs such as `part_log`, `query_views_log`, `filesystem_cache_log`. [#70152](https://github.com/ClickHouse/ClickHouse/pull/70152) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve performance of FromUnixTimestamp/ToUnixTimestamp functions. [#71042](https://github.com/ClickHouse/ClickHouse/pull/71042) ([kevinyhzou](https://github.com/KevinyhZou)). + +#### Improvement +* Allow parametrised SQL aliases. [#50665](https://github.com/ClickHouse/ClickHouse/pull/50665) ([Anton Kozlov](https://github.com/tonickkozlov)). +* Fixed [#57616](https://github.com/ClickHouse/ClickHouse/issues/57616): the problem occurred because all positive number arguments were automatically identified as `UInt64` type, leading to an inability to match `Int`-typed data in `sumMapFiltered`. The non-matching was confusing, as the `UInt64` parameters were not specified by the user.
Additionally, if the arguments are `[1,2,3,toInt8(-3)]`, then due to `getLeastSupertype()` these parameters are uniformly treated as `Int` type, causing `'1,2,3'` to also fail to match the `UInt`-typed data in `sumMapFiltered`. [#58408](https://github.com/ClickHouse/ClickHouse/pull/58408) ([Chen768959](https://github.com/Chen768959)). +* `ALTER TABLE .. REPLACE PARTITION` doesn't wait anymore for mutations/merges that happen in other partitions. [#59138](https://github.com/ClickHouse/ClickHouse/pull/59138) ([Vasily Nemkov](https://github.com/Enmk)). +* Refreshable materialized views are now supported in Replicated databases. [#60669](https://github.com/ClickHouse/ClickHouse/pull/60669) ([Michael Kolupaev](https://github.com/al13n321)). +* Symbolic links for tables in the `data/database_name/` directory are created for the actual paths to the table's data, depending on the storage policy, instead of the `store/...` directory on the default disk. [#61777](https://github.com/ClickHouse/ClickHouse/pull/61777) ([Kirill](https://github.com/kirillgarbar)). +* Apply configuration updates in global context object. It fixes issues like [#62308](https://github.com/ClickHouse/ClickHouse/issues/62308). [#62944](https://github.com/ClickHouse/ClickHouse/pull/62944) ([Amos Bird](https://github.com/amosbird)). +* Reworked settings that control the behavior of parallel replicas algorithms. A quick recap: ClickHouse has four different algorithms for parallel reading involving multiple replicas, which is reflected in the setting `parallel_replicas_mode`, the default value for it is `read_tasks`. Additionally, the toggle-switch setting `enable_parallel_replicas` has been added. [#63151](https://github.com/ClickHouse/ClickHouse/pull/63151) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `ReadSettings` not using user-set values, because only defaults were used. [#65625](https://github.com/ClickHouse/ClickHouse/pull/65625) ([Kseniia Sumarokova](https://github.com/kssenii)). +* While parsing an Enum field from JSON, a string containing an integer will be interpreted as the corresponding Enum element. This closes [#65119](https://github.com/ClickHouse/ClickHouse/issues/65119). [#66801](https://github.com/ClickHouse/ClickHouse/pull/66801) ([scanhex12](https://github.com/scanhex12)). +* Allow `TRIM`-ing `LEADING` or `TRAILING` empty string as a no-op. Closes [#67792](https://github.com/ClickHouse/ClickHouse/issues/67792). [#68455](https://github.com/ClickHouse/ClickHouse/pull/68455) ([Peter Nguyen](https://github.com/petern48)). +* Support creating a table with a query: `CREATE TABLE ... CLONE AS ...`. It clones the source table's schema and then attaches all partitions to the newly created table. This feature is only supported with tables of the `MergeTree` family. Closes [#65015](https://github.com/ClickHouse/ClickHouse/issues/65015). [#69091](https://github.com/ClickHouse/ClickHouse/pull/69091) ([tuanpach](https://github.com/tuanpach)). +* In Gluten ClickHouse, Spark's timestamp type is mapped to ClickHouse's datetime64(6) type. When casting timestamp '2012-01-01 00:11:22' as a string, Spark returns '2012-01-01 00:11:22', while Gluten ClickHouse returns '2012-01-01 00:11:22.000000'. [#69179](https://github.com/ClickHouse/ClickHouse/pull/69179) ([Wenzheng Liu](https://github.com/lwz9103)). +* Always use the new analyzer to calculate constant expressions when `enable_analyzer` is set to `true`.
Support calculation of `executable()` table function arguments without using `SELECT` query for constant expression. [#69292](https://github.com/ClickHouse/ClickHouse/pull/69292) ([Dmitry Novik](https://github.com/novikd)). +* Add `enable_secure_identifiers` to disallow insecure identifiers. [#69411](https://github.com/ClickHouse/ClickHouse/pull/69411) ([tuanpach](https://github.com/tuanpach)). +* Add `show_create_query_identifier_quoting_rule` to define identifier quoting behavior of the show create query result. Possible values: - `user_display`: When the identifiers is a keyword. - `when_necessary`: When the identifiers is one of `{"distinct", "all", "table"}`, or it can cause ambiguity: column names, dictionary attribute names. - `always`: Always quote identifiers. [#69448](https://github.com/ClickHouse/ClickHouse/pull/69448) ([tuanpach](https://github.com/tuanpach)). +* Follow-up to https://github.com/ClickHouse/ClickHouse/pull/69346 Point 4 described there will work now as well:. [#69563](https://github.com/ClickHouse/ClickHouse/pull/69563) ([Vitaly Baranov](https://github.com/vitlibar)). +* Implement generic SerDe between Avro Union and ClickHouse Variant type. Resolves [#69713](https://github.com/ClickHouse/ClickHouse/issues/69713). [#69712](https://github.com/ClickHouse/ClickHouse/pull/69712) ([Jiří Kozlovský](https://github.com/jirislav)). +* 1. CREATE TABLE AS will copy PRIMARY KEY, ORDER BY, and similar clauses. Now it is supported only for the MergeTree family of table engines. 2. For example, the follow SQL statements will trigger exception in the past, but this PR fixes it: if the destination table do not provide an `ORDER BY` or `PRIMARY KEY` expression in the table definition, we will copy that from source table. [#69739](https://github.com/ClickHouse/ClickHouse/pull/69739) ([sakulali](https://github.com/sakulali)). +* Added user-level settings `min_free_disk_bytes_to_throw_insert` and `min_free_disk_ratio_to_throw_insert` to prevent insertions on disks that are almost full. [#69755](https://github.com/ClickHouse/ClickHouse/pull/69755) ([Marco Vilas Boas](https://github.com/marco-vb)). +* If you run `clickhouse-client` or other CLI application and it starts up slowly due to an overloaded server, and you start typing your query, such as `SELECT`, the previous versions will display the remaining of the terminal echo contents before printing the greetings message, such as `SELECTClickHouse local version 24.10.1.1.` instead of `ClickHouse local version 24.10.1.1.`. Now it is fixed. This closes [#31696](https://github.com/ClickHouse/ClickHouse/issues/31696). [#69856](https://github.com/ClickHouse/ClickHouse/pull/69856) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add new column readonly_duration to the system.replicas table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Change the join to sort settings type to unsigned int. [#69886](https://github.com/ClickHouse/ClickHouse/pull/69886) ([kevinyhzou](https://github.com/KevinyhZou)). +* Support 64-bit XID in Keeper. It can be enabled with `use_xid_64` config. [#69908](https://github.com/ClickHouse/ClickHouse/pull/69908) ([Antonio Andelic](https://github.com/antonio2368)). +* New function getSettingOrDefault() added to return the default value and avoid exception if a custom setting is not found in the current profile. 
[#69917](https://github.com/ClickHouse/ClickHouse/pull/69917) ([Shankar](https://github.com/shiyer7474)). +* Allow empty needle in function replace, the same behavior with PostgreSQL. [#69918](https://github.com/ClickHouse/ClickHouse/pull/69918) ([zhanglistar](https://github.com/zhanglistar)). +* Enhance OpenTelemetry span logging to include query settings. [#70011](https://github.com/ClickHouse/ClickHouse/pull/70011) ([sharathks118](https://github.com/sharathks118)). +* Allow empty needle in functions replaceRegexp*, like https://github.com/ClickHouse/ClickHouse/pull/69918. [#70053](https://github.com/ClickHouse/ClickHouse/pull/70053) ([zhanglistar](https://github.com/zhanglistar)). +* Add info to higher-order array functions if lambda result type is unexpected. [#70093](https://github.com/ClickHouse/ClickHouse/pull/70093) ([ttanay](https://github.com/ttanay)). +* Keeper improvement: less blocking during cluster changes. [#70275](https://github.com/ClickHouse/ClickHouse/pull/70275) ([Antonio Andelic](https://github.com/antonio2368)). +* Embedded documentation for settings will be strictly more detailed and complete than the documentation on the website. This is the first step before making the website documentation always auto-generated from the source code. This has long-standing implications: - it will be guaranteed to have every setting; - there is no chance of having default values obsolete; - we can generate this documentation for each ClickHouse version; - the documentation can be displayed by the server itself even without Internet access. Generate the docs on the website from the source code. [#70289](https://github.com/ClickHouse/ClickHouse/pull/70289) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add `WITH IMPLICIT` and `FINAL` keywords to the `SHOW GRANTS` command. Fix a minor bug with implicit grants: [#70094](https://github.com/ClickHouse/ClickHouse/issues/70094). [#70293](https://github.com/ClickHouse/ClickHouse/pull/70293) ([pufit](https://github.com/pufit)). +* Don't disable nonblocking read from page cache for the entire server when reading from a blocking I/O. [#70299](https://github.com/ClickHouse/ClickHouse/pull/70299) ([Antonio Andelic](https://github.com/antonio2368)). +* Respect `compatibility` for MergeTree settings. The `compatibility` value is taken from the `default` profile on server startup, and default MergeTree settings are changed accordingly. Further changes of the `compatibility` setting do not affect MergeTree settings. [#70322](https://github.com/ClickHouse/ClickHouse/pull/70322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Clickhouse-client realtime metrics follow-up: restore cursor when ctrl-c cancels query; immediately stop intercepting keystrokes when the query is canceled; display the metrics table if `--progress-table` is on, and toggling is disabled. [#70423](https://github.com/ClickHouse/ClickHouse/pull/70423) ([Julia Kartseva](https://github.com/jkartseva)). +* Command-line arguments for Bool settings are set to true when no value is provided for the argument (e.g. `clickhouse-client --optimize_aggregation_in_order --query "SELECT 1"`). [#70459](https://github.com/ClickHouse/ClickHouse/pull/70459) ([davidtsuk](https://github.com/davidtsuk)). +* Avoid spamming the logs with large HTTP response bodies in case of errors during inter-server communication. [#70487](https://github.com/ClickHouse/ClickHouse/pull/70487) ([Vladimir Cherkasov](https://github.com/vdimir)). 
+* Added a new setting `max_parts_to_move` to control the maximum number of parts that can be moved at once. [#70520](https://github.com/ClickHouse/ClickHouse/pull/70520) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Limit the frequency of certain log messages. [#70601](https://github.com/ClickHouse/ClickHouse/pull/70601) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Don't do validation when synchronizing user_directories from keeper. [#70644](https://github.com/ClickHouse/ClickHouse/pull/70644) ([Raúl Marín](https://github.com/Algunenano)). +* Introduced a special (experimental) mode of a merge selector for MergeTree tables which makes it more aggressive for the partitions that are close to the limit by the number of parts. It is controlled by the `merge_selector_use_blurry_base` MergeTree-level setting. [#70645](https://github.com/ClickHouse/ClickHouse/pull/70645) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* `CHECK TABLE` with `PART` qualifier was incorrectly formatted in the client. [#70660](https://github.com/ClickHouse/ClickHouse/pull/70660) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Support writing the column index and the offset index using the native Parquet writer. [#70669](https://github.com/ClickHouse/ClickHouse/pull/70669) ([LiuNeng](https://github.com/liuneng1994)). +* Support parsing `DateTime64` with microseconds and time zone in Joda syntax. [#70737](https://github.com/ClickHouse/ClickHouse/pull/70737) ([kevinyhzou](https://github.com/KevinyhZou)). +* Changed the approach to figure out whether a cloud storage supports [batch delete](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html). [#70786](https://github.com/ClickHouse/ClickHouse/pull/70786) ([Vitaly Baranov](https://github.com/vitlibar)). +* Support for Parquet page V2 on native reader. [#70807](https://github.com/ClickHouse/ClickHouse/pull/70807) ([Arthur Passos](https://github.com/arthurpassos)). +* Add an HTML page for visualizing merges. [#70821](https://github.com/ClickHouse/ClickHouse/pull/70821) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71234](https://github.com/ClickHouse/ClickHouse/issues/71234): Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in the memory. The trade-offs are increased initial load time and memory required to store filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)). +* Added a check that a table does not have both `storage_policy` and `disk` set after an ALTER query, and a check that a new storage policy is compatible with the old one when the `disk` setting is used. [#70839](https://github.com/ClickHouse/ClickHouse/pull/70839) ([Kirill](https://github.com/kirillgarbar)). +* Add `system.s3_queue_settings` and `system.azure_queue_settings`. [#70841](https://github.com/ClickHouse/ClickHouse/pull/70841) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Functions `base58Encode` and `base58Decode` now accept arguments of type `FixedString`. Example: `SELECT base58Encode(toFixedString('plaintext', 9));`. [#70846](https://github.com/ClickHouse/ClickHouse/pull/70846) ([Faizan Patel](https://github.com/faizan2786)). +* Add the `partition` column to every entry type of the part log. Previously, it was set only for some entries. This closes [#70819](https://github.com/ClickHouse/ClickHouse/issues/70819).
[#70848](https://github.com/ClickHouse/ClickHouse/pull/70848) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add merge start and mutate start events into `system.part_log`, which helps with merge analysis and visualization. [#70850](https://github.com/ClickHouse/ClickHouse/pull/70850) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Do not call the LIST object storage API when determining if a file or directory exists on the plain rewritable disk, as it can be cost-inefficient. [#70852](https://github.com/ClickHouse/ClickHouse/pull/70852) ([Julia Kartseva](https://github.com/jkartseva)). +* Add a profile event about the number of merged source parts. It allows the monitoring of the fanout of the merge tree in production. [#70908](https://github.com/ClickHouse/ClickHouse/pull/70908) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Reduce the number of object storage HEAD API requests in the plain_rewritable disk. [#70915](https://github.com/ClickHouse/ClickHouse/pull/70915) ([Julia Kartseva](https://github.com/jkartseva)). +* Background downloads to the filesystem cache have been enabled again. [#70929](https://github.com/ClickHouse/ClickHouse/pull/70929) ([Nikita Taranov](https://github.com/nickitat)). +* Add a new merge selector algorithm, named `Trivial`, for professional usage only. It is worse than the `Simple` merge selector. [#70969](https://github.com/ClickHouse/ClickHouse/pull/70969) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fix toHour-like conversion functions' monotonicity when an optional time zone argument is passed. [#60264](https://github.com/ClickHouse/ClickHouse/pull/60264) ([Amos Bird](https://github.com/amosbird)). +* Relax `supportsPrewhere` check for StorageMerge. This fixes [#61064](https://github.com/ClickHouse/ClickHouse/issues/61064). It was hardened unnecessarily in [#60082](https://github.com/ClickHouse/ClickHouse/issues/60082). [#61091](https://github.com/ClickHouse/ClickHouse/pull/61091) ([Amos Bird](https://github.com/amosbird)). +* Fix `use_concurrency_control` setting handling for proper `concurrent_threads_soft_limit_num` limit enforcement. This enables concurrency control by default because previously it was broken. [#61473](https://github.com/ClickHouse/ClickHouse/pull/61473) ([Sergei Trifonov](https://github.com/serxa)). +* Fix incorrect JOIN ON section optimization in case of `IS NULL` check under any other function (like `NOT`) that may lead to wrong results. Closes [#67915](https://github.com/ClickHouse/ClickHouse/issues/67915). [#68049](https://github.com/ClickHouse/ClickHouse/pull/68049) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Prevent `ALTER` queries that would make the `CREATE` query of tables invalid. [#68574](https://github.com/ClickHouse/ClickHouse/pull/68574) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix inconsistent AST formatting for `negate` (`-`) and `NOT` functions with tuples and arrays. [#68600](https://github.com/ClickHouse/ClickHouse/pull/68600) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix insertion of incomplete type into Dynamic during deserialization. It could lead to `Parameter out of bound` errors. [#69291](https://github.com/ClickHouse/ClickHouse/pull/69291) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix infinite loop after `restore replica` in the replicated merge tree with zero copy.
[#69293](https://github.com/ClickHouse/ClickHouse/pull/69293) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Return the default value of `processing_threads_num` back to the number of CPU cores in storage `S3Queue`. [#69384](https://github.com/ClickHouse/ClickHouse/pull/69384) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Bypass try/catch flow when de/serializing nested repeated protobuf to nested columns (fixes [#41971](https://github.com/ClickHouse/ClickHouse/issues/41971)). [#69556](https://github.com/ClickHouse/ClickHouse/pull/69556) ([Eliot Hautefeuille](https://github.com/hileef)). +* Fix crash during insertion into a FixedString column in the PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)). +* Added `strict_once` mode to aggregate function `windowFunnel` to avoid counting one event several times in case it matches multiple conditions, closes [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835). [#69738](https://github.com/ClickHouse/ClickHouse/pull/69738) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fixed `maxMapState` throwing 'Bad get' if value type is DateTime64. [#69787](https://github.com/ClickHouse/ClickHouse/pull/69787) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix `getSubcolumn` with `LowCardinality` columns by overriding `useDefaultImplementationForLowCardinalityColumns` to return `true`. [#69831](https://github.com/ClickHouse/ClickHouse/pull/69831) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Fix permanently blocked distributed sends if a DROP of a distributed table fails. [#69843](https://github.com/ClickHouse/ClickHouse/pull/69843) ([Azat Khuzhin](https://github.com/azat)). +* Fix non-cancellable queries containing WITH FILL with NaN keys. This closes [#69261](https://github.com/ClickHouse/ClickHouse/issues/69261). [#69845](https://github.com/ClickHouse/ClickHouse/pull/69845) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix analyzer default with old compatibility value. [#69895](https://github.com/ClickHouse/ClickHouse/pull/69895) ([Raúl Marín](https://github.com/Algunenano)). +* Don't check dependencies during CREATE OR REPLACE VIEW during DROP of old table. Previously, a CREATE OR REPLACE query failed when there were dependent tables of the recreated view. [#69907](https://github.com/ClickHouse/ClickHouse/pull/69907) ([Pavel Kruglov](https://github.com/Avogar)). +* Implement missing decimal cases for `zeroField`. Fixes [#69730](https://github.com/ClickHouse/ClickHouse/issues/69730). [#69978](https://github.com/ClickHouse/ClickHouse/pull/69978) ([Arthur Passos](https://github.com/arthurpassos)). +* Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)). +* Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)). +* Fixed a bug where the time zone could change the result of a query with `Date` or `Date32` arguments. [#70036](https://github.com/ClickHouse/ClickHouse/pull/70036) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition.
Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Avoid reusing columns among different named tuples when evaluating `tuple` functions. This fixes [#70022](https://github.com/ClickHouse/ClickHouse/issues/70022). [#70103](https://github.com/ClickHouse/ClickHouse/pull/70103) ([Amos Bird](https://github.com/amosbird)). +* Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)). +* Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such a data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)). +* Proper error message for the illegal query `JOIN ... ON *`, closes [#68650](https://github.com/ClickHouse/ClickHouse/issues/68650). [#70124](https://github.com/ClickHouse/ClickHouse/pull/70124) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)). +* Fix data race in ColumnObject/ColumnTuple decompress method that could lead to a heap use-after-free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix a possible hang in ALTER COLUMN with the Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)). +* Now ClickHouse will consider more errors as retriable and will not mark data parts as broken in case of such errors. [#70145](https://github.com/ClickHouse/ClickHouse/pull/70145) ([alesapin](https://github.com/alesapin)). +* Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix the password being displayed in `system.query_log` for users with the bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix event counter for native interface (InterfaceNativeSendBytes). [#70153](https://github.com/ClickHouse/ClickHouse/pull/70153) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix possible crash in JSON column. [#70172](https://github.com/ClickHouse/ClickHouse/pull/70172) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix multiple issues with arrayMin and arrayMax. [#70207](https://github.com/ClickHouse/ClickHouse/pull/70207) ([Raúl Marín](https://github.com/Algunenano)). +* Respect the `allow_simdjson` setting in the JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Don't modify global settings with startup scripts. Previously, changing a setting in a startup script would change it globally. [#70310](https://github.com/ClickHouse/ClickHouse/pull/70310) ([Antonio Andelic](https://github.com/antonio2368)).
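+
+A hedged sketch for the startup-scripts fix above (the statement and the setting are only an example): a statement like this, executed from a `startup_scripts` query in the server configuration, now affects only the script's own session instead of silently becoming the new global default for every later connection.
+
+```sql
+-- Hypothetical statement run by a server startup script.
+SET max_threads = 1;
+```
+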
+* Fix ALTER of Dynamic type with reducing max_types parameter that could lead to server crash. [#70328](https://github.com/ClickHouse/ClickHouse/pull/70328) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)). +* Fix possible use-after-free in `SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf`. [#70358](https://github.com/ClickHouse/ClickHouse/pull/70358) ([Azat Khuzhin](https://github.com/azat)). +* Fix crash during GROUP BY JSON sub-object subcolumn. [#70374](https://github.com/ClickHouse/ClickHouse/pull/70374) ([Pavel Kruglov](https://github.com/Avogar)). +* Don't prefetch parts for vertical merges if part has no rows. [#70452](https://github.com/ClickHouse/ClickHouse/pull/70452) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix crash in WHERE with lambda functions. [#70464](https://github.com/ClickHouse/ClickHouse/pull/70464) ([Raúl Marín](https://github.com/Algunenano)). +* Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Fixed rare crashes in `SELECT`-s and merges after adding a column of `Array` type with non-empty default expression. [#70695](https://github.com/ClickHouse/ClickHouse/pull/70695) ([Anton Popov](https://github.com/CurtizJ)). +* Insert into table function s3 respect query settings. [#70696](https://github.com/ClickHouse/ClickHouse/pull/70696) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix infinite recursion when infering a proto schema with skip unsupported fields enabled. [#70697](https://github.com/ClickHouse/ClickHouse/pull/70697) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71122](https://github.com/ClickHouse/ClickHouse/issues/71122): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . 
Address not mapped to object. 2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. 
DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Disable enable_named_columns_in_function_tuple by default. [#70833](https://github.com/ClickHouse/ClickHouse/pull/70833) ([Raúl Marín](https://github.com/Algunenano)). +* Fix S3Queue table engine setting processing_threads_num not being effective in case it was deduced from the number of cpu cores on the server. [#70837](https://github.com/ClickHouse/ClickHouse/pull/70837) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Normalize named tuple arguments in aggregation states. This fixes [#69732](https://github.com/ClickHouse/ClickHouse/issues/69732) . [#70853](https://github.com/ClickHouse/ClickHouse/pull/70853) ([Amos Bird](https://github.com/amosbird)). +* Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71214](https://github.com/ClickHouse/ClickHouse/issues/71214): Fix logical error in `StorageS3Queue` "Cannot create a persistent node in /processed since it already exists". [#70984](https://github.com/ClickHouse/ClickHouse/pull/70984) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#71243](https://github.com/ClickHouse/ClickHouse/issues/71243): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). 
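+
+For context on the `enable_named_columns_in_function_tuple` change above, a small illustration (the result types are assumed): with the setting enabled, `tuple()` builds a named tuple from the column aliases; with the new default of 0, the result is a plain unnamed tuple.
+
+```sql
+SELECT tuple(1 AS a, 2 AS b) AS t, toTypeName(t)
+SETTINGS enable_named_columns_in_function_tuple = 1;
+-- expected type with the setting enabled: Tuple(a UInt8, b UInt8); with it disabled: Tuple(UInt8, UInt8)
+```
+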
+* Backported in [#71157](https://github.com/ClickHouse/ClickHouse/issues/71157): Fix a bug where the `_row_exists` column was not considered in the rebuild option of projection lightweight delete. [#71089](https://github.com/ClickHouse/ClickHouse/pull/71089) ([Shichao Jin](https://github.com/jsc0218)). +* Backported in [#71265](https://github.com/ClickHouse/ClickHouse/issues/71265): Fix a wrong value in `system.query_metric_log` due to an unexpected race condition. [#71124](https://github.com/ClickHouse/ClickHouse/pull/71124) ([Pablo Marcos](https://github.com/pamarcos)). +* Backported in [#71331](https://github.com/ClickHouse/ClickHouse/issues/71331): Fix async inserts with empty blocks via native protocol. [#71312](https://github.com/ClickHouse/ClickHouse/pull/71312) ([Anton Popov](https://github.com/CurtizJ)). + +#### Build/Testing/Packaging Improvement +* Docker in the integration tests runner is updated to the latest version. It was previously pinned until patch release 24.0.3 was out. https://github.com/moby/moby/issues/45770#issuecomment-1618255130. - The HDFS image was deprecated and not running with the current Docker version. Switched to a newer version of a derivative image based on Ubuntu. - HDFS tests were hardened to allow them to run with python-repeat. [#66867](https://github.com/ClickHouse/ClickHouse/pull/66867) ([Ilya Yatsishin](https://github.com/qoega)). +* Alpine Docker images now use Ubuntu 22.04 as the glibc donor, resulting in an upgrade of the glibc version delivered with Alpine images from 2.31 to 2.35. [#69033](https://github.com/ClickHouse/ClickHouse/pull/69033) ([filimonov](https://github.com/filimonov)). +* Make dbms independent from clickhouse_functions. [#69914](https://github.com/ClickHouse/ClickHouse/pull/69914) ([Raúl Marín](https://github.com/Algunenano)). +* Fix FreeBSD compilation of the MariaDB connector. [#70007](https://github.com/ClickHouse/ClickHouse/pull/70007) ([Raúl Marín](https://github.com/Algunenano)). +* Building on Apple Mac OS X Darwin does not produce strange warnings anymore. [#70411](https://github.com/ClickHouse/ClickHouse/pull/70411) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix building with ARCH_NATIVE CMake flag. [#70585](https://github.com/ClickHouse/ClickHouse/pull/70585) ([Daniil Gentili](https://github.com/danog)). +* The universal installer will download the Musl build on Alpine Linux. Some Docker containers are using Alpine Linux, but it was not possible to install ClickHouse there with `curl https://clickhouse.com/ | sh`. [#70767](https://github.com/ClickHouse/ClickHouse/pull/70767) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NO CL CATEGORY + +* Backported in [#71259](https://github.com/ClickHouse/ClickHouse/issues/71259):. [#71220](https://github.com/ClickHouse/ClickHouse/pull/71220) ([Raúl Marín](https://github.com/Algunenano)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "JSONCompactWithProgress query output format"'. [#69989](https://github.com/ClickHouse/ClickHouse/pull/69989) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Support CREATE OR REPLACE VIEW atomically"'. [#70535](https://github.com/ClickHouse/ClickHouse/pull/70535) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "Revert "Support CREATE OR REPLACE VIEW atomically""'. [#70536](https://github.com/ClickHouse/ClickHouse/pull/70536) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "Add projections size to system.projections"'.
[#70858](https://github.com/ClickHouse/ClickHouse/pull/70858) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Allow writing the argument of `has`, `hasAny`, or `hasAll` as string values if the array element type is `Enum` (see the example below). [#56555](https://github.com/ClickHouse/ClickHouse/pull/56555) ([Duc Canh Le](https://github.com/canhld94)). +* Rename FileSegmentKind::Ephemeral and other changes. [#66600](https://github.com/ClickHouse/ClickHouse/pull/66600) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Closes [#67345](https://github.com/ClickHouse/ClickHouse/issues/67345). [#67346](https://github.com/ClickHouse/ClickHouse/pull/67346) ([KrJin](https://github.com/jincong8973)). +* Because it is too complicated to support. [#68410](https://github.com/ClickHouse/ClickHouse/pull/68410) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix 01600_parts_states_metrics_long flakiness. [#68521](https://github.com/ClickHouse/ClickHouse/pull/68521) ([Azat Khuzhin](https://github.com/azat)). +* Reduce client start time in debug/sanitizer mode. [#68980](https://github.com/ClickHouse/ClickHouse/pull/68980) ([Raúl Marín](https://github.com/Algunenano)). +* Closes [#69038](https://github.com/ClickHouse/ClickHouse/issues/69038). [#69040](https://github.com/ClickHouse/ClickHouse/pull/69040) ([Nikolay Degterinsky](https://github.com/evillique)). +* Better exception for unsupported full_text index with non-full parts. [#69067](https://github.com/ClickHouse/ClickHouse/pull/69067) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Catch additional ZooKeeper connection errors while creating a table and make sure to clean up directories if necessary for retries. [#69093](https://github.com/ClickHouse/ClickHouse/pull/69093) ([Sumit](https://github.com/sum12)). +* Update version_date.tsv and changelog after v24.7.5.37-stable. [#69185](https://github.com/ClickHouse/ClickHouse/pull/69185) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* DOCS: Replace live view with refreshable since the former is deprecated. [#69392](https://github.com/ClickHouse/ClickHouse/pull/69392) ([Damian Kula](https://github.com/heavelock)). +* Update ORC to the current HEAD. [#69473](https://github.com/ClickHouse/ClickHouse/pull/69473) ([Nikita Taranov](https://github.com/nickitat)). +* Make a test ready for flaky check. [#69586](https://github.com/ClickHouse/ClickHouse/pull/69586) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support the antlr parser parsing SQL with some keywords as aliases, making the behaviour the same as clickhouse-server; remove the redundant `for` in the `keyword` field. [#69614](https://github.com/ClickHouse/ClickHouse/pull/69614) ([Z.H.](https://github.com/onlyacat)). +* Allow default implementations for NULL in function mapFromArrays for Spark compatibility in Apache Gluten. The current change doesn't have any side effects on ClickHouse in theory. [#69715](https://github.com/ClickHouse/ClickHouse/pull/69715) ([李扬](https://github.com/taiyang-li)). +* Fix exception message in AzureBlobStorage. [#69728](https://github.com/ClickHouse/ClickHouse/pull/69728) ([Pavel Kruglov](https://github.com/Avogar)). +* Add a test for parsing an S3 URL with a bucket name including a dot. [#69743](https://github.com/ClickHouse/ClickHouse/pull/69743) ([Kaushik Iska](https://github.com/iskakaushik)). +* Make `clang-tidy` happy. [#69765](https://github.com/ClickHouse/ClickHouse/pull/69765) ([Konstantin Bogdanov](https://github.com/thevar1able)).
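+
+An illustration (with an assumed table) of the `has`/`hasAny`/`hasAll` change referenced above: the needle can now be written as the Enum's string value rather than its underlying number.
+
+```sql
+CREATE TABLE t_colors (c Array(Enum8('red' = 1, 'green' = 2))) ENGINE = Memory;
+INSERT INTO t_colors VALUES (['red', 'green']), (['green']);
+SELECT count() FROM t_colors WHERE has(c, 'red');  -- expected result: 1
+```
+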
+* Prepare to enable `clang-tidy` `readability-else-after-return`. [#69768](https://github.com/ClickHouse/ClickHouse/pull/69768) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* S3Queue: support having deprecated settings to not fail server startup. [#69769](https://github.com/ClickHouse/ClickHouse/pull/69769) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Use only adaptive heuristic to choose task sizes for remote reading. [#69778](https://github.com/ClickHouse/ClickHouse/pull/69778) ([Nikita Taranov](https://github.com/nickitat)). +* Remove unused buggy code. [#69780](https://github.com/ClickHouse/ClickHouse/pull/69780) ([Raúl Marín](https://github.com/Algunenano)). +* Fix bugfix check. [#69789](https://github.com/ClickHouse/ClickHouse/pull/69789) ([Antonio Andelic](https://github.com/antonio2368)). +* Followup for [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69790](https://github.com/ClickHouse/ClickHouse/pull/69790) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Update version after release. [#69816](https://github.com/ClickHouse/ClickHouse/pull/69816) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update ext-dict-functions.md. [#69819](https://github.com/ClickHouse/ClickHouse/pull/69819) ([kurikuQwQ](https://github.com/kurikuQwQ)). +* Allow cyrillic characters in generated contributor names. [#69820](https://github.com/ClickHouse/ClickHouse/pull/69820) ([Raúl Marín](https://github.com/Algunenano)). +* CI: praktika integration 1. [#69822](https://github.com/ClickHouse/ClickHouse/pull/69822) ([Max Kainov](https://github.com/maxknv)). +* Fix `test_delayed_replica_failover`. [#69826](https://github.com/ClickHouse/ClickHouse/pull/69826) ([Antonio Andelic](https://github.com/antonio2368)). +* minor change, less conflicts. [#69830](https://github.com/ClickHouse/ClickHouse/pull/69830) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Improve error message DDLWorker.cpp. [#69835](https://github.com/ClickHouse/ClickHouse/pull/69835) ([Denny Crane](https://github.com/den-crane)). +* Fix typo in description: mutation_sync -> mutations_sync. [#69838](https://github.com/ClickHouse/ClickHouse/pull/69838) ([Alexander Gololobov](https://github.com/davenger)). +* Fix changelog. [#69841](https://github.com/ClickHouse/ClickHouse/pull/69841) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#49940](https://github.com/ClickHouse/ClickHouse/issues/49940). [#69842](https://github.com/ClickHouse/ClickHouse/pull/69842) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#51036](https://github.com/ClickHouse/ClickHouse/issues/51036). [#69844](https://github.com/ClickHouse/ClickHouse/pull/69844) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update README.md - Update meetups. [#69849](https://github.com/ClickHouse/ClickHouse/pull/69849) ([Tanya Bragin](https://github.com/tbragin)). +* Revert [#69790](https://github.com/ClickHouse/ClickHouse/issues/69790) and [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69850](https://github.com/ClickHouse/ClickHouse/pull/69850) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* See [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69851](https://github.com/ClickHouse/ClickHouse/pull/69851) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#50928](https://github.com/ClickHouse/ClickHouse/issues/50928). 
[#69852](https://github.com/ClickHouse/ClickHouse/pull/69852) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#55981](https://github.com/ClickHouse/ClickHouse/issues/55981). [#69853](https://github.com/ClickHouse/ClickHouse/pull/69853) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#56823](https://github.com/ClickHouse/ClickHouse/issues/56823). [#69854](https://github.com/ClickHouse/ClickHouse/pull/69854) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#62350](https://github.com/ClickHouse/ClickHouse/issues/62350). [#69855](https://github.com/ClickHouse/ClickHouse/pull/69855) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Refactor functions and variables in statistics code. [#69860](https://github.com/ClickHouse/ClickHouse/pull/69860) ([Robert Schulze](https://github.com/rschu1ze)). +* Resubmit [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69861](https://github.com/ClickHouse/ClickHouse/pull/69861) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Improve stateless test runner. [#69864](https://github.com/ClickHouse/ClickHouse/pull/69864) ([Alexey Katsman](https://github.com/alexkats)). +* Adjust fast test time limit a bit. [#69874](https://github.com/ClickHouse/ClickHouse/pull/69874) ([Raúl Marín](https://github.com/Algunenano)). +* Add initial 24.9 CHANGELOG. [#69876](https://github.com/ClickHouse/ClickHouse/pull/69876) ([Raúl Marín](https://github.com/Algunenano)). +* Fix test `01278_random_string_utf8`. [#69878](https://github.com/ClickHouse/ClickHouse/pull/69878) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix minor fuzzer issue with experimental statistics. [#69881](https://github.com/ClickHouse/ClickHouse/pull/69881) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix linking after settings refactoring. [#69882](https://github.com/ClickHouse/ClickHouse/pull/69882) ([Robert Schulze](https://github.com/rschu1ze)). +* Add Proj Obsolete Setting. [#69883](https://github.com/ClickHouse/ClickHouse/pull/69883) ([Shichao Jin](https://github.com/jsc0218)). +* Improve remote queries startup time. [#69884](https://github.com/ClickHouse/ClickHouse/pull/69884) ([Igor Nikonov](https://github.com/devcrafter)). +* Revert "Merge pull request [#69032](https://github.com/ClickHouse/ClickHouse/issues/69032) from alexon1234/include_real_time_execution_in_http_header". [#69885](https://github.com/ClickHouse/ClickHouse/pull/69885) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* A dedicated commits from https://github.com/ClickHouse/ClickHouse/pull/61473. [#69896](https://github.com/ClickHouse/ClickHouse/pull/69896) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Added aliases `time_bucket`(from TimescaleDB) and `date_bin`(from PostgreSQL) for `toStartOfInterval`. [#69900](https://github.com/ClickHouse/ClickHouse/pull/69900) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* RIPE is an acronym and thus should be capital. RIPE stands for **R**ACE **I**ntegrity **P**rimitives **E**valuation and RACE stands for **R**esearch and Development in **A**dvanced **C**ommunications **T**echnologies in **E**urope. [#69901](https://github.com/ClickHouse/ClickHouse/pull/69901) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Replace error codes with error names in stateless tests. [#69906](https://github.com/ClickHouse/ClickHouse/pull/69906) ([Dmitry Novik](https://github.com/novikd)). +* Move setting to 24.10. 
[#69913](https://github.com/ClickHouse/ClickHouse/pull/69913) ([Raúl Marín](https://github.com/Algunenano)). +* Minor: Reduce diff between public and private repo. [#69928](https://github.com/ClickHouse/ClickHouse/pull/69928) ([Robert Schulze](https://github.com/rschu1ze)). +* Followup for [#69861](https://github.com/ClickHouse/ClickHouse/issues/69861). [#69930](https://github.com/ClickHouse/ClickHouse/pull/69930) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix test_dictionaries_all_layouts_separate_sources. [#69962](https://github.com/ClickHouse/ClickHouse/pull/69962) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix test_keeper_mntr_data_size. [#69965](https://github.com/ClickHouse/ClickHouse/pull/69965) ([Antonio Andelic](https://github.com/antonio2368)). +* This closes [#49823](https://github.com/ClickHouse/ClickHouse/issues/49823). [#69981](https://github.com/ClickHouse/ClickHouse/pull/69981) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add changelog for 24.9. [#69982](https://github.com/ClickHouse/ClickHouse/pull/69982) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#45303](https://github.com/ClickHouse/ClickHouse/issues/45303). [#69987](https://github.com/ClickHouse/ClickHouse/pull/69987) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#69988](https://github.com/ClickHouse/ClickHouse/pull/69988) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update README.md. [#69991](https://github.com/ClickHouse/ClickHouse/pull/69991) ([Tyler Hannan](https://github.com/tylerhannan)). +* Disable `03215_parallel_replicas_crash_after_refactoring.sql` for Azure. [#69992](https://github.com/ClickHouse/ClickHouse/pull/69992) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Update CHANGELOG.md. [#69993](https://github.com/ClickHouse/ClickHouse/pull/69993) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#70004](https://github.com/ClickHouse/ClickHouse/pull/70004) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Revert "Add RIPEMD160 function". [#70005](https://github.com/ClickHouse/ClickHouse/pull/70005) ([Robert Schulze](https://github.com/rschu1ze)). +* Update CHANGELOG.md. [#70009](https://github.com/ClickHouse/ClickHouse/pull/70009) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#70010](https://github.com/ClickHouse/ClickHouse/pull/70010) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Make the pylint stricter. [#70013](https://github.com/ClickHouse/ClickHouse/pull/70013) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Added a setting `restore_replace_external_dictionary_source_to_null` which enables replacing dictionary source with Null on restore for external dictionaries (useful for testing). [#70032](https://github.com/ClickHouse/ClickHouse/pull/70032) ([Alexander Tokmakov](https://github.com/tavplubix)). +* `isort` is a simple import sorter for the python to comply [pep-8](https://peps.python.org/pep-0008/#imports) requirements. It will allow to decrease conflicts during sync and beautify the code. The import block is divided into three sub-blocks: `standard library` -> `third-party libraries` -> `local imports` -> `.local imports`. Each sub-block is ordered alphabetically with sub-sub-blocks `import X` -> `from X import Y`. [#70038](https://github.com/ClickHouse/ClickHouse/pull/70038) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
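+
+A hedged sketch of how the `restore_replace_external_dictionary_source_to_null` setting described above might be used (the backup disk name and file are assumed): it lets a restore succeed in a test environment even when the original external dictionary sources are unreachable, by replacing them with Null sources.
+
+```sql
+RESTORE DATABASE default FROM Disk('backups', 'backup_2024_10.zip')
+SETTINGS restore_replace_external_dictionary_source_to_null = 1;
+```
+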
+* Update version_date.tsv and changelog after v24.9.1.3278-stable. [#70049](https://github.com/ClickHouse/ClickHouse/pull/70049) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Despite the fact that we set the org-level workflow parameter `PYTHONUNBUFFERED`, it's not inherited in workflows. [#70050](https://github.com/ClickHouse/ClickHouse/pull/70050) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix ubsan issue in function sqid. [#70061](https://github.com/ClickHouse/ClickHouse/pull/70061) ([Robert Schulze](https://github.com/rschu1ze)). +* Delete a setting change. [#70071](https://github.com/ClickHouse/ClickHouse/pull/70071) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix `test_distributed_ddl`. [#70075](https://github.com/ClickHouse/ClickHouse/pull/70075) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Remove unused placeholder from exception message string. [#70086](https://github.com/ClickHouse/ClickHouse/pull/70086) ([Alsu Giliazova](https://github.com/alsugiliazova)). +* Better exception message when some of the permissions are missing. [#70088](https://github.com/ClickHouse/ClickHouse/pull/70088) ([pufit](https://github.com/pufit)). +* Make vector similarity indexes work with adaptive granularity. [#70101](https://github.com/ClickHouse/ClickHouse/pull/70101) ([Robert Schulze](https://github.com/rschu1ze)). +* Add missing columns `total_rows`, `data_compressed_bytes`, and `data_uncompressed_bytes` to `system.projections`. Part of https://github.com/ClickHouse/ClickHouse/pull/68901. [#70106](https://github.com/ClickHouse/ClickHouse/pull/70106) ([Jordi Villar](https://github.com/jrdi)). +* Make `00938_fix_rwlock_segfault_long` non-flaky. [#70109](https://github.com/ClickHouse/ClickHouse/pull/70109) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove TODO. [#70110](https://github.com/ClickHouse/ClickHouse/pull/70110) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Change the default threshold to enable hyper threading. [#70111](https://github.com/ClickHouse/ClickHouse/pull/70111) ([Jiebin Sun](https://github.com/jiebinn)). +* Fixed [#69092](https://github.com/ClickHouse/ClickHouse/issues/69092): if `materialized_postgresql_tables_list=table1(id, code),table(id,name)` (`table1` has a name that is a substring of `table`), the `getTableAllowedColumns` method returned `[id, code]` for `table` before this fix. [#70114](https://github.com/ClickHouse/ClickHouse/pull/70114) ([Kruglov Kirill](https://github.com/1on)). +* Reduce log level. [#70117](https://github.com/ClickHouse/ClickHouse/pull/70117) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Rename `getNumberOfPhysicalCPUCores` and fix its description. [#70130](https://github.com/ClickHouse/ClickHouse/pull/70130) ([Nikita Taranov](https://github.com/nickitat)). +* Adding 24.10. [#70132](https://github.com/ClickHouse/ClickHouse/pull/70132) ([Tyler Hannan](https://github.com/tylerhannan)). +* (Re?)-enable libcxx asserts for debug builds. [#70134](https://github.com/ClickHouse/ClickHouse/pull/70134) ([Robert Schulze](https://github.com/rschu1ze)). +* Refactor reading from object storage. [#70141](https://github.com/ClickHouse/ClickHouse/pull/70141) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Silence UBSAN for integer overflows in some datetime functions. [#70142](https://github.com/ClickHouse/ClickHouse/pull/70142) ([Michael Kolupaev](https://github.com/al13n321)). +* Improve pipdeptree generator for docker images.
- Update requirements.txt for the integration tests runner container - Remove some small dependencies, improve `helpers/retry_decorator.py` - Upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix 'QueryPlan was not initialized' in 'loop' with empty MergeTree. [#70149](https://github.com/ClickHouse/ClickHouse/pull/70149) ([Michael Kolupaev](https://github.com/al13n321)). +* Remove QueryPlan DataStream. [#70158](https://github.com/ClickHouse/ClickHouse/pull/70158) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Small docs fix. [#70160](https://github.com/ClickHouse/ClickHouse/pull/70160) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Test: PR local plan, non-constant in source stream. [#70173](https://github.com/ClickHouse/ClickHouse/pull/70173) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix performance checks. [#70175](https://github.com/ClickHouse/ClickHouse/pull/70175) ([Antonio Andelic](https://github.com/antonio2368)). +* Simplify test 03246_range_literal_replacement_works. [#70176](https://github.com/ClickHouse/ClickHouse/pull/70176) ([Pablo Marcos](https://github.com/pamarcos)). +* Update 01079_parallel_alter_add_drop_column_zookeeper.sh. [#70196](https://github.com/ClickHouse/ClickHouse/pull/70196) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Require bugfix job for a set of labels. [#70197](https://github.com/ClickHouse/ClickHouse/pull/70197) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* CI: Praktika integration, fast test. [#70239](https://github.com/ClickHouse/ClickHouse/pull/70239) ([Max Kainov](https://github.com/maxknv)). +* Avoid `Cannot schedule a task` error when loading parts. [#70257](https://github.com/ClickHouse/ClickHouse/pull/70257) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Bump usearch to v2.15.2 and SimSIMD to v5.0.0. [#70270](https://github.com/ClickHouse/ClickHouse/pull/70270) ([Robert Schulze](https://github.com/rschu1ze)). +* Instead of balancing tests by `crc32(file_name)` we'll use `add tests to a group with a minimal number of tests`. [#70272](https://github.com/ClickHouse/ClickHouse/pull/70272) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Closes [#70263](https://github.com/ClickHouse/ClickHouse/issues/70263). [#70273](https://github.com/ClickHouse/ClickHouse/pull/70273) ([flynn](https://github.com/ucasfl)). +* Hide MergeTreeSettings implementation. [#70285](https://github.com/ClickHouse/ClickHouse/pull/70285) ([Raúl Marín](https://github.com/Algunenano)). +* CI: Remove await feature from release branches. [#70294](https://github.com/ClickHouse/ClickHouse/pull/70294) ([Max Kainov](https://github.com/maxknv)). +* Fix `test_keeper_four_word_command`. [#70298](https://github.com/ClickHouse/ClickHouse/pull/70298) ([Antonio Andelic](https://github.com/antonio2368)). +* Update version_date.tsv and changelog after v24.9.2.42-stable. [#70301](https://github.com/ClickHouse/ClickHouse/pull/70301) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Synchronize settings with private. [#70320](https://github.com/ClickHouse/ClickHouse/pull/70320) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add Ignore Option In DeduplicateMergeProjectionMode. 
[#70327](https://github.com/ClickHouse/ClickHouse/pull/70327) ([Shichao Jin](https://github.com/jsc0218)). +* CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* There is [a failed CI job](https://s3.amazonaws.com/clickhouse-test-reports/69778/2d81c38874958bd9d54a25524173bdb1ddf2b75c/stateless_tests__release_.html) which is triggered by [03237_create_or_replace_view_atomically_with_atomic_engine](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/03237_create_or_replace_view_atomically_with_atomic_engine.sh). [#70330](https://github.com/ClickHouse/ClickHouse/pull/70330) ([tuanpach](https://github.com/tuanpach)). +* Fix flaky test `03237_insert_sparse_columns_mem`. [#70333](https://github.com/ClickHouse/ClickHouse/pull/70333) ([Anton Popov](https://github.com/CurtizJ)). +* Rename enable_secure_identifiers -> enforce_strict_identifier_format. [#70335](https://github.com/ClickHouse/ClickHouse/pull/70335) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Attempt to fix flaky RabbitMQ tests. Maybe closes [#45160](https://github.com/ClickHouse/ClickHouse/issues/45160). [#70336](https://github.com/ClickHouse/ClickHouse/pull/70336) ([filimonov](https://github.com/filimonov)). +* Don't fail the stateless check script if we can't collect minio logs. [#70350](https://github.com/ClickHouse/ClickHouse/pull/70350) ([Raúl Marín](https://github.com/Algunenano)). +* Fix tiny mistake, responsible for some of kafka test flaps. Example [report](https://s3.amazonaws.com/clickhouse-test-reports/0/3198aafac59c368993e7b5f49d95674cc1b1be18/integration_tests__release__[2_4].html). [#70352](https://github.com/ClickHouse/ClickHouse/pull/70352) ([filimonov](https://github.com/filimonov)). +* Closes [#69634](https://github.com/ClickHouse/ClickHouse/issues/69634). [#70354](https://github.com/ClickHouse/ClickHouse/pull/70354) ([pufit](https://github.com/pufit)). +* Fix 02346_fulltext_index_bug52019. [#70357](https://github.com/ClickHouse/ClickHouse/pull/70357) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Use new JSON for collecting minio logs. [#70359](https://github.com/ClickHouse/ClickHouse/pull/70359) ([Antonio Andelic](https://github.com/antonio2368)). +* Update comments in VectorSimilarityCondition (WHERE is not supported). [#70360](https://github.com/ClickHouse/ClickHouse/pull/70360) ([Azat Khuzhin](https://github.com/azat)). +* Remove 02492_clickhouse_local_context_uaf test. [#70363](https://github.com/ClickHouse/ClickHouse/pull/70363) ([Azat Khuzhin](https://github.com/azat)). +* Fix `clang-19` build issues. [#70412](https://github.com/ClickHouse/ClickHouse/pull/70412) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Ignore "Invalid multibyte data detected" error during completion. [#70422](https://github.com/ClickHouse/ClickHouse/pull/70422) ([Azat Khuzhin](https://github.com/azat)). +* Make QueryPlan explain methods const. [#70444](https://github.com/ClickHouse/ClickHouse/pull/70444) ([Alexander Gololobov](https://github.com/davenger)). +* Fix 0.1 second delay for interactive queries (due to keystroke interceptor). [#70445](https://github.com/ClickHouse/ClickHouse/pull/70445) ([Azat Khuzhin](https://github.com/azat)). +* Increase lock timeout in attempt to fix 02125_many_mutations. [#70448](https://github.com/ClickHouse/ClickHouse/pull/70448) ([Azat Khuzhin](https://github.com/azat)). +* Fix order in 03249_dynamic_alter_consistency. 
[#70453](https://github.com/ClickHouse/ClickHouse/pull/70453) ([Alexander Gololobov](https://github.com/davenger)). +* Fix refreshable MV in system database breaking server startup. [#70460](https://github.com/ClickHouse/ClickHouse/pull/70460) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix flaky test_refreshable_mv_in_replicated_db. [#70462](https://github.com/ClickHouse/ClickHouse/pull/70462) ([Michael Kolupaev](https://github.com/al13n321)). +* Update version_date.tsv and changelog after v24.8.5.115-lts. [#70463](https://github.com/ClickHouse/ClickHouse/pull/70463) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Decrease probability of "Server died" due to 00913_many_threads. [#70473](https://github.com/ClickHouse/ClickHouse/pull/70473) ([Azat Khuzhin](https://github.com/azat)). +* Fixes for killing leftovers in clickhouse-test. [#70474](https://github.com/ClickHouse/ClickHouse/pull/70474) ([Azat Khuzhin](https://github.com/azat)). +* Update version_date.tsv and changelog after v24.3.12.75-lts. [#70485](https://github.com/ClickHouse/ClickHouse/pull/70485) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Use logging instead of print. [#70505](https://github.com/ClickHouse/ClickHouse/pull/70505) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Remove slow poll() logs in Keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Add timeouts for retry loops in test_storage_rabbitmq. It should prevent cascading failures of the whole test suite caused by a dead loop in one of the test scenarios. Also added small sleeps in 'tight' loops to make retries a bit less aggressive. [#70510](https://github.com/ClickHouse/ClickHouse/pull/70510) ([filimonov](https://github.com/filimonov)). +* CI: Fix for canceled Sync workflow. [#70521](https://github.com/ClickHouse/ClickHouse/pull/70521) ([Max Kainov](https://github.com/maxknv)). +* The debug build failed with clang-18 after https://github.com/ClickHouse/ClickHouse/pull/70412; it's unclear why it is OK in the release build. Simply changing `_` to `_1` is OK for both release and debug builds. [#70532](https://github.com/ClickHouse/ClickHouse/pull/70532) ([Chang chen](https://github.com/baibaichen)). +* Refreshable materialized views are not experimental anymore. [#70550](https://github.com/ClickHouse/ClickHouse/pull/70550) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix 24.9 setting compatibility `database_replicated_allow_explicit_uuid`. [#70565](https://github.com/ClickHouse/ClickHouse/pull/70565) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix typos. [#70588](https://github.com/ClickHouse/ClickHouse/pull/70588) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Vector search: allow specifying the HNSW parameter `ef_search` at query time. [#70616](https://github.com/ClickHouse/ClickHouse/pull/70616) ([Robert Schulze](https://github.com/rschu1ze)). +* Increase max_rows_to_read limit in some tests. [#70617](https://github.com/ClickHouse/ClickHouse/pull/70617) ([Raúl Marín](https://github.com/Algunenano)). +* Reduce sync efforts with private. [#70634](https://github.com/ClickHouse/ClickHouse/pull/70634) ([Raúl Marín](https://github.com/Algunenano)). +* Fix parsing of some formats into sparse columns. [#70635](https://github.com/ClickHouse/ClickHouse/pull/70635) ([Anton Popov](https://github.com/CurtizJ)). +* Fix typos.
[#70637](https://github.com/ClickHouse/ClickHouse/pull/70637) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Try fix 00180_no_seek_avoiding_when_reading_from_cache. [#70640](https://github.com/ClickHouse/ClickHouse/pull/70640) ([Kseniia Sumarokova](https://github.com/kssenii)). +* When the `PR Check` status is set, it's a valid RunConfig job failure. [#70643](https://github.com/ClickHouse/ClickHouse/pull/70643) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix timeout in materialized pg tests. [#70646](https://github.com/ClickHouse/ClickHouse/pull/70646) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Introduced MergeTree setting which allow to change merge selecting algorithm. However we still have only one algorithm and it's mostly for future experiments. [#70647](https://github.com/ClickHouse/ClickHouse/pull/70647) ([alesapin](https://github.com/alesapin)). +* Docs: Follow-up for [#70585](https://github.com/ClickHouse/ClickHouse/issues/70585). [#70654](https://github.com/ClickHouse/ClickHouse/pull/70654) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove strange file. [#70662](https://github.com/ClickHouse/ClickHouse/pull/70662) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Locally I had lots of errors like `'AllocList' does not refer to a value` around places which used `offsetof`. Changing it to `__builtin_offsetof ` helped and I didn't debug any further. [#70671](https://github.com/ClickHouse/ClickHouse/pull/70671) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Adding the report link to a test result and files' list. [#70677](https://github.com/ClickHouse/ClickHouse/pull/70677) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* materialized postgres: minor fixes. [#70710](https://github.com/ClickHouse/ClickHouse/pull/70710) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Probably fix flaky test_refreshable_mv_in_replicated_db. [#70714](https://github.com/ClickHouse/ClickHouse/pull/70714) ([Michael Kolupaev](https://github.com/al13n321)). +* Move more setting structs to pImpl. [#70739](https://github.com/ClickHouse/ClickHouse/pull/70739) ([Raúl Marín](https://github.com/Algunenano)). +* Reduce sync effort. [#70747](https://github.com/ClickHouse/ClickHouse/pull/70747) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71198](https://github.com/ClickHouse/ClickHouse/issues/71198): Check number of arguments for function with Dynamic argument. [#70749](https://github.com/ClickHouse/ClickHouse/pull/70749) ([Nikita Taranov](https://github.com/nickitat)). +* Add s3queue settings check for cloud. [#70750](https://github.com/ClickHouse/ClickHouse/pull/70750) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix readiness/health check for OpenLDAP container. [#70755](https://github.com/ClickHouse/ClickHouse/pull/70755) ([Julian Maicher](https://github.com/jmaicher)). +* Allow update plan headers for all the steps. [#70761](https://github.com/ClickHouse/ClickHouse/pull/70761) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Autogenerate documentation for settings. [#70768](https://github.com/ClickHouse/ClickHouse/pull/70768) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Not a logical error. [#70770](https://github.com/ClickHouse/ClickHouse/pull/70770) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Aarch64 build with Asan. [#70778](https://github.com/ClickHouse/ClickHouse/pull/70778) ([Max Kainov](https://github.com/maxknv)). +* Minor fix. 
[#70783](https://github.com/ClickHouse/ClickHouse/pull/70783) ([Anton Popov](https://github.com/CurtizJ)). +* The docs for settings should be located in the source code. Now, the CI supports that. [#70784](https://github.com/ClickHouse/ClickHouse/pull/70784) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update style-test image. [#70785](https://github.com/ClickHouse/ClickHouse/pull/70785) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid double finalization of `WriteBuffer` in library bridge. [#70799](https://github.com/ClickHouse/ClickHouse/pull/70799) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Make Array Field serialization consistent. [#70803](https://github.com/ClickHouse/ClickHouse/pull/70803) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* A follow-up for [#70785](https://github.com/ClickHouse/ClickHouse/issues/70785), [jwt](https://pypi.org/project/jwt/#history) looks very outdated, and we have issue with conflicting paths. [#70815](https://github.com/ClickHouse/ClickHouse/pull/70815) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Remove inneficient code. [#70816](https://github.com/ClickHouse/ClickHouse/pull/70816) ([Raúl Marín](https://github.com/Algunenano)). +* Allow large object files if OMIT_HEAVY_DEBUG_SYMBOLS = 0. [#70818](https://github.com/ClickHouse/ClickHouse/pull/70818) ([Michael Kolupaev](https://github.com/al13n321)). +* Add test with distributed queries for 15768. [#70834](https://github.com/ClickHouse/ClickHouse/pull/70834) ([Nikita Taranov](https://github.com/nickitat)). +* More setting structs to pImpl and reuse code. [#70840](https://github.com/ClickHouse/ClickHouse/pull/70840) ([Raúl Marín](https://github.com/Algunenano)). +* Update default HNSW parameter settings. [#70873](https://github.com/ClickHouse/ClickHouse/pull/70873) ([Robert Schulze](https://github.com/rschu1ze)). +* Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix `limit by`, `limit with ties` for distributed and parallel replicas. [#70880](https://github.com/ClickHouse/ClickHouse/pull/70880) ([Nikita Taranov](https://github.com/nickitat)). +* Fix darwin build. [#70894](https://github.com/ClickHouse/ClickHouse/pull/70894) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add dots for consistency. [#70909](https://github.com/ClickHouse/ClickHouse/pull/70909) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Logical error fix for substrings, found by fuzzer. [#70914](https://github.com/ClickHouse/ClickHouse/pull/70914) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* More setting structs to pImpl. [#70942](https://github.com/ClickHouse/ClickHouse/pull/70942) ([Raúl Marín](https://github.com/Algunenano)). +* Add logging for mock HTTP servers used in minio integration tests. [#70943](https://github.com/ClickHouse/ClickHouse/pull/70943) ([Vitaly Baranov](https://github.com/vitlibar)). +* Minor fixups of [#70011](https://github.com/ClickHouse/ClickHouse/issues/70011) and [#69918](https://github.com/ClickHouse/ClickHouse/issues/69918). [#70959](https://github.com/ClickHouse/ClickHouse/pull/70959) ([Robert Schulze](https://github.com/rschu1ze)). +* CI: Do not skip Build report and status fix. [#70965](https://github.com/ClickHouse/ClickHouse/pull/70965) ([Max Kainov](https://github.com/maxknv)). +* Fix Keeper entry serialization compatibility. 
[#70972](https://github.com/ClickHouse/ClickHouse/pull/70972) ([Antonio Andelic](https://github.com/antonio2368)). +* Update exception message. [#70975](https://github.com/ClickHouse/ClickHouse/pull/70975) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix `utils/c++expr` option `-b`. [#70978](https://github.com/ClickHouse/ClickHouse/pull/70978) ([Sergei Trifonov](https://github.com/serxa)). +* Fix `test_keeper_broken_logs`. [#70982](https://github.com/ClickHouse/ClickHouse/pull/70982) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix `01039_test_setting_parse`. [#70986](https://github.com/ClickHouse/ClickHouse/pull/70986) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tests for languages support for Embedded Dictionaries. [#71004](https://github.com/ClickHouse/ClickHouse/pull/71004) ([Max Vostrikov](https://github.com/max-vostrikov)). +* Required for internal test runs with the same image build in public CI. [#71008](https://github.com/ClickHouse/ClickHouse/pull/71008) ([Ilya Yatsishin](https://github.com/qoega)). +* Move remaining settings objects to pImpl and start simplification. [#71019](https://github.com/ClickHouse/ClickHouse/pull/71019) ([Raúl Marín](https://github.com/Algunenano)). +* CI: Rearrange directories for praktika ci. [#71029](https://github.com/ClickHouse/ClickHouse/pull/71029) ([Max Kainov](https://github.com/maxknv)). +* Fix assert in RemoteSource::onAsyncJobReady(). [#71034](https://github.com/ClickHouse/ClickHouse/pull/71034) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix showing error message in ReadBufferFromS3 when retrying. Without this PR information about a retryable failure in `ReadBufferFromS3` could look like this:. [#71038](https://github.com/ClickHouse/ClickHouse/pull/71038) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix `test_truncate_database`. [#71057](https://github.com/ClickHouse/ClickHouse/pull/71057) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix clickhouse-test useless 5 second delay in case of multiple threads are used. [#71069](https://github.com/ClickHouse/ClickHouse/pull/71069) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#71142](https://github.com/ClickHouse/ClickHouse/issues/71142): Followup [#70520](https://github.com/ClickHouse/ClickHouse/issues/70520). [#71129](https://github.com/ClickHouse/ClickHouse/pull/71129) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Backported in [#71189](https://github.com/ClickHouse/ClickHouse/issues/71189): Update compatibility setting for `hnsw_candidate_list_size_for_search`. [#71133](https://github.com/ClickHouse/ClickHouse/pull/71133) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#71222](https://github.com/ClickHouse/ClickHouse/issues/71222): Fixes for interactive metrics. [#71173](https://github.com/ClickHouse/ClickHouse/pull/71173) ([Julia Kartseva](https://github.com/jkartseva)). +* Backported in [#71205](https://github.com/ClickHouse/ClickHouse/issues/71205): Maybe not GWPAsan by default. [#71174](https://github.com/ClickHouse/ClickHouse/pull/71174) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#71277](https://github.com/ClickHouse/ClickHouse/issues/71277): Fix LOGICAL_ERROR on wrong scalar subquery argument to table functions. [#71216](https://github.com/ClickHouse/ClickHouse/pull/71216) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71253](https://github.com/ClickHouse/ClickHouse/issues/71253): Disable enable_named_columns_in_function_tuple for 24.10. 
[#71219](https://github.com/ClickHouse/ClickHouse/pull/71219) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71303](https://github.com/ClickHouse/ClickHouse/issues/71303): Improve system.query_metric_log to remove flakiness. [#71295](https://github.com/ClickHouse/ClickHouse/pull/71295) ([Pablo Marcos](https://github.com/pamarcos)). +* Backported in [#71317](https://github.com/ClickHouse/ClickHouse/issues/71317): Fix debug log timestamp. [#71311](https://github.com/ClickHouse/ClickHouse/pull/71311) ([Pablo Marcos](https://github.com/pamarcos)). + +#### Not for changelog + +* Reverted. [#69812](https://github.com/ClickHouse/ClickHouse/pull/69812) ([tuanpach](https://github.com/tuanpach)). + diff --git a/docs/changelogs/v24.3.13.40-lts.md b/docs/changelogs/v24.3.13.40-lts.md new file mode 100644 index 00000000000..bce45e88710 --- /dev/null +++ b/docs/changelogs/v24.3.13.40-lts.md @@ -0,0 +1,31 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.13.40-lts (7acabd77389) FIXME as compared to v24.3.12.75-lts (7cb5dff8019) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#63976](https://github.com/ClickHouse/ClickHouse/issues/63976): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)). +* Backported in [#71482](https://github.com/ClickHouse/ClickHouse/issues/71482): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70451](https://github.com/ClickHouse/ClickHouse/issues/70451): Fix crash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)). +* Backported in [#70619](https://github.com/ClickHouse/ClickHouse/issues/70619): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70877](https://github.com/ClickHouse/ClickHouse/issues/70877): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory.
[#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): Fix a crash and a leak in AggregateFunctionGroupArraySorted. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). +* Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). +* Backported in [#71594](https://github.com/ClickHouse/ClickHouse/issues/71594): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#71022](https://github.com/ClickHouse/ClickHouse/issues/71022): Fix dropping of file cache in CHECK query in case of enabled transactions. [#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70384](https://github.com/ClickHouse/ClickHouse/issues/70384): CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* Backported in [#70538](https://github.com/ClickHouse/ClickHouse/issues/70538): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70971](https://github.com/ClickHouse/ClickHouse/issues/70971): Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). + diff --git a/docs/changelogs/v24.8.6.70-lts.md b/docs/changelogs/v24.8.6.70-lts.md new file mode 100644 index 00000000000..81fa4db1458 --- /dev/null +++ b/docs/changelogs/v24.8.6.70-lts.md @@ -0,0 +1,50 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.8.6.70-lts (ddb8c219771) FIXME as compared to v24.8.5.115-lts (8c4cb00a384) + +#### Backward Incompatible Change +* Backported in [#71359](https://github.com/ClickHouse/ClickHouse/issues/71359): Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)). + +#### Improvement +* Backported in [#70680](https://github.com/ClickHouse/ClickHouse/issues/70680): Don't do validation when synchronizing user_directories from keeper. [#70644](https://github.com/ClickHouse/ClickHouse/pull/70644) ([Raúl Marín](https://github.com/Algunenano)). 
+* Backported in [#71395](https://github.com/ClickHouse/ClickHouse/issues/71395): Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in the memory. The trade-offs are increased initial load time and memory required to store filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)). +* Backported in [#71287](https://github.com/ClickHouse/ClickHouse/issues/71287): Reduce the number of object storage HEAD API requests in the plain_rewritable disk. [#70915](https://github.com/ClickHouse/ClickHouse/pull/70915) ([Julia Kartseva](https://github.com/jkartseva)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#70934](https://github.com/ClickHouse/ClickHouse/issues/70934): Fix incorrect JOIN ON section optimization in case of `IS NULL` check under any other function (like `NOT`) that may lead to wrong results. Closes [#67915](https://github.com/ClickHouse/ClickHouse/issues/67915). [#68049](https://github.com/ClickHouse/ClickHouse/pull/68049) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Backported in [#70735](https://github.com/ClickHouse/ClickHouse/issues/70735): Fix unexpected exception when passing empty tuple in array. This fixes [#68618](https://github.com/ClickHouse/ClickHouse/issues/68618). [#68848](https://github.com/ClickHouse/ClickHouse/pull/68848) ([Amos Bird](https://github.com/amosbird)). +* Backported in [#71138](https://github.com/ClickHouse/ClickHouse/issues/71138): Fix propogating structure argument in s3Cluster. Previously the `DEFAULT` expression of the column could be lost when sending the query to the replicas in s3Cluster. [#69147](https://github.com/ClickHouse/ClickHouse/pull/69147) ([Pavel Kruglov](https://github.com/Avogar)). +* Backported in [#70561](https://github.com/ClickHouse/ClickHouse/issues/70561): Fix `getSubcolumn` with `LowCardinality` columns by overriding `useDefaultImplementationForLowCardinalityColumns` to return `true`. [#69831](https://github.com/ClickHouse/ClickHouse/pull/69831) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Backported in [#70903](https://github.com/ClickHouse/ClickHouse/issues/70903): Avoid reusing columns among different named tuples when evaluating `tuple` functions. This fixes [#70022](https://github.com/ClickHouse/ClickHouse/issues/70022). [#70103](https://github.com/ClickHouse/ClickHouse/pull/70103) ([Amos Bird](https://github.com/amosbird)). +* Backported in [#70623](https://github.com/ClickHouse/ClickHouse/issues/70623): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70688](https://github.com/ClickHouse/ClickHouse/issues/70688): Fix possible use-after-free in `SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf`. [#70358](https://github.com/ClickHouse/ClickHouse/pull/70358) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#70494](https://github.com/ClickHouse/ClickHouse/issues/70494): Fix crash during GROUP BY JSON sub-object subcolumn. [#70374](https://github.com/ClickHouse/ClickHouse/pull/70374) ([Pavel Kruglov](https://github.com/Avogar)). 
+* Backported in [#70482](https://github.com/ClickHouse/ClickHouse/issues/70482): Don't prefetch parts for vertical merges if part has no rows. [#70452](https://github.com/ClickHouse/ClickHouse/pull/70452) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#70556](https://github.com/ClickHouse/ClickHouse/issues/70556): Fix crash in WHERE with lambda functions. [#70464](https://github.com/ClickHouse/ClickHouse/pull/70464) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70878](https://github.com/ClickHouse/ClickHouse/issues/70878): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#70575](https://github.com/ClickHouse/ClickHouse/issues/70575): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#71052](https://github.com/ClickHouse/ClickHouse/issues/71052): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#70651](https://github.com/ClickHouse/ClickHouse/issues/70651): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70757](https://github.com/ClickHouse/ClickHouse/issues/70757): Fixed rare crashes in `SELECT`-s and merges after adding a column of `Array` type with non-empty default expression. [#70695](https://github.com/ClickHouse/ClickHouse/pull/70695) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70763](https://github.com/ClickHouse/ClickHouse/issues/70763): Fix infinite recursion when infering a proto schema with skip unsupported fields enabled. [#70697](https://github.com/ClickHouse/ClickHouse/pull/70697) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71118](https://github.com/ClickHouse/ClickHouse/issues/71118): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 
2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. 
DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#70896](https://github.com/ClickHouse/ClickHouse/issues/70896): Disable enable_named_columns_in_function_tuple by default. [#70833](https://github.com/ClickHouse/ClickHouse/pull/70833) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70994](https://github.com/ClickHouse/ClickHouse/issues/70994): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71210](https://github.com/ClickHouse/ClickHouse/issues/71210): Fix logical error in `StorageS3Queue` "Cannot create a persistent node in /processed since it already exists". [#70984](https://github.com/ClickHouse/ClickHouse/pull/70984) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#71248](https://github.com/ClickHouse/ClickHouse/issues/71248): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). +* Backported in [#71375](https://github.com/ClickHouse/ClickHouse/issues/71375): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#71026](https://github.com/ClickHouse/ClickHouse/issues/71026): Fix dropping of file cache in CHECK query in case of enabled transactions. 
[#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70388](https://github.com/ClickHouse/ClickHouse/issues/70388): CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* Backported in [#70701](https://github.com/ClickHouse/ClickHouse/issues/70701): Fix order in 03249_dynamic_alter_consistency. [#70453](https://github.com/ClickHouse/ClickHouse/pull/70453) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#70542](https://github.com/ClickHouse/ClickHouse/issues/70542): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70804](https://github.com/ClickHouse/ClickHouse/issues/70804): When the `PR Check` status is set, it's a valid RunConfig job failure. [#70643](https://github.com/ClickHouse/ClickHouse/pull/70643) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#71229](https://github.com/ClickHouse/ClickHouse/issues/71229): Maybe not GWPAsan by default. [#71174](https://github.com/ClickHouse/ClickHouse/pull/71174) ([Antonio Andelic](https://github.com/antonio2368)). + diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 1958250ed73..41c4e8fc4a9 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -4,9 +4,13 @@ sidebar_position: 50 sidebar_label: EmbeddedRocksDB --- +import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; + # EmbeddedRocksDB Engine -This engine allows integrating ClickHouse with [rocksdb](http://rocksdb.org/). + + +This engine allows integrating ClickHouse with [RocksDB](http://rocksdb.org/). ## Creating a Table {#creating-a-table} diff --git a/docs/en/getting-started/index.md b/docs/en/getting-started/index.md index b520220984c..7898ca01129 100644 --- a/docs/en/getting-started/index.md +++ b/docs/en/getting-started/index.md @@ -23,6 +23,7 @@ functions in ClickHouse. The sample datasets include: - The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables - The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data - The [Laion dataset](../getting-started/example-datasets/laion.md) has an example of [Approximate nearest neighbor search indexes](../engines/table-engines/mergetree-family/annindexes.md) usage +- The [TPC-H](../getting-started/example-datasets/tpch.md), [TPC-DS](../getting-started/example-datasets/tpcds.md), and [Star Schema (SSB)](../getting-started/example-datasets/star-schema.md) industry benchmarks for analytics databases - [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset - [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3 - [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) shows how to generate random data if none of the above fit your needs. 
diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index 66291014ed7..504f6eec6de 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -190,6 +190,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va - `--config-file` – The name of the configuration file. - `--secure` – If specified, will connect to server over secure connection (TLS). You might need to configure your CA certificates in the [configuration file](#configuration_files). The available configuration settings are the same as for [server-side TLS configuration](../operations/server-configuration-parameters/settings.md#openssl). - `--history_file` — Path to a file containing command history. +- `--history_max_entries` — Maximum number of entries in the history file. Default value: 1 000 000. - `--param_` — Value for a [query with parameters](#cli-queries-with-parameters). - `--hardware-utilization` — Print hardware utilization information in progress bar. - `--print-profile-events` – Print `ProfileEvents` packets. diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md index 8e7023cc51f..11f503b54d7 100644 --- a/docs/en/interfaces/prometheus.md +++ b/docs/en/interfaces/prometheus.md @@ -9,7 +9,7 @@ sidebar_label: Prometheus protocols ## Exposing metrics {#expose} :::note -ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. +If you are using ClickHouse Cloud, you can expose metrics to Prometheus using the [Prometheus Integration](/en/integrations/prometheus). ::: ClickHouse can expose its own metrics for scraping from Prometheus: diff --git a/docs/en/operations/_troubleshooting.md b/docs/en/operations/_troubleshooting.md index 77389782675..f0ee1ca1d29 100644 --- a/docs/en/operations/_troubleshooting.md +++ b/docs/en/operations/_troubleshooting.md @@ -65,6 +65,34 @@ sudo rm -f /etc/yum.repos.d/clickhouse.repo After that follow the [install guide](../getting-started/install.md#from-rpm-packages) +### You Can't Run Docker Container + +You are running a simple `docker run clickhouse/clickhouse-server` and it crashes with a stack trace similar to the following: + +``` +$ docker run -it clickhouse/clickhouse-server +........ +2024.11.06 21:04:48.912036 [ 1 ] {} SentryWriter: Sending crash reports is disabled +Poco::Exception. Code: 1000, e.code() = 0, System exception: cannot start thread, Stack trace (when copying this message, always include the lines below): + +0. Poco::ThreadImpl::startImpl(Poco::SharedPtr>) @ 0x00000000157c7b34 +1. Poco::Thread::start(Poco::Runnable&) @ 0x00000000157c8a0e +2. BaseDaemon::initializeTerminationAndSignalProcessing() @ 0x000000000d267a14 +3. BaseDaemon::initialize(Poco::Util::Application&) @ 0x000000000d2652cb +4. DB::Server::initialize(Poco::Util::Application&) @ 0x000000000d128b38 +5. Poco::Util::Application::run() @ 0x000000001581cfda +6. DB::Server::run() @ 0x000000000d1288f0 +7. Poco::Util::ServerApplication::run(int, char**) @ 0x0000000015825e27 +8. mainEntryClickHouseServer(int, char**) @ 0x000000000d125b38 +9. main @ 0x0000000007ea4eee +10. ? @ 0x00007f67ff946d90 +11. ? @ 0x00007f67ff946e40 +12. _start @ 0x00000000062e802e + (version 24.10.1.2812 (official build)) +``` + +The reason is an old Docker daemon with a version lower than `20.10.10`. A way to fix it is either to upgrade the daemon or to run `docker run [--privileged | --security-opt seccomp=unconfined]`. The latter has security implications.
+ ## Connecting to the Server {#troubleshooting-accepts-no-connections} Possible issues: diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index 955cec0234e..f0941aa28aa 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -25,9 +25,10 @@ Query caches can generally be viewed as transactionally consistent or inconsiste slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be served directly from the query cache. In this example, a reasonable validity period could be 30 min. -Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result, -the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. -This reduces maintenance effort and avoids redundancy. +Transactionally inconsistent caching is traditionally provided by client tools or proxy packages (e.g. +[chproxy](https://www.chproxy.org/configuration/caching/)) interacting with the database. As a result, the same caching logic and +configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance +effort and avoids redundancy. ## Configuration Settings and Usage @@ -138,7 +139,10 @@ is only cached if the query runs longer than 5 seconds. It is also possible to s cached - for that use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs). Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different -value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). +value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). The query +cache evicts entries "lazily", i.e. when an entry becomes stale, it is not immediately removed from the cache. Instead, when a new entry +is to be inserted into the query cache, the database checks whether the cache has enough free space for the new entry. If this is not the +case, the database tries to remove all stale entries. If the cache still has not enough free space, the new entry is not inserted. Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). @@ -188,14 +192,9 @@ Also, results of queries with non-deterministic functions are not cached by defa To force caching of results of queries with non-deterministic functions regardless, use setting [query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling). -Results of queries that involve system tables, e.g. `system.processes` or `information_schema.tables`, are not cached by default. To force -caching of results of queries with system tables regardless, use setting -[query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling). - -:::note -Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether -results of queries with non-deterministic results were cached. 
In newer ClickHouse versions, this setting is obsolete and has no effect. -::: +Results of queries that involve system tables (e.g. [system.processes](system-tables/processes.md) or +[information_schema.tables](system-tables/information_schema.md)) are not cached by default. To force caching of results of queries with +system tables regardless, use setting [query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling). Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can diff --git a/docs/en/operations/system-tables/grants.md b/docs/en/operations/system-tables/grants.md index 262a53a87a5..debc3146008 100644 --- a/docs/en/operations/system-tables/grants.md +++ b/docs/en/operations/system-tables/grants.md @@ -19,7 +19,7 @@ Columns: - `column` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Name of a column to which access is granted. - `is_partial_revoke` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Logical value. It shows whether some privileges have been revoked. Possible values: -- `0` — The row describes a partial revoke. -- `1` — The row describes a grant. +- `0` — The row describes a grant. +- `1` — The row describes a partial revoke. - `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#granting-privilege-syntax). diff --git a/docs/en/sql-reference/aggregate-functions/reference/anylast.md b/docs/en/sql-reference/aggregate-functions/reference/anylast.md index 202d2e9fb10..4fe21531c76 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/en/sql-reference/aggregate-functions/reference/anylast.md @@ -17,7 +17,7 @@ anyLast(column) [RESPECT NULLS] - `column`: The column name. :::note -Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not. +Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not. ::: **Returned value** @@ -40,4 +40,4 @@ SELECT anyLast(city) FROM any_last_nulls; ┌─anyLast(city)─┐ │ Valencia │ └───────────────┘ -``` \ No newline at end of file +``` diff --git a/docs/en/sql-reference/data-types/dynamic.md b/docs/en/sql-reference/data-types/dynamic.md index 5fbf47f7ef2..aa7455c8f68 100644 --- a/docs/en/sql-reference/data-types/dynamic.md +++ b/docs/en/sql-reference/data-types/dynamic.md @@ -512,6 +512,8 @@ The result of operator `<` for values `d1` with underlying type `T1` and `d2` wi - If `T1 = T2 = T`, the result will be `d1.T < d2.T` (underlying values will be compared). - If `T1 != T2`, the result will be `T1 < T2` (type names will be compared). +By default, the `Dynamic` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
+ Examples: ```sql CREATE TABLE test (d Dynamic) ENGINE=Memory; @@ -535,7 +537,7 @@ SELECT d, dynamicType(d) FROM test; ``` ```sql -SELECT d, dynamicType(d) FROM test ORDER BY d; +SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1; ``` ```sql @@ -557,7 +559,7 @@ Example: ```sql CREATE TABLE test (d Dynamic) ENGINE=Memory; INSERT INTO test VALUES (1::UInt32), (1::Int64), (100::UInt32), (100::Int64); -SELECT d, dynamicType(d) FROM test ORDER by d; +SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1; ``` ```text @@ -570,7 +572,7 @@ SELECT d, dynamicType(d) FROM test ORDER by d; ``` ```sql -SELECT d, dynamicType(d) FROM test GROUP by d; +SELECT d, dynamicType(d) FROM test GROUP by d SETTINGS allow_suspicious_types_in_group_by=1; ``` ```text @@ -582,7 +584,7 @@ SELECT d, dynamicType(d) FROM test GROUP by d; └─────┴────────────────┘ ``` -**Note**: the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type +**Note:** the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type ## Reaching the limit in number of different data types stored inside Dynamic diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index 7e6d4dd934f..4a21900545d 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -58,10 +58,10 @@ SELECT json FROM test; └───────────────────────────────────┘ ``` -Using CAST from 'String': +Using CAST from `String`: ```sql -SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json; +SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON AS json; ``` ```text @@ -70,7 +70,47 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json └────────────────────────────────────────────────┘ ``` -CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later. +Using CAST from `Tuple`: + +```sql +SELECT (tuple(42 AS b) AS a, [1, 2, 3] AS c, 'Hello, World!' AS d)::JSON AS json; +``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +Using CAST from `Map`: + +```sql +SELECT map('a', map('b', 42), 'c', [1,2,3], 'd', 'Hello, World!')::JSON AS json; +``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +Using CAST from deprecated `Object('json')`: + +```sql + SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::Object('json')::JSON AS json; + ``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +:::note +CAST from `Tuple`/`Map`/`Object('json')` to `JSON` is implemented via serializing the column into `String` column containing JSON objects and deserializing it back to `JSON` type column. +::: + +CAST between `JSON` types with different arguments will be supported later. 
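To illustrate the note above: the `Map` cast is conceptually an explicit round-trip through a JSON string. A minimal sketch (an editor's illustration, not part of the original page), assuming the `JSON` type is enabled on the server:

```sql
-- Hypothetical sketch: serialize a Map to JSON text, then parse that text back as the JSON type.
-- On servers where the JSON type is still experimental, it may need to be enabled first,
-- e.g. SET allow_experimental_json_type = 1.
SELECT
    toJSONString(map('a', 1, 'b', 2)) AS json_string,  -- '{"a":1,"b":2}' as a String
    toJSONString(map('a', 1, 'b', 2))::JSON AS json;   -- the same text parsed into a JSON value
```

The intermediate `String` value here is the same JSON text that the built-in cast serializes and re-parses internally, as described in the note.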
## Reading JSON paths as subcolumns @@ -630,6 +670,28 @@ SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-publi └─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘ ``` +## ALTER MODIFY COLUMN to JSON type + +It's possible to alter an existing table and change the type of the column to the new `JSON` type. Right now, only altering from the `String` type is supported. + +**Example** + +```sql +CREATE TABLE test (json String) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test VALUES ('{"a" : 42}'), ('{"a" : 43, "b" : "Hello"}'), ('{"a" : 44, "b" : [1, 2, 3]}'), ('{"c" : "2020-01-01"}'); +ALTER TABLE test MODIFY COLUMN json JSON; +SELECT json, json.a, json.b, json.c FROM test; +``` + +```text +┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐ +│ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +│ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │ +│ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │ +│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ +└──────────────────────────────┴────────┴─────────┴────────────┘ +``` + ## Tips for better usage of the JSON type Before creating `JSON` column and loading data into it, consider the following tips: diff --git a/docs/en/sql-reference/data-types/variant.md b/docs/en/sql-reference/data-types/variant.md index 3c2b6e0a362..7cb0f4ad4ea 100644 --- a/docs/en/sql-reference/data-types/variant.md +++ b/docs/en/sql-reference/data-types/variant.md @@ -441,6 +441,8 @@ SELECT v, variantType(v) FROM test ORDER by v; └─────┴────────────────┘ ``` +**Note:** by default, the `Variant` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings. + ## JSONExtract functions with Variant All `JSONExtract*` functions support `Variant` type: diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 0e5d5250e0f..c770348bce0 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -55,7 +55,7 @@ SELECT * FROM view(column1=value1, column2=value2 ...) ## Materialized View ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] [TO[db.]name] [ENGINE = engine] [POPULATE] [DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... [COMMENT 'comment'] diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 19305675ec8..6decaf19d5b 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -117,6 +117,7 @@ GRANT SELECT ON db*.* TO john -- correct GRANT SELECT ON *.my_table TO john -- wrong GRANT SELECT ON foo*bar TO john -- wrong GRANT SELECT ON *suffix TO john -- wrong +GRANT SELECT(foo) ON db.table* TO john -- wrong ``` ## Privileges @@ -242,10 +243,13 @@ Hierarchy of privileges: - `HDFS` - `HIVE` - `JDBC` + - `KAFKA` - `MONGO` - `MYSQL` + - `NATS` - `ODBC` - `POSTGRES` + - `RABBITMQ` - `REDIS` - `REMOTE` - `S3` @@ -524,10 +528,13 @@ Allows using external data sources. Applies to [table engines](../../engines/tab - `HDFS`. Level: `GLOBAL` - `HIVE`. Level: `GLOBAL` - `JDBC`. Level: `GLOBAL` + - `KAFKA`. Level: `GLOBAL` - `MONGO`. Level: `GLOBAL` - `MYSQL`. Level: `GLOBAL` + - `NATS`. Level: `GLOBAL` - `ODBC`.
Level: `GLOBAL` - `POSTGRES`. Level: `GLOBAL` + - `RABBITMQ`. Level: `GLOBAL` - `REDIS`. Level: `GLOBAL` - `REMOTE`. Level: `GLOBAL` - `S3`. Level: `GLOBAL` diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index 512a58d7cd9..25d2e7123fd 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -291,7 +291,7 @@ All missed values of `expr` column will be filled sequentially and other columns To fill multiple columns, add `WITH FILL` modifier with optional parameters after each field name in `ORDER BY` section. ``` sql -ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] +ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr] [STALENESS const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] [STALENESS numeric_expr] [INTERPOLATE [(col [AS expr], ... colN [AS exprN])]] ``` @@ -300,6 +300,7 @@ When `FROM const_expr` not defined sequence of filling use minimal `expr` field When `TO const_expr` not defined sequence of filling use maximum `expr` field value from `ORDER BY`. When `STEP const_numeric_expr` defined then `const_numeric_expr` interprets `as is` for numeric types, as `days` for Date type, as `seconds` for DateTime type. It also supports [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals. When `STEP const_numeric_expr` omitted then sequence of filling use `1.0` for numeric type, `1 day` for Date type and `1 second` for DateTime type. +When `STALENESS const_numeric_expr` is defined, the query will generate rows until the difference from the previous row in the original data exceeds `const_numeric_expr`. `INTERPOLATE` can be applied to columns not participating in `ORDER BY WITH FILL`. Such columns are filled based on previous fields values by applying `expr`. If `expr` is not present will repeat previous value. Omitted list will result in including all allowed columns. Example of a query without `WITH FILL`: @@ -497,6 +498,64 @@ Result: └────────────┴────────────┴──────────┘ ``` +Example of a query without `STALENESS`: + +``` sql +SELECT number as key, 5 * number value, 'original' AS source +FROM numbers(16) WHERE key % 5 == 0 +ORDER BY key WITH FILL; +``` + +Result: + +``` text + ┌─key─┬─value─┬─source───┐ + 1. │ 0 │ 0 │ original │ + 2. │ 1 │ 0 │ │ + 3. │ 2 │ 0 │ │ + 4. │ 3 │ 0 │ │ + 5. │ 4 │ 0 │ │ + 6. │ 5 │ 25 │ original │ + 7. │ 6 │ 0 │ │ + 8. │ 7 │ 0 │ │ + 9. │ 8 │ 0 │ │ +10. │ 9 │ 0 │ │ +11. │ 10 │ 50 │ original │ +12. │ 11 │ 0 │ │ +13. │ 12 │ 0 │ │ +14. │ 13 │ 0 │ │ +15. │ 14 │ 0 │ │ +16. │ 15 │ 75 │ original │ + └─────┴───────┴──────────┘ +``` + +Same query after applying `STALENESS 3`: + +``` sql +SELECT number as key, 5 * number value, 'original' AS source +FROM numbers(16) WHERE key % 5 == 0 +ORDER BY key WITH FILL STALENESS 3; +``` + +Result: + +``` text + ┌─key─┬─value─┬─source───┐ + 1. │ 0 │ 0 │ original │ + 2. │ 1 │ 0 │ │ + 3. │ 2 │ 0 │ │ + 4. │ 5 │ 25 │ original │ + 5. │ 6 │ 0 │ │ + 6. │ 7 │ 0 │ │ + 7. │ 10 │ 50 │ original │ + 8. │ 11 │ 0 │ │ + 9. │ 12 │ 0 │ │ +10. │ 15 │ 75 │ original │ +11. │ 16 │ 0 │ │ +12. 
│ 17 │ 0 │ │ + └─────┴───────┴──────────┘ +``` + Example of a query without `INTERPOLATE`: ``` sql diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index f8a660fbec9..083ddc8c39c 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -95,7 +95,7 @@ sudo yum install -y clickhouse-server clickhouse-client sudo systemctl enable clickhouse-server sudo systemctl start clickhouse-server sudo systemctl status clickhouse-server -clickhouse-client # илм "clickhouse-client --password" если установлен пароль +clickhouse-client # или "clickhouse-client --password" если установлен пароль ``` Для использования наиболее свежих версий нужно заменить `stable` на `testing` (рекомендуется для тестовых окружений). Также иногда доступен `prestable`. diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index 8fa30446bb3..5dbffd90205 100644 --- a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -39,7 +39,7 @@ SELECT a, b, c FROM (SELECT ...) ## Материализованные представления {#materialized} ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] [TO[db.]name] [ENGINE = engine] [POPULATE] [DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... ``` diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index 2ccc2d05452..79682dc42cd 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -192,14 +192,23 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `addressToSymbol` - `demangle` - [SOURCES](#grant-sources) + - `AZURE` - `FILE` - - `URL` - - `REMOTE` - - `MYSQL` - - `ODBC` - - `JDBC` - `HDFS` + - `HIVE` + - `JDBC` + - `KAFKA` + - `MONGO` + - `MYSQL` + - `NATS` + - `ODBC` + - `POSTGRES` + - `RABBITMQ` + - `REDIS` + - `REMOTE` - `S3` + - `SQLITE` + - `URL` - [dictGet](#grant-dictget) Примеры того, как трактуется данная иерархия: @@ -461,14 +470,23 @@ GRANT INSERT(x,y) ON db.table TO john Разрешает использовать внешние источники данных. Применяется к [движкам таблиц](../../engines/table-engines/index.md) и [табличным функциям](../table-functions/index.md#table-functions). - `SOURCES`. Уровень: `GROUP` + - `AZURE`. Уровень: `GLOBAL` - `FILE`. Уровень: `GLOBAL` - - `URL`. Уровень: `GLOBAL` - - `REMOTE`. Уровень: `GLOBAL` - - `MYSQL`. Уровень: `GLOBAL` - - `ODBC`. Уровень: `GLOBAL` - - `JDBC`. Уровень: `GLOBAL` - `HDFS`. Уровень: `GLOBAL` + - `HIVE`. Уровень: `GLOBAL` + - `JDBC`. Уровень: `GLOBAL` + - `KAFKA`. Уровень: `GLOBAL` + - `MONGO`. Уровень: `GLOBAL` + - `MYSQL`. Уровень: `GLOBAL` + - `NATS`. Уровень: `GLOBAL` + - `ODBC`. Уровень: `GLOBAL` + - `POSTGRES`. Уровень: `GLOBAL` + - `RABBITMQ`. Уровень: `GLOBAL` + - `REDIS`. Уровень: `GLOBAL` + - `REMOTE`. Уровень: `GLOBAL` - `S3`. Уровень: `GLOBAL` + - `SQLITE`. Уровень: `GLOBAL` + - `URL`. Уровень: `GLOBAL` Привилегия `SOURCES` разрешает использование всех источников. Также вы можете присвоить привилегию для каждого источника отдельно. Для использования источников необходимы дополнительные привилегии. 
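The documentation hunks above make two user-visible changes: `KAFKA`, `NATS`, and `RABBITMQ` become grantable source privileges at the `GLOBAL` level, and `Variant`/`Dynamic` columns are rejected as `GROUP BY`/`ORDER BY` keys unless the new `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings are enabled. A minimal sketch of both behaviors follows; the user `john`, the table `events`, and the `allow_experimental_variant_type` line are illustrative assumptions rather than part of this patch.

```sql
-- The new source-level privileges are granted like the existing ones (S3, MONGO, ...),
-- i.e. globally with ON *.*:
GRANT KAFKA, NATS, RABBITMQ ON *.* TO john;

-- Variant/Dynamic keys in GROUP BY / ORDER BY are rejected by default:
SET allow_experimental_variant_type = 1;  -- may be needed on versions where Variant is still experimental
CREATE TABLE events (v Variant(UInt64, String)) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO events VALUES (1), ('Hello');

SELECT v, count() FROM events GROUP BY v;  -- error: Variant/Dynamic are not allowed in GROUP BY keys

SELECT v, count() FROM events GROUP BY v
SETTINGS allow_suspicious_types_in_group_by = 1;  -- allowed once the setting is enabled

SELECT v FROM events ORDER BY v
SETTINGS allow_suspicious_types_in_order_by = 1;  -- the same applies to ORDER BY
```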
diff --git a/docs/zh/sql-reference/statements/create/view.md b/docs/zh/sql-reference/statements/create/view.md index 49a1d66bdf1..6c93240644d 100644 --- a/docs/zh/sql-reference/statements/create/view.md +++ b/docs/zh/sql-reference/statements/create/view.md @@ -39,7 +39,7 @@ SELECT a, b, c FROM (SELECT ...) ## Materialized {#materialized} ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... ``` 物化视图存储由相应的[SELECT](../../../sql-reference/statements/select/index.md)管理. diff --git a/docs/zh/sql-reference/statements/grant.md b/docs/zh/sql-reference/statements/grant.md index fea51d590d5..3fd314c791f 100644 --- a/docs/zh/sql-reference/statements/grant.md +++ b/docs/zh/sql-reference/statements/grant.md @@ -170,14 +170,23 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `addressToSymbol` - `demangle` - [SOURCES](#grant-sources) + - `AZURE` - `FILE` - - `URL` - - `REMOTE` - - `YSQL` - - `ODBC` - - `JDBC` - `HDFS` + - `HIVE` + - `JDBC` + - `KAFKA` + - `MONGO` + - `MYSQL` + - `NATS` + - `ODBC` + - `POSTGRES` + - `RABBITMQ` + - `REDIS` + - `REMOTE` - `S3` + - `SQLITE` + - `URL` - [dictGet](#grant-dictget) 如何对待该层级的示例: @@ -428,14 +437,23 @@ GRANT INSERT(x,y) ON db.table TO john 允许在 [table engines](../../engines/table-engines/index.md) 和 [table functions](../../sql-reference/table-functions/index.md#table-functions)中使用外部数据源。 - `SOURCES`. 级别: `GROUP` + - `AZURE`. 级别: `GLOBAL` - `FILE`. 级别: `GLOBAL` - - `URL`. 级别: `GLOBAL` - - `REMOTE`. 级别: `GLOBAL` - - `YSQL`. 级别: `GLOBAL` - - `ODBC`. 级别: `GLOBAL` - - `JDBC`. 级别: `GLOBAL` - `HDFS`. 级别: `GLOBAL` + - `HIVE`. 级别: `GLOBAL` + - `JDBC`. 级别: `GLOBAL` + - `KAFKA`. 级别: `GLOBAL` + - `MONGO`. 级别: `GLOBAL` + - `MYSQL`. 级别: `GLOBAL` + - `NATS`. 级别: `GLOBAL` + - `ODBC`. 级别: `GLOBAL` + - `POSTGRES`. 级别: `GLOBAL` + - `RABBITMQ`. 级别: `GLOBAL` + - `REDIS`. 级别: `GLOBAL` + - `REMOTE`. 级别: `GLOBAL` - `S3`. 级别: `GLOBAL` + - `SQLITE`. 级别: `GLOBAL` + - `URL`. 
级别: `GLOBAL` `SOURCES` 权限允许使用所有数据源。当然也可以单独对每个数据源进行授权。要使用数据源时,还需要额外的权限。 diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 4aab7fcae14..d7190444f0b 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -192,6 +192,10 @@ void Client::parseConnectionsCredentials(Poco::Util::AbstractConfiguration & con history_file = home_path + "/" + history_file.substr(1); config.setString("history_file", history_file); } + if (config.has(prefix + ".history_max_entries")) + { + config.setUInt("history_max_entries", history_max_entries); + } if (config.has(prefix + ".accept-invalid-certificate")) config.setBool("accept-invalid-certificate", config.getBool(prefix + ".accept-invalid-certificate")); } diff --git a/programs/disks/DisksApp.cpp b/programs/disks/DisksApp.cpp index 5fddfce0678..610d8eaa638 100644 --- a/programs/disks/DisksApp.cpp +++ b/programs/disks/DisksApp.cpp @@ -236,6 +236,7 @@ void DisksApp::runInteractiveReplxx() ReplxxLineReader lr( suggest, history_file, + history_max_entries, /* multiline= */ false, /* ignore_shell_suspend= */ false, query_extenders, @@ -398,6 +399,8 @@ void DisksApp::initializeHistoryFile() throw; } } + + history_max_entries = config().getUInt("history-max-entries", 1000000); } void DisksApp::init(const std::vector & common_arguments) diff --git a/programs/disks/DisksApp.h b/programs/disks/DisksApp.h index 5b240648508..4f2bd7fcad6 100644 --- a/programs/disks/DisksApp.h +++ b/programs/disks/DisksApp.h @@ -62,6 +62,8 @@ private: // Fields responsible for the REPL work String history_file; + UInt32 history_max_entries = 0; /// Maximum number of entries in the history file. Needs to be initialized to 0 since we don't have a proper constructor. Worry not, actual value is set within the initializeHistoryFile method. + LineReader::Suggest suggest; static LineReader::Patterns query_extenders; static LineReader::Patterns query_delimiters; diff --git a/programs/keeper-client/KeeperClient.cpp b/programs/keeper-client/KeeperClient.cpp index 101ed270fc5..2a426fad7ac 100644 --- a/programs/keeper-client/KeeperClient.cpp +++ b/programs/keeper-client/KeeperClient.cpp @@ -243,6 +243,8 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */) } } + history_max_entries = config().getUInt("history-max-entries", 1000000); + String default_log_level; if (config().has("query")) /// We don't want to see any information log in query mode, unless it was set explicitly @@ -319,6 +321,7 @@ void KeeperClient::runInteractiveReplxx() ReplxxLineReader lr( suggest, history_file, + history_max_entries, /* multiline= */ false, /* ignore_shell_suspend= */ false, query_extenders, diff --git a/programs/keeper-client/KeeperClient.h b/programs/keeper-client/KeeperClient.h index 0d3db3c2f02..359663c6a13 100644 --- a/programs/keeper-client/KeeperClient.h +++ b/programs/keeper-client/KeeperClient.h @@ -59,6 +59,8 @@ protected: std::vector getCompletions(const String & prefix) const; String history_file; + UInt32 history_max_entries; /// Maximum number of entries in the history file. 
+ LineReader::Suggest suggest; zkutil::ZooKeeperArgs zk_args; diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 1f481381b2b..5159f95419e 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1353,9 +1353,11 @@ try } FailPointInjection::enableFromGlobalConfig(config()); +#endif memory_worker.start(); +#if defined(OS_LINUX) int default_oom_score = 0; #if !defined(NDEBUG) diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index e8ee363be1a..9b3b8d2a977 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -608,7 +608,7 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po } catch (...) { - tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed"); + tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed", LogsLevel::information); WriteBufferFromOwnString message; message << credentials.getUserName() << ": Authentication failed: password is incorrect, or there is no user with such name."; @@ -622,8 +622,9 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po << "and deleting this file will reset the password.\n" << "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed.\n\n"; - /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons, - /// only the log will show the exact reason. + /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons. + /// Only the log ((*), above) will show the exact reason. Note that (*) logs at information level instead of the default error level as + /// authentication failures are not an unusual event. 
throw Exception(PreformattedMessage{message.str(), "{}: Authentication failed: password is incorrect, or there is no user with such name", std::vector{credentials.getUserName()}}, diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h index 242dfcd8c35..ec543104167 100644 --- a/src/Access/Common/AccessType.h +++ b/src/Access/Common/AccessType.h @@ -243,6 +243,9 @@ enum class AccessType : uint8_t M(S3, "", GLOBAL, SOURCES) \ M(HIVE, "", GLOBAL, SOURCES) \ M(AZURE, "", GLOBAL, SOURCES) \ + M(KAFKA, "", GLOBAL, SOURCES) \ + M(NATS, "", GLOBAL, SOURCES) \ + M(RABBITMQ, "", GLOBAL, SOURCES) \ M(SOURCES, "", GROUP, ALL) \ \ M(CLUSTER, "", GLOBAL, ALL) /* ON CLUSTER queries */ \ diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index a5d0451714b..06e89d78339 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -52,7 +52,10 @@ namespace {AccessType::HDFS, "HDFS"}, {AccessType::S3, "S3"}, {AccessType::HIVE, "Hive"}, - {AccessType::AZURE, "AzureBlobStorage"} + {AccessType::AZURE, "AzureBlobStorage"}, + {AccessType::KAFKA, "Kafka"}, + {AccessType::NATS, "NATS"}, + {AccessType::RABBITMQ, "RabbitMQ"} }; diff --git a/src/Access/Credentials.h b/src/Access/Credentials.h index f220b8d2c48..b21b7e6921f 100644 --- a/src/Access/Credentials.h +++ b/src/Access/Credentials.h @@ -15,6 +15,9 @@ public: explicit Credentials() = default; explicit Credentials(const String & user_name_); + Credentials(const Credentials &) = default; + Credentials(Credentials &&) = default; + virtual ~Credentials() = default; const String & getUserName() const; diff --git a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp index 58b3b75b056..116b04bf4ba 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp @@ -387,7 +387,7 @@ template using FuncQuantileExactWeighted = AggregateFunctionQuantile< Value, QuantileExactWeighted, - NameQuantileExactWeighted, + std::conditional_t, true, std::conditional_t, false, @@ -396,7 +396,7 @@ template using FuncQuantilesExactWeighted = AggregateFunctionQuantile< Value, QuantileExactWeighted, - NameQuantilesExactWeighted, + std::conditional_t, true, std::conditional_t, true, diff --git a/src/AggregateFunctions/fuzzers/CMakeLists.txt b/src/AggregateFunctions/fuzzers/CMakeLists.txt index 6a7be0d4377..f01bcb0b631 100644 --- a/src/AggregateFunctions/fuzzers/CMakeLists.txt +++ b/src/AggregateFunctions/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions) +target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) diff --git a/src/Analyzer/QueryNode.h b/src/Analyzer/QueryNode.h index aef0c8805bb..2333fc56218 100644 --- a/src/Analyzer/QueryNode.h +++ b/src/Analyzer/QueryNode.h @@ -602,9 +602,21 @@ public: return projection_columns; } + /// Returns true if query node is resolved, false otherwise + bool isResolved() const + { + return !projection_columns.empty(); + } + /// Resolve query node projection columns void resolveProjectionColumns(NamesAndTypes projection_columns_value); + /// Clear query node projection columns + void clearProjectionColumns() + { + 
projection_columns.clear(); + } + /// Remove unused projection columns void removeUnusedProjectionColumns(const std::unordered_set & used_projection_columns); diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index 39c59d27e2c..d3c88d39213 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -498,6 +498,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context); if (order_by_element.getFillStep()) sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context); + if (order_by_element.getFillStaleness()) + sort_node->getFillStaleness() = buildExpression(order_by_element.getFillStaleness(), context); list_node->getNodes().push_back(std::move(sort_node)); } diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index cb3087af707..4bb283cbf3e 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -103,6 +103,8 @@ namespace Setting extern const SettingsBool single_join_prefer_left_table; extern const SettingsBool transform_null_in; extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; + extern const SettingsBool allow_suspicious_types_in_group_by; + extern const SettingsBool allow_suspicious_types_in_order_by; extern const SettingsBool use_concurrency_control; } @@ -437,8 +439,13 @@ ProjectionName QueryAnalyzer::calculateWindowProjectionName(const QueryTreeNodeP return buffer.str(); } -ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeNodePtr & sort_column_node, const ProjectionName & sort_expression_projection_name, - const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, const ProjectionName & fill_step_expression_projection_name) +ProjectionName QueryAnalyzer::calculateSortColumnProjectionName( + const QueryTreeNodePtr & sort_column_node, + const ProjectionName & sort_expression_projection_name, + const ProjectionName & fill_from_expression_projection_name, + const ProjectionName & fill_to_expression_projection_name, + const ProjectionName & fill_step_expression_projection_name, + const ProjectionName & fill_staleness_expression_projection_name) { auto & sort_node_typed = sort_column_node->as(); @@ -468,6 +475,9 @@ ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeN if (sort_node_typed.hasFillStep()) sort_column_projection_name_buffer << " STEP " << fill_step_expression_projection_name; + + if (sort_node_typed.hasFillStaleness()) + sort_column_projection_name_buffer << " STALENESS " << fill_staleness_expression_projection_name; } return sort_column_projection_name_buffer.str(); @@ -2958,27 +2968,29 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi /// Replace storage with values storage of insertion block if (StoragePtr storage = scope.context->getViewSource()) { - QueryTreeNodePtr table_expression; - /// Process possibly nested sub-selects - for (auto * query_node = in_second_argument->as(); query_node; query_node = table_expression->as()) - table_expression = extractLeftTableExpression(query_node->getJoinTree()); + QueryTreeNodePtr table_expression = in_second_argument; - if (table_expression) + /// Process possibly nested sub-selects + while (table_expression) { - if (auto * query_table_node = table_expression->as()) - { - if 
(query_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) - { - auto replacement_table_expression = std::make_shared(storage, scope.context); - if (std::optional table_expression_modifiers = query_table_node->getTableExpressionModifiers()) - replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers); - in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression)); - } - } + if (auto * query_node = table_expression->as()) + table_expression = extractLeftTableExpression(query_node->getJoinTree()); + else if (auto * union_node = table_expression->as()) + table_expression = union_node->getQueries().getNodes().at(0); + else + break; + } + + TableNode * table_expression_table_node = table_expression ? table_expression->as() : nullptr; + + if (table_expression_table_node && + table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) + { + auto replacement_table_expression_table_node = table_expression_table_node->clone(); + replacement_table_expression_table_node->as().updateStorage(storage, scope.context); + in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression_table_node)); } } - - resolveExpressionNode(in_second_argument, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/); } /// Edge case when the first argument of IN is scalar subquery. @@ -3998,6 +4010,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ ProjectionNames fill_from_expression_projection_names; ProjectionNames fill_to_expression_projection_names; ProjectionNames fill_step_expression_projection_names; + ProjectionNames fill_staleness_expression_projection_names; auto & sort_node_list_typed = sort_node_list->as(); for (auto & node : sort_node_list_typed.getNodes()) @@ -4019,6 +4032,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ sort_node.getExpression() = sort_column_list_node->getNodes().front(); } + validateSortingKeyType(sort_node.getExpression()->getResultType(), scope); + size_t sort_expression_projection_names_size = sort_expression_projection_names.size(); if (sort_expression_projection_names_size != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, @@ -4088,11 +4103,38 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ fill_step_expression_projection_names_size); } + if (sort_node.hasFillStaleness()) + { + fill_staleness_expression_projection_names = resolveExpressionNode(sort_node.getFillStaleness(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + const auto * constant_node = sort_node.getFillStaleness()->as(); + if (!constant_node) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. In scope {}", + sort_node.getFillStaleness()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + bool is_number = isColumnedAsNumber(constant_node->getResultType()); + bool is_interval = WhichDataType(constant_node->getResultType()).isInterval(); + if (!is_number && !is_interval) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. 
In scope {}", + sort_node.getFillStaleness()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + size_t fill_staleness_expression_projection_names_size = fill_staleness_expression_projection_names.size(); + if (fill_staleness_expression_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Sort FILL STALENESS expression expected 1 projection name. Actual {}", + fill_staleness_expression_projection_names_size); + } + auto sort_column_projection_name = calculateSortColumnProjectionName(node, sort_expression_projection_names[0], fill_from_expression_projection_names.empty() ? "" : fill_from_expression_projection_names.front(), fill_to_expression_projection_names.empty() ? "" : fill_to_expression_projection_names.front(), - fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front()); + fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front(), + fill_staleness_expression_projection_names.empty() ? "" : fill_staleness_expression_projection_names.front()); result_projection_names.push_back(std::move(sort_column_projection_name)); @@ -4100,11 +4142,32 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ fill_from_expression_projection_names.clear(); fill_to_expression_projection_names.clear(); fill_step_expression_projection_names.clear(); + fill_staleness_expression_projection_names.clear(); } return result_projection_names; } +void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const +{ + if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_order_by]) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. 
" + "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); + }; + + check(*sorting_key_type); + sorting_key_type->forEachChild(check); +} + namespace { @@ -4144,11 +4207,12 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR expandTuplesInList(group_by_list); } - if (scope.group_by_use_nulls) + for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes()) { - for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes()) + for (const auto & group_by_elem : grouping_set->as()->getNodes()) { - for (const auto & group_by_elem : grouping_set->as()->getNodes()) + validateGroupByKeyType(group_by_elem->getResultType(), scope); + if (scope.group_by_use_nulls) scope.nullable_group_by_keys.insert(group_by_elem); } } @@ -4164,14 +4228,37 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR auto & group_by_list = query_node_typed.getGroupBy().getNodes(); expandTuplesInList(group_by_list); - if (scope.group_by_use_nulls) + for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes()) { - for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes()) + validateGroupByKeyType(group_by_elem->getResultType(), scope); + if (scope.group_by_use_nulls) scope.nullable_group_by_keys.insert(group_by_elem); } } } +/** Validate data types of GROUP BY key. + */ +void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const +{ + if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_group_by]) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " + "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); + }; + + check(*group_by_key_type); + group_by_key_type->forEachChild(check); +} + /** Resolve interpolate columns nodes list. */ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope) @@ -5310,6 +5397,16 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier auto & query_node_typed = query_node->as(); + /** It is unsafe to call resolveQuery on already resolved query node, because during identifier resolution process + * we replace identifiers with expressions without aliases, also at the end of resolveQuery all aliases from all nodes will be removed. + * For subsequent resolveQuery executions it is possible to have wrong projection header, because for nodes + * with aliases projection name is alias. + * + * If for client it is necessary to resolve query node after clone, client must clear projection columns from query node before resolve. 
+ */ + if (query_node_typed.isResolved()) + return; + if (query_node_typed.isCTE()) ctes_in_resolve_process.insert(query_node_typed.getCTEName()); @@ -5448,16 +5545,13 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier */ scope.use_identifier_lookup_to_result_cache = false; - if (query_node_typed.getJoinTree()) - { - TableExpressionsAliasVisitor table_expressions_visitor(scope); - table_expressions_visitor.visit(query_node_typed.getJoinTree()); + TableExpressionsAliasVisitor table_expressions_visitor(scope); + table_expressions_visitor.visit(query_node_typed.getJoinTree()); - initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); - scope.aliases.alias_name_to_table_expression_node.clear(); + initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); + scope.aliases.alias_name_to_table_expression_node.clear(); - resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor); - } + resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor); if (!scope.group_by_use_nulls) scope.use_identifier_lookup_to_result_cache = true; @@ -5675,6 +5769,9 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier { auto & union_node_typed = union_node->as(); + if (union_node_typed.isResolved()) + return; + if (union_node_typed.isCTE()) ctes_in_resolve_process.insert(union_node_typed.getCTEName()); diff --git a/src/Analyzer/Resolve/QueryAnalyzer.h b/src/Analyzer/Resolve/QueryAnalyzer.h index 0d4309843e6..ae6cf05bcdc 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.h +++ b/src/Analyzer/Resolve/QueryAnalyzer.h @@ -140,7 +140,8 @@ private: const ProjectionName & sort_expression_projection_name, const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, - const ProjectionName & fill_step_expression_projection_name); + const ProjectionName & fill_step_expression_projection_name, + const ProjectionName & fill_staleness_expression_projection_name); QueryTreeNodePtr tryGetLambdaFromSQLUserDefinedFunctions(const std::string & function_name, ContextPtr context); @@ -219,8 +220,12 @@ private: ProjectionNames resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope); + void validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const; + void resolveGroupByNode(QueryNode & query_node_typed, IdentifierResolveScope & scope); + void validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const; + void resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope); void resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope); diff --git a/src/Analyzer/SortNode.cpp b/src/Analyzer/SortNode.cpp index e891046626a..42c010e4784 100644 --- a/src/Analyzer/SortNode.cpp +++ b/src/Analyzer/SortNode.cpp @@ -69,6 +69,12 @@ void SortNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, si buffer << '\n' << std::string(indent + 2, ' ') << "FILL STEP\n"; getFillStep()->dumpTreeImpl(buffer, format_state, indent + 4); } + + if (hasFillStaleness()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FILL STALENESS\n"; + getFillStaleness()->dumpTreeImpl(buffer, format_state, indent + 4); + } } bool SortNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions) const @@ -132,6 +138,8 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const 
result->setFillTo(getFillTo()->toAST(options)); if (hasFillStep()) result->setFillStep(getFillStep()->toAST(options)); + if (hasFillStaleness()) + result->setFillStaleness(getFillStaleness()->toAST(options)); return result; } diff --git a/src/Analyzer/SortNode.h b/src/Analyzer/SortNode.h index 0ebdde61912..6f0010abdaa 100644 --- a/src/Analyzer/SortNode.h +++ b/src/Analyzer/SortNode.h @@ -105,6 +105,24 @@ public: return children[fill_step_child_index]; } + /// Returns true if sort node has fill staleness, false otherwise + bool hasFillStaleness() const + { + return children[fill_staleness_child_index] != nullptr; + } + + /// Get fill staleness + const QueryTreeNodePtr & getFillStaleness() const + { + return children[fill_staleness_child_index]; + } + + /// Get fill staleness + QueryTreeNodePtr & getFillStaleness() + { + return children[fill_staleness_child_index]; + } + /// Get collator const std::shared_ptr & getCollator() const { @@ -144,7 +162,8 @@ private: static constexpr size_t fill_from_child_index = 1; static constexpr size_t fill_to_child_index = 2; static constexpr size_t fill_step_child_index = 3; - static constexpr size_t children_size = fill_step_child_index + 1; + static constexpr size_t fill_staleness_child_index = 4; + static constexpr size_t children_size = fill_staleness_child_index + 1; SortDirection sort_direction = SortDirection::ASCENDING; std::optional nulls_sort_direction; diff --git a/src/Analyzer/UnionNode.cpp b/src/Analyzer/UnionNode.cpp index 6f70f01e519..545a6b2195b 100644 --- a/src/Analyzer/UnionNode.cpp +++ b/src/Analyzer/UnionNode.cpp @@ -35,6 +35,7 @@ namespace ErrorCodes { extern const int TYPE_MISMATCH; extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; } UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_) @@ -50,6 +51,26 @@ UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_) children[queries_child_index] = std::make_shared(); } +bool UnionNode::isResolved() const +{ + for (const auto & query_node : getQueries().getNodes()) + { + bool is_resolved = false; + + if (auto * query_node_typed = query_node->as()) + is_resolved = query_node_typed->isResolved(); + else if (auto * union_node_typed = query_node->as()) + is_resolved = union_node_typed->isResolved(); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected query tree node type in UNION node"); + + if (!is_resolved) + return false; + } + + return true; +} + NamesAndTypes UnionNode::computeProjectionColumns() const { if (recursive_cte_table) diff --git a/src/Analyzer/UnionNode.h b/src/Analyzer/UnionNode.h index 40baad1ad57..85d6afb1e47 100644 --- a/src/Analyzer/UnionNode.h +++ b/src/Analyzer/UnionNode.h @@ -163,6 +163,9 @@ public: return children[queries_child_index]; } + /// Returns true if union node is resolved, false otherwise + bool isResolved() const; + /// Compute union node projection columns NamesAndTypes computeProjectionColumns() const; diff --git a/src/Backups/BackupConcurrencyCheck.cpp b/src/Backups/BackupConcurrencyCheck.cpp new file mode 100644 index 00000000000..8b29ae41b53 --- /dev/null +++ b/src/Backups/BackupConcurrencyCheck.cpp @@ -0,0 +1,135 @@ +#include + +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CONCURRENT_ACCESS_NOT_SUPPORTED; +} + + +BackupConcurrencyCheck::BackupConcurrencyCheck( + const UUID & backup_or_restore_uuid_, + bool is_restore_, + bool on_cluster_, + bool allow_concurrency_, + BackupConcurrencyCounters & counters_) + : is_restore(is_restore_), 
backup_or_restore_uuid(backup_or_restore_uuid_), on_cluster(on_cluster_), counters(counters_) +{ + std::lock_guard lock{counters.mutex}; + + if (!allow_concurrency_) + { + bool found_concurrent_operation = false; + if (is_restore) + { + size_t num_local_restores = counters.local_restores; + size_t num_on_cluster_restores = counters.on_cluster_restores.size(); + if (on_cluster) + { + if (!counters.on_cluster_restores.contains(backup_or_restore_uuid)) + ++num_on_cluster_restores; + } + else + { + ++num_local_restores; + } + found_concurrent_operation = (num_local_restores + num_on_cluster_restores > 1); + } + else + { + size_t num_local_backups = counters.local_backups; + size_t num_on_cluster_backups = counters.on_cluster_backups.size(); + if (on_cluster) + { + if (!counters.on_cluster_backups.contains(backup_or_restore_uuid)) + ++num_on_cluster_backups; + } + else + { + ++num_local_backups; + } + found_concurrent_operation = (num_local_backups + num_on_cluster_backups > 1); + } + + if (found_concurrent_operation) + throwConcurrentOperationNotAllowed(is_restore); + } + + if (on_cluster) + { + if (is_restore) + ++counters.on_cluster_restores[backup_or_restore_uuid]; + else + ++counters.on_cluster_backups[backup_or_restore_uuid]; + } + else + { + if (is_restore) + ++counters.local_restores; + else + ++counters.local_backups; + } +} + + +BackupConcurrencyCheck::~BackupConcurrencyCheck() +{ + std::lock_guard lock{counters.mutex}; + + if (on_cluster) + { + if (is_restore) + { + auto it = counters.on_cluster_restores.find(backup_or_restore_uuid); + if (it != counters.on_cluster_restores.end()) + { + if (!--it->second) + counters.on_cluster_restores.erase(it); + } + } + else + { + auto it = counters.on_cluster_backups.find(backup_or_restore_uuid); + if (it != counters.on_cluster_backups.end()) + { + if (!--it->second) + counters.on_cluster_backups.erase(it); + } + } + } + else + { + if (is_restore) + --counters.local_restores; + else + --counters.local_backups; + } +} + + +void BackupConcurrencyCheck::throwConcurrentOperationNotAllowed(bool is_restore) +{ + throw Exception( + ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED, + "Concurrent {} are not allowed, turn on setting '{}'", + is_restore ? "restores" : "backups", + is_restore ? "allow_concurrent_restores" : "allow_concurrent_backups"); +} + + +BackupConcurrencyCounters::BackupConcurrencyCounters() = default; + + +BackupConcurrencyCounters::~BackupConcurrencyCounters() +{ + if (local_backups > 0 || local_restores > 0 || !on_cluster_backups.empty() || !on_cluster_restores.empty()) + LOG_ERROR(getLogger(__PRETTY_FUNCTION__), "Some backups or restores are processing"); +} + +} diff --git a/src/Backups/BackupConcurrencyCheck.h b/src/Backups/BackupConcurrencyCheck.h new file mode 100644 index 00000000000..048a23a716a --- /dev/null +++ b/src/Backups/BackupConcurrencyCheck.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include +#include + + +namespace DB +{ +class BackupConcurrencyCounters; + +/// Local checker for concurrent BACKUP or RESTORE operations. +/// This class is used by implementations of IBackupCoordination and IRestoreCoordination +/// to throw an exception if concurrent backups or restores are not allowed. +class BackupConcurrencyCheck +{ +public: + /// Checks concurrency of a BACKUP operation or a RESTORE operation. + /// Keep a constructed instance of BackupConcurrencyCheck until the operation is done. 
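The new `BackupConcurrencyCheck` registers every local or `ON CLUSTER` BACKUP/RESTORE in `BackupConcurrencyCounters` and throws `CONCURRENT_ACCESS_NOT_SUPPORTED` when a second operation starts while concurrency is disallowed, replacing the old `hasConcurrentBackups` counter check. A hedged sketch of the user-visible effect is below; the `backups` disk and the database name are illustrative, and how `allow_concurrent_backups`/`allow_concurrent_restores` are configured depends on the server setup.

```sql
-- Start a long-running backup asynchronously.
BACKUP DATABASE default TO Disk('backups', 'first.zip') ASYNC;

-- If concurrent backups are disallowed, this second statement fails while the first
-- one is still running, with the error produced by throwConcurrentOperationNotAllowed():
-- "Concurrent backups are not allowed, turn on setting 'allow_concurrent_backups'".
BACKUP DATABASE default TO Disk('backups', 'second.zip');

-- The analogous check for RESTORE is controlled by 'allow_concurrent_restores'.
```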
+ BackupConcurrencyCheck( + const UUID & backup_or_restore_uuid_, + bool is_restore_, + bool on_cluster_, + bool allow_concurrency_, + BackupConcurrencyCounters & counters_); + + ~BackupConcurrencyCheck(); + + [[noreturn]] static void throwConcurrentOperationNotAllowed(bool is_restore); + +private: + const bool is_restore; + const UUID backup_or_restore_uuid; + const bool on_cluster; + BackupConcurrencyCounters & counters; +}; + + +class BackupConcurrencyCounters +{ +public: + BackupConcurrencyCounters(); + ~BackupConcurrencyCounters(); + +private: + friend class BackupConcurrencyCheck; + size_t local_backups TSA_GUARDED_BY(mutex) = 0; + size_t local_restores TSA_GUARDED_BY(mutex) = 0; + std::unordered_map on_cluster_backups TSA_GUARDED_BY(mutex); + std::unordered_map on_cluster_restores TSA_GUARDED_BY(mutex); + std::mutex mutex; +}; + +} diff --git a/src/Backups/BackupCoordinationCleaner.cpp b/src/Backups/BackupCoordinationCleaner.cpp new file mode 100644 index 00000000000..1f5068a94de --- /dev/null +++ b/src/Backups/BackupCoordinationCleaner.cpp @@ -0,0 +1,64 @@ +#include + + +namespace DB +{ + +BackupCoordinationCleaner::BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_) + : zookeeper_path(zookeeper_path_), with_retries(with_retries_), log(log_) +{ +} + +void BackupCoordinationCleaner::cleanup() +{ + tryRemoveAllNodes(/* throw_if_error = */ true, /* retries_kind = */ WithRetries::kNormal); +} + +bool BackupCoordinationCleaner::tryCleanupAfterError() noexcept +{ + return tryRemoveAllNodes(/* throw_if_error = */ false, /* retries_kind = */ WithRetries::kNormal); +} + +bool BackupCoordinationCleaner::tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind) +{ + { + std::lock_guard lock{mutex}; + if (cleanup_result.succeeded) + return true; + if (cleanup_result.exception) + { + if (throw_if_error) + std::rethrow_exception(cleanup_result.exception); + return false; + } + } + + try + { + LOG_TRACE(log, "Removing nodes from ZooKeeper"); + auto holder = with_retries.createRetriesControlHolder("removeAllNodes", retries_kind); + holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zookeeper); + zookeeper->removeRecursive(zookeeper_path); + }); + + std::lock_guard lock{mutex}; + cleanup_result.succeeded = true; + return true; + } + catch (...) + { + LOG_TRACE(log, "Caught exception while removing nodes from ZooKeeper for this restore: {}", + getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true)); + + std::lock_guard lock{mutex}; + cleanup_result.exception = std::current_exception(); + + if (throw_if_error) + throw; + return false; + } +} + +} diff --git a/src/Backups/BackupCoordinationCleaner.h b/src/Backups/BackupCoordinationCleaner.h new file mode 100644 index 00000000000..43e095d9f33 --- /dev/null +++ b/src/Backups/BackupCoordinationCleaner.h @@ -0,0 +1,40 @@ +#pragma once + +#include + + +namespace DB +{ + +/// Removes all the nodes from ZooKeeper used to coordinate a BACKUP ON CLUSTER operation or +/// a RESTORE ON CLUSTER operation (successful or not). +/// This class is used by BackupCoordinationOnCluster and RestoreCoordinationOnCluster to cleanup. 
+class BackupCoordinationCleaner +{ +public: + BackupCoordinationCleaner(const String & zookeeper_path_, const WithRetries & with_retries_, LoggerPtr log_); + + void cleanup(); + bool tryCleanupAfterError() noexcept; + +private: + bool tryRemoveAllNodes(bool throw_if_error, WithRetries::Kind retries_kind); + + const String zookeeper_path; + + /// A reference to a field of the parent object which is either BackupCoordinationOnCluster or RestoreCoordinationOnCluster. + const WithRetries & with_retries; + + const LoggerPtr log; + + struct CleanupResult + { + bool succeeded = false; + std::exception_ptr exception; + }; + CleanupResult cleanup_result TSA_GUARDED_BY(mutex); + + std::mutex mutex; +}; + +} diff --git a/src/Backups/BackupCoordinationLocal.cpp b/src/Backups/BackupCoordinationLocal.cpp index efdc18cc29c..8bd6b4d327d 100644 --- a/src/Backups/BackupCoordinationLocal.cpp +++ b/src/Backups/BackupCoordinationLocal.cpp @@ -1,5 +1,7 @@ #include + #include +#include #include #include #include @@ -8,27 +10,20 @@ namespace DB { -BackupCoordinationLocal::BackupCoordinationLocal(bool plain_backup_) - : log(getLogger("BackupCoordinationLocal")), file_infos(plain_backup_) +BackupCoordinationLocal::BackupCoordinationLocal( + const UUID & backup_uuid_, + bool is_plain_backup_, + bool allow_concurrent_backup_, + BackupConcurrencyCounters & concurrency_counters_) + : log(getLogger("BackupCoordinationLocal")) + , concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ false, allow_concurrent_backup_, concurrency_counters_) + , file_infos(is_plain_backup_) { } BackupCoordinationLocal::~BackupCoordinationLocal() = default; -void BackupCoordinationLocal::setStage(const String &, const String &) -{ -} - -void BackupCoordinationLocal::setError(const Exception &) -{ -} - -Strings BackupCoordinationLocal::waitForStage(const String &) -{ - return {}; -} - -Strings BackupCoordinationLocal::waitForStage(const String &, std::chrono::milliseconds) +ZooKeeperRetriesInfo BackupCoordinationLocal::getOnClusterInitializationKeeperRetriesInfo() const { return {}; } @@ -135,15 +130,4 @@ bool BackupCoordinationLocal::startWritingFile(size_t data_file_index) return writing_files.emplace(data_file_index).second; } - -bool BackupCoordinationLocal::hasConcurrentBackups(const std::atomic & num_active_backups) const -{ - if (num_active_backups > 1) - { - LOG_WARNING(log, "Found concurrent backups: num_active_backups={}", num_active_backups); - return true; - } - return false; -} - } diff --git a/src/Backups/BackupCoordinationLocal.h b/src/Backups/BackupCoordinationLocal.h index a7f15c79649..09991c0d301 100644 --- a/src/Backups/BackupCoordinationLocal.h +++ b/src/Backups/BackupCoordinationLocal.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -21,13 +22,21 @@ namespace DB class BackupCoordinationLocal : public IBackupCoordination { public: - explicit BackupCoordinationLocal(bool plain_backup_); + explicit BackupCoordinationLocal( + const UUID & backup_uuid_, + bool is_plain_backup_, + bool allow_concurrent_backup_, + BackupConcurrencyCounters & concurrency_counters_); + ~BackupCoordinationLocal() override; - void setStage(const String & new_stage, const String & message) override; - void setError(const Exception & exception) override; - Strings waitForStage(const String & stage_to_wait) override; - Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) override; + Strings setStage(const String &, const String &, bool) override { return {}; } + 
void setBackupQueryWasSentToOtherHosts() override {} + bool trySetError(std::exception_ptr) override { return true; } + void finish() override {} + bool tryFinishAfterError() noexcept override { return true; } + void waitForOtherHostsToFinish() override {} + bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; } void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, const std::vector & part_names_and_checksums) override; @@ -54,17 +63,18 @@ public: BackupFileInfos getFileInfosForAllHosts() const override; bool startWritingFile(size_t data_file_index) override; - bool hasConcurrentBackups(const std::atomic & num_active_backups) const override; + ZooKeeperRetriesInfo getOnClusterInitializationKeeperRetriesInfo() const override; private: LoggerPtr const log; + BackupConcurrencyCheck concurrency_check; - BackupCoordinationReplicatedTables TSA_GUARDED_BY(replicated_tables_mutex) replicated_tables; - BackupCoordinationReplicatedAccess TSA_GUARDED_BY(replicated_access_mutex) replicated_access; - BackupCoordinationReplicatedSQLObjects TSA_GUARDED_BY(replicated_sql_objects_mutex) replicated_sql_objects; - BackupCoordinationFileInfos TSA_GUARDED_BY(file_infos_mutex) file_infos; + BackupCoordinationReplicatedTables replicated_tables TSA_GUARDED_BY(replicated_tables_mutex); + BackupCoordinationReplicatedAccess replicated_access TSA_GUARDED_BY(replicated_access_mutex); + BackupCoordinationReplicatedSQLObjects replicated_sql_objects TSA_GUARDED_BY(replicated_sql_objects_mutex); + BackupCoordinationFileInfos file_infos TSA_GUARDED_BY(file_infos_mutex); BackupCoordinationKeeperMapTables keeper_map_tables TSA_GUARDED_BY(keeper_map_tables_mutex); - std::unordered_set TSA_GUARDED_BY(writing_files_mutex) writing_files; + std::unordered_set writing_files TSA_GUARDED_BY(writing_files_mutex); mutable std::mutex replicated_tables_mutex; mutable std::mutex replicated_access_mutex; diff --git a/src/Backups/BackupCoordinationRemote.cpp b/src/Backups/BackupCoordinationOnCluster.cpp similarity index 73% rename from src/Backups/BackupCoordinationRemote.cpp rename to src/Backups/BackupCoordinationOnCluster.cpp index a60ac0c636f..dc34939f805 100644 --- a/src/Backups/BackupCoordinationRemote.cpp +++ b/src/Backups/BackupCoordinationOnCluster.cpp @@ -1,7 +1,4 @@ -#include - -#include -#include +#include #include #include @@ -26,8 +23,6 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -namespace Stage = BackupCoordinationStage; - namespace { using PartNameAndChecksum = IBackupCoordination::PartNameAndChecksum; @@ -149,144 +144,152 @@ namespace }; } -size_t BackupCoordinationRemote::findCurrentHostIndex(const Strings & all_hosts, const String & current_host) +Strings BackupCoordinationOnCluster::excludeInitiator(const Strings & all_hosts) +{ + Strings all_hosts_without_initiator = all_hosts; + bool has_initiator = (std::erase(all_hosts_without_initiator, kInitiator) > 0); + chassert(has_initiator); + return all_hosts_without_initiator; +} + +size_t BackupCoordinationOnCluster::findCurrentHostIndex(const String & current_host, const Strings & all_hosts) { auto it = std::find(all_hosts.begin(), all_hosts.end(), current_host); if (it == all_hosts.end()) - return 0; + return all_hosts.size(); return it - all_hosts.begin(); } -BackupCoordinationRemote::BackupCoordinationRemote( - zkutil::GetZooKeeper get_zookeeper_, + +BackupCoordinationOnCluster::BackupCoordinationOnCluster( + const UUID & backup_uuid_, + bool is_plain_backup_, 
const String & root_zookeeper_path_, + zkutil::GetZooKeeper get_zookeeper_, const BackupKeeperSettings & keeper_settings_, - const String & backup_uuid_, - const Strings & all_hosts_, const String & current_host_, - bool plain_backup_, - bool is_internal_, + const Strings & all_hosts_, + bool allow_concurrent_backup_, + BackupConcurrencyCounters & concurrency_counters_, + ThreadPoolCallbackRunnerUnsafe schedule_, QueryStatusPtr process_list_element_) : root_zookeeper_path(root_zookeeper_path_) - , zookeeper_path(root_zookeeper_path_ + "/backup-" + backup_uuid_) + , zookeeper_path(root_zookeeper_path_ + "/backup-" + toString(backup_uuid_)) , keeper_settings(keeper_settings_) , backup_uuid(backup_uuid_) , all_hosts(all_hosts_) + , all_hosts_without_initiator(excludeInitiator(all_hosts)) , current_host(current_host_) - , current_host_index(findCurrentHostIndex(all_hosts, current_host)) - , plain_backup(plain_backup_) - , is_internal(is_internal_) - , log(getLogger("BackupCoordinationRemote")) - , with_retries( - log, - get_zookeeper_, - keeper_settings, - process_list_element_, - [my_zookeeper_path = zookeeper_path, my_current_host = current_host, my_is_internal = is_internal] - (WithRetries::FaultyKeeper & zk) - { - /// Recreate this ephemeral node to signal that we are alive. - if (my_is_internal) - { - String alive_node_path = my_zookeeper_path + "/stage/alive|" + my_current_host; - - /// Delete the ephemeral node from the previous connection so we don't have to wait for keeper to do it automatically. - zk->tryRemove(alive_node_path); - - zk->createAncestors(alive_node_path); - zk->create(alive_node_path, "", zkutil::CreateMode::Ephemeral); - } - }) + , current_host_index(findCurrentHostIndex(current_host, all_hosts)) + , plain_backup(is_plain_backup_) + , log(getLogger("BackupCoordinationOnCluster")) + , with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); }) + , concurrency_check(backup_uuid_, /* is_restore = */ false, /* on_cluster = */ true, allow_concurrent_backup_, concurrency_counters_) + , stage_sync(/* is_restore = */ false, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_backup_, with_retries, schedule_, process_list_element_, log) + , cleaner(zookeeper_path, with_retries, log) { createRootNodes(); - - stage_sync.emplace( - zookeeper_path, - with_retries, - log); } -BackupCoordinationRemote::~BackupCoordinationRemote() +BackupCoordinationOnCluster::~BackupCoordinationOnCluster() { - try - { - if (!is_internal) - removeAllNodes(); - } - catch (...) 
- { - tryLogCurrentException(__PRETTY_FUNCTION__); - } + tryFinishImpl(); } -void BackupCoordinationRemote::createRootNodes() +void BackupCoordinationOnCluster::createRootNodes() { - auto holder = with_retries.createRetriesControlHolder("createRootNodes"); + auto holder = with_retries.createRetriesControlHolder("createRootNodes", WithRetries::kInitialization); holder.retries_ctl.retryLoop( [&, &zk = holder.faulty_zookeeper]() { with_retries.renewZooKeeper(zk); zk->createAncestors(zookeeper_path); - - Coordination::Requests ops; - Coordination::Responses responses; - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path, "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_part_names", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_mutations", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_data_paths", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_access", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_sql_objects", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/keeper_map_tables", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/file_infos", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/writing_files", "", zkutil::CreateMode::Persistent)); - zk->tryMulti(ops, responses); + zk->createIfNotExists(zookeeper_path, ""); + zk->createIfNotExists(zookeeper_path + "/repl_part_names", ""); + zk->createIfNotExists(zookeeper_path + "/repl_mutations", ""); + zk->createIfNotExists(zookeeper_path + "/repl_data_paths", ""); + zk->createIfNotExists(zookeeper_path + "/repl_access", ""); + zk->createIfNotExists(zookeeper_path + "/repl_sql_objects", ""); + zk->createIfNotExists(zookeeper_path + "/keeper_map_tables", ""); + zk->createIfNotExists(zookeeper_path + "/file_infos", ""); + zk->createIfNotExists(zookeeper_path + "/writing_files", ""); }); } -void BackupCoordinationRemote::removeAllNodes() +Strings BackupCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync) { - auto holder = with_retries.createRetriesControlHolder("removeAllNodes"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() + stage_sync.setStage(new_stage, message); + + if (!sync) + return {}; + + return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator); +} + +void BackupCoordinationOnCluster::setBackupQueryWasSentToOtherHosts() +{ + backup_query_was_sent_to_other_hosts = true; +} + +bool BackupCoordinationOnCluster::trySetError(std::exception_ptr exception) +{ + return stage_sync.trySetError(exception); +} + +void BackupCoordinationOnCluster::finish() +{ + bool other_hosts_also_finished = false; + stage_sync.finish(other_hosts_also_finished); + + if ((current_host == kInitiator) && (other_hosts_also_finished || !backup_query_was_sent_to_other_hosts)) + cleaner.cleanup(); +} + +bool BackupCoordinationOnCluster::tryFinishAfterError() noexcept +{ + return tryFinishImpl(); +} + +bool BackupCoordinationOnCluster::tryFinishImpl() noexcept +{ + bool other_hosts_also_finished = false; + if (!stage_sync.tryFinishAfterError(other_hosts_also_finished)) + return false; + + if ((current_host == kInitiator) && 
(other_hosts_also_finished || !backup_query_was_sent_to_other_hosts)) { - /// Usually this function is called by the initiator when a backup is complete so we don't need the coordination anymore. - /// - /// However there can be a rare situation when this function is called after an error occurs on the initiator of a query - /// while some hosts are still making the backup. Removing all the nodes will remove the parent node of the backup coordination - /// at `zookeeper_path` which might cause such hosts to stop with exception "ZNONODE". Or such hosts might still do some useless part - /// of their backup work before that. Anyway in this case backup won't be finalized (because only an initiator can do that). - with_retries.renewZooKeeper(zk); - zk->removeRecursive(zookeeper_path); - }); + if (!cleaner.tryCleanupAfterError()) + return false; + } + + return true; } - -void BackupCoordinationRemote::setStage(const String & new_stage, const String & message) +void BackupCoordinationOnCluster::waitForOtherHostsToFinish() { - if (is_internal) - stage_sync->set(current_host, new_stage, message); - else - stage_sync->set(current_host, new_stage, /* message */ "", /* all_hosts */ true); + if ((current_host != kInitiator) || !backup_query_was_sent_to_other_hosts) + return; + stage_sync.waitForOtherHostsToFinish(); } -void BackupCoordinationRemote::setError(const Exception & exception) +bool BackupCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept { - stage_sync->setError(current_host, exception); + if (current_host != kInitiator) + return false; + if (!backup_query_was_sent_to_other_hosts) + return true; + return stage_sync.tryWaitForOtherHostsToFinishAfterError(); } -Strings BackupCoordinationRemote::waitForStage(const String & stage_to_wait) +ZooKeeperRetriesInfo BackupCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const { - return stage_sync->wait(all_hosts, stage_to_wait); + return ZooKeeperRetriesInfo{keeper_settings.max_retries_while_initializing, + static_cast(keeper_settings.retry_initial_backoff_ms.count()), + static_cast(keeper_settings.retry_max_backoff_ms.count())}; } -Strings BackupCoordinationRemote::waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) -{ - return stage_sync->waitFor(all_hosts, stage_to_wait, timeout); -} - - -void BackupCoordinationRemote::serializeToMultipleZooKeeperNodes(const String & path, const String & value, const String & logging_name) +void BackupCoordinationOnCluster::serializeToMultipleZooKeeperNodes(const String & path, const String & value, const String & logging_name) { { auto holder = with_retries.createRetriesControlHolder(logging_name + "::create"); @@ -301,7 +304,7 @@ void BackupCoordinationRemote::serializeToMultipleZooKeeperNodes(const String & if (value.empty()) return; - size_t max_part_size = keeper_settings.keeper_value_max_size; + size_t max_part_size = keeper_settings.value_max_size; if (!max_part_size) max_part_size = value.size(); @@ -324,7 +327,7 @@ void BackupCoordinationRemote::serializeToMultipleZooKeeperNodes(const String & } } -String BackupCoordinationRemote::deserializeFromMultipleZooKeeperNodes(const String & path, const String & logging_name) const +String BackupCoordinationOnCluster::deserializeFromMultipleZooKeeperNodes(const String & path, const String & logging_name) const { Strings part_names; @@ -357,7 +360,7 @@ String BackupCoordinationRemote::deserializeFromMultipleZooKeeperNodes(const Str } -void BackupCoordinationRemote::addReplicatedPartNames( +void 
BackupCoordinationOnCluster::addReplicatedPartNames( const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, @@ -381,14 +384,14 @@ void BackupCoordinationRemote::addReplicatedPartNames( }); } -Strings BackupCoordinationRemote::getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const +Strings BackupCoordinationOnCluster::getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const { std::lock_guard lock{replicated_tables_mutex}; prepareReplicatedTables(); return replicated_tables->getPartNames(table_zk_path, replica_name); } -void BackupCoordinationRemote::addReplicatedMutations( +void BackupCoordinationOnCluster::addReplicatedMutations( const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, @@ -412,7 +415,7 @@ void BackupCoordinationRemote::addReplicatedMutations( }); } -std::vector BackupCoordinationRemote::getReplicatedMutations(const String & table_zk_path, const String & replica_name) const +std::vector BackupCoordinationOnCluster::getReplicatedMutations(const String & table_zk_path, const String & replica_name) const { std::lock_guard lock{replicated_tables_mutex}; prepareReplicatedTables(); @@ -420,7 +423,7 @@ std::vector BackupCoordinationRemote::getRepl } -void BackupCoordinationRemote::addReplicatedDataPath( +void BackupCoordinationOnCluster::addReplicatedDataPath( const String & table_zk_path, const String & data_path) { { @@ -441,7 +444,7 @@ void BackupCoordinationRemote::addReplicatedDataPath( }); } -Strings BackupCoordinationRemote::getReplicatedDataPaths(const String & table_zk_path) const +Strings BackupCoordinationOnCluster::getReplicatedDataPaths(const String & table_zk_path) const { std::lock_guard lock{replicated_tables_mutex}; prepareReplicatedTables(); @@ -449,7 +452,7 @@ Strings BackupCoordinationRemote::getReplicatedDataPaths(const String & table_zk } -void BackupCoordinationRemote::prepareReplicatedTables() const +void BackupCoordinationOnCluster::prepareReplicatedTables() const { if (replicated_tables) return; @@ -536,7 +539,7 @@ void BackupCoordinationRemote::prepareReplicatedTables() const replicated_tables->addDataPath(std::move(data_paths)); } -void BackupCoordinationRemote::addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path) +void BackupCoordinationOnCluster::addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path) { { std::lock_guard lock{replicated_access_mutex}; @@ -558,14 +561,14 @@ void BackupCoordinationRemote::addReplicatedAccessFilePath(const String & access }); } -Strings BackupCoordinationRemote::getReplicatedAccessFilePaths(const String & access_zk_path, AccessEntityType access_entity_type) const +Strings BackupCoordinationOnCluster::getReplicatedAccessFilePaths(const String & access_zk_path, AccessEntityType access_entity_type) const { std::lock_guard lock{replicated_access_mutex}; prepareReplicatedAccess(); return replicated_access->getFilePaths(access_zk_path, access_entity_type, current_host); } -void BackupCoordinationRemote::prepareReplicatedAccess() const +void BackupCoordinationOnCluster::prepareReplicatedAccess() const { if (replicated_access) return; @@ -601,7 +604,7 @@ void BackupCoordinationRemote::prepareReplicatedAccess() const replicated_access->addFilePath(std::move(file_path)); } -void BackupCoordinationRemote::addReplicatedSQLObjectsDir(const String & 
loader_zk_path, UserDefinedSQLObjectType object_type, const String & dir_path) +void BackupCoordinationOnCluster::addReplicatedSQLObjectsDir(const String & loader_zk_path, UserDefinedSQLObjectType object_type, const String & dir_path) { { std::lock_guard lock{replicated_sql_objects_mutex}; @@ -631,14 +634,14 @@ void BackupCoordinationRemote::addReplicatedSQLObjectsDir(const String & loader_ }); } -Strings BackupCoordinationRemote::getReplicatedSQLObjectsDirs(const String & loader_zk_path, UserDefinedSQLObjectType object_type) const +Strings BackupCoordinationOnCluster::getReplicatedSQLObjectsDirs(const String & loader_zk_path, UserDefinedSQLObjectType object_type) const { std::lock_guard lock{replicated_sql_objects_mutex}; prepareReplicatedSQLObjects(); return replicated_sql_objects->getDirectories(loader_zk_path, object_type, current_host); } -void BackupCoordinationRemote::prepareReplicatedSQLObjects() const +void BackupCoordinationOnCluster::prepareReplicatedSQLObjects() const { if (replicated_sql_objects) return; @@ -674,7 +677,7 @@ void BackupCoordinationRemote::prepareReplicatedSQLObjects() const replicated_sql_objects->addDirectory(std::move(directory)); } -void BackupCoordinationRemote::addKeeperMapTable(const String & table_zookeeper_root_path, const String & table_id, const String & data_path_in_backup) +void BackupCoordinationOnCluster::addKeeperMapTable(const String & table_zookeeper_root_path, const String & table_id, const String & data_path_in_backup) { { std::lock_guard lock{keeper_map_tables_mutex}; @@ -695,7 +698,7 @@ void BackupCoordinationRemote::addKeeperMapTable(const String & table_zookeeper_ }); } -void BackupCoordinationRemote::prepareKeeperMapTables() const +void BackupCoordinationOnCluster::prepareKeeperMapTables() const { if (keeper_map_tables) return; @@ -740,7 +743,7 @@ void BackupCoordinationRemote::prepareKeeperMapTables() const } -String BackupCoordinationRemote::getKeeperMapDataPath(const String & table_zookeeper_root_path) const +String BackupCoordinationOnCluster::getKeeperMapDataPath(const String & table_zookeeper_root_path) const { std::lock_guard lock(keeper_map_tables_mutex); prepareKeeperMapTables(); @@ -748,7 +751,7 @@ String BackupCoordinationRemote::getKeeperMapDataPath(const String & table_zooke } -void BackupCoordinationRemote::addFileInfos(BackupFileInfos && file_infos_) +void BackupCoordinationOnCluster::addFileInfos(BackupFileInfos && file_infos_) { { std::lock_guard lock{file_infos_mutex}; @@ -761,21 +764,21 @@ void BackupCoordinationRemote::addFileInfos(BackupFileInfos && file_infos_) serializeToMultipleZooKeeperNodes(zookeeper_path + "/file_infos/" + current_host, file_infos_str, "addFileInfos"); } -BackupFileInfos BackupCoordinationRemote::getFileInfos() const +BackupFileInfos BackupCoordinationOnCluster::getFileInfos() const { std::lock_guard lock{file_infos_mutex}; prepareFileInfos(); return file_infos->getFileInfos(current_host); } -BackupFileInfos BackupCoordinationRemote::getFileInfosForAllHosts() const +BackupFileInfos BackupCoordinationOnCluster::getFileInfosForAllHosts() const { std::lock_guard lock{file_infos_mutex}; prepareFileInfos(); return file_infos->getFileInfosForAllHosts(); } -void BackupCoordinationRemote::prepareFileInfos() const +void BackupCoordinationOnCluster::prepareFileInfos() const { if (file_infos) return; @@ -801,7 +804,7 @@ void BackupCoordinationRemote::prepareFileInfos() const } } -bool BackupCoordinationRemote::startWritingFile(size_t data_file_index) +bool 
BackupCoordinationOnCluster::startWritingFile(size_t data_file_index) { { /// Check if this host is already writing this file. @@ -842,66 +845,4 @@ bool BackupCoordinationRemote::startWritingFile(size_t data_file_index) } } -bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic &) const -{ - /// If its internal concurrency will be checked for the base backup - if (is_internal) - return false; - - std::string backup_stage_path = zookeeper_path + "/stage"; - - bool result = false; - - auto holder = with_retries.createRetriesControlHolder("getAllArchiveSuffixes"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - if (!zk->exists(root_zookeeper_path)) - zk->createAncestors(root_zookeeper_path); - - for (size_t attempt = 0; attempt < MAX_ZOOKEEPER_ATTEMPTS; ++attempt) - { - Coordination::Stat stat; - zk->get(root_zookeeper_path, &stat); - Strings existing_backup_paths = zk->getChildren(root_zookeeper_path); - - for (const auto & existing_backup_path : existing_backup_paths) - { - if (startsWith(existing_backup_path, "restore-")) - continue; - - String existing_backup_uuid = existing_backup_path; - existing_backup_uuid.erase(0, String("backup-").size()); - - if (existing_backup_uuid == toString(backup_uuid)) - continue; - - String status; - if (zk->tryGet(root_zookeeper_path + "/" + existing_backup_path + "/stage", status)) - { - /// Check if some other backup is in progress - if (status == Stage::SCHEDULED_TO_START) - { - LOG_WARNING(log, "Found a concurrent backup: {}, current backup: {}", existing_backup_uuid, toString(backup_uuid)); - result = true; - return; - } - } - } - - zk->createIfNotExists(backup_stage_path, ""); - auto code = zk->trySet(backup_stage_path, Stage::SCHEDULED_TO_START, stat.version); - if (code == Coordination::Error::ZOK) - break; - bool is_last_attempt = (attempt == MAX_ZOOKEEPER_ATTEMPTS - 1); - if ((code != Coordination::Error::ZBADVERSION) || is_last_attempt) - throw zkutil::KeeperException::fromPath(code, backup_stage_path); - } - }); - - return result; -} - } diff --git a/src/Backups/BackupCoordinationRemote.h b/src/Backups/BackupCoordinationOnCluster.h similarity index 67% rename from src/Backups/BackupCoordinationRemote.h rename to src/Backups/BackupCoordinationOnCluster.h index 7a56b1a4eb8..7369c2cc746 100644 --- a/src/Backups/BackupCoordinationRemote.h +++ b/src/Backups/BackupCoordinationOnCluster.h @@ -1,6 +1,8 @@ #pragma once #include +#include +#include #include #include #include @@ -13,32 +15,35 @@ namespace DB { -/// We try to store data to zookeeper several times due to possible version conflicts. -constexpr size_t MAX_ZOOKEEPER_ATTEMPTS = 10; - /// Implementation of the IBackupCoordination interface performing coordination via ZooKeeper. It's necessary for "BACKUP ON CLUSTER". -class BackupCoordinationRemote : public IBackupCoordination +class BackupCoordinationOnCluster : public IBackupCoordination { public: - using BackupKeeperSettings = WithRetries::KeeperSettings; + /// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER query. 
+ static const constexpr std::string_view kInitiator; - BackupCoordinationRemote( - zkutil::GetZooKeeper get_zookeeper_, + BackupCoordinationOnCluster( + const UUID & backup_uuid_, + bool is_plain_backup_, const String & root_zookeeper_path_, + zkutil::GetZooKeeper get_zookeeper_, const BackupKeeperSettings & keeper_settings_, - const String & backup_uuid_, - const Strings & all_hosts_, const String & current_host_, - bool plain_backup_, - bool is_internal_, + const Strings & all_hosts_, + bool allow_concurrent_backup_, + BackupConcurrencyCounters & concurrency_counters_, + ThreadPoolCallbackRunnerUnsafe schedule_, QueryStatusPtr process_list_element_); - ~BackupCoordinationRemote() override; + ~BackupCoordinationOnCluster() override; - void setStage(const String & new_stage, const String & message) override; - void setError(const Exception & exception) override; - Strings waitForStage(const String & stage_to_wait) override; - Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) override; + Strings setStage(const String & new_stage, const String & message, bool sync) override; + void setBackupQueryWasSentToOtherHosts() override; + bool trySetError(std::exception_ptr exception) override; + void finish() override; + bool tryFinishAfterError() noexcept override; + void waitForOtherHostsToFinish() override; + bool tryWaitForOtherHostsToFinishAfterError() noexcept override; void addReplicatedPartNames( const String & table_zk_path, @@ -73,13 +78,14 @@ public: BackupFileInfos getFileInfosForAllHosts() const override; bool startWritingFile(size_t data_file_index) override; - bool hasConcurrentBackups(const std::atomic & num_active_backups) const override; + ZooKeeperRetriesInfo getOnClusterInitializationKeeperRetriesInfo() const override; - static size_t findCurrentHostIndex(const Strings & all_hosts, const String & current_host); + static Strings excludeInitiator(const Strings & all_hosts); + static size_t findCurrentHostIndex(const String & current_host, const Strings & all_hosts); private: void createRootNodes(); - void removeAllNodes(); + bool tryFinishImpl() noexcept; void serializeToMultipleZooKeeperNodes(const String & path, const String & value, const String & logging_name); String deserializeFromMultipleZooKeeperNodes(const String & path, const String & logging_name) const; @@ -96,26 +102,27 @@ private: const String root_zookeeper_path; const String zookeeper_path; const BackupKeeperSettings keeper_settings; - const String backup_uuid; + const UUID backup_uuid; const Strings all_hosts; + const Strings all_hosts_without_initiator; const String current_host; const size_t current_host_index; const bool plain_backup; - const bool is_internal; LoggerPtr const log; - /// The order of these two fields matters, because stage_sync holds a reference to with_retries object - mutable WithRetries with_retries; - std::optional stage_sync; + const WithRetries with_retries; + BackupConcurrencyCheck concurrency_check; + BackupCoordinationStageSync stage_sync; + BackupCoordinationCleaner cleaner; + std::atomic backup_query_was_sent_to_other_hosts = false; - mutable std::optional TSA_GUARDED_BY(replicated_tables_mutex) replicated_tables; - mutable std::optional TSA_GUARDED_BY(replicated_access_mutex) replicated_access; - mutable std::optional TSA_GUARDED_BY(replicated_sql_objects_mutex) replicated_sql_objects; - mutable std::optional TSA_GUARDED_BY(file_infos_mutex) file_infos; + mutable std::optional replicated_tables TSA_GUARDED_BY(replicated_tables_mutex); + mutable 
std::optional replicated_access TSA_GUARDED_BY(replicated_access_mutex); + mutable std::optional replicated_sql_objects TSA_GUARDED_BY(replicated_sql_objects_mutex); + mutable std::optional file_infos TSA_GUARDED_BY(file_infos_mutex); mutable std::optional keeper_map_tables TSA_GUARDED_BY(keeper_map_tables_mutex); - std::unordered_set TSA_GUARDED_BY(writing_files_mutex) writing_files; + std::unordered_set writing_files TSA_GUARDED_BY(writing_files_mutex); - mutable std::mutex zookeeper_mutex; mutable std::mutex replicated_tables_mutex; mutable std::mutex replicated_access_mutex; mutable std::mutex replicated_sql_objects_mutex; diff --git a/src/Backups/BackupCoordinationStage.h b/src/Backups/BackupCoordinationStage.h index 9abdc019784..2cd1efb5404 100644 --- a/src/Backups/BackupCoordinationStage.h +++ b/src/Backups/BackupCoordinationStage.h @@ -8,10 +8,6 @@ namespace DB namespace BackupCoordinationStage { - /// This stage is set after concurrency check so ensure we dont start other backup/restores - /// when concurrent backup/restores are not allowed - constexpr const char * SCHEDULED_TO_START = "scheduled to start"; - /// Finding all tables and databases which we're going to put to the backup and collecting their metadata. constexpr const char * GATHERING_METADATA = "gathering metadata"; @@ -46,10 +42,6 @@ namespace BackupCoordinationStage /// Coordination stage meaning that a host finished its work. constexpr const char * COMPLETED = "completed"; - - /// Coordination stage meaning that backup/restore has failed due to an error - /// Check '/error' for the error message - constexpr const char * ERROR = "error"; } } diff --git a/src/Backups/BackupCoordinationStageSync.cpp b/src/Backups/BackupCoordinationStageSync.cpp index 17ef163ce35..9a05f9490c2 100644 --- a/src/Backups/BackupCoordinationStageSync.cpp +++ b/src/Backups/BackupCoordinationStageSync.cpp @@ -9,267 +9,1117 @@ #include #include #include +#include +#include +#include + namespace DB { -namespace Stage = BackupCoordinationStage; - namespace ErrorCodes { extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE; + extern const int LOGICAL_ERROR; } +namespace +{ + /// The coordination version is stored in the 'start' node for each host + /// by each host when it starts working on this backup or restore. + enum Version + { + kInitialVersion = 1, + + /// This old version didn't create the 'finish' node, it uses stage "completed" to tell other hosts that the work is done. + /// If an error happened this old version didn't change any nodes to tell other hosts that the error handling is done. + /// So while using this old version hosts couldn't know when other hosts are done with the error handling, + /// and that situation caused weird errors in the logs somehow. + /// Also this old version didn't create the 'start' node for the initiator. + kVersionWithoutFinishNode = 1, + + /// Now we create the 'finish' node both if the work is done or if the error handling is done. + + kCurrentVersion = 2, + }; + + /// Empty string as the current host is used to mark the initiator of a BACKUP ON CLUSTER or RESTORE ON CLUSTER query. + const constexpr std::string_view kInitiator; +} + +bool BackupCoordinationStageSync::HostInfo::operator ==(const HostInfo & other) const +{ + /// We don't compare `last_connection_time` here. 
+ return (host == other.host) && (started == other.started) && (connected == other.connected) && (finished == other.finished) + && (stages == other.stages) && (!!exception == !!other.exception); +} + +bool BackupCoordinationStageSync::HostInfo::operator !=(const HostInfo & other) const +{ + return !(*this == other); +} + +bool BackupCoordinationStageSync::State::operator ==(const State & other) const = default; +bool BackupCoordinationStageSync::State::operator !=(const State & other) const = default; + BackupCoordinationStageSync::BackupCoordinationStageSync( - const String & root_zookeeper_path_, - WithRetries & with_retries_, - LoggerPtr log_) - : zookeeper_path(root_zookeeper_path_ + "/stage") + bool is_restore_, + const String & zookeeper_path_, + const String & current_host_, + const Strings & all_hosts_, + bool allow_concurrency_, + const WithRetries & with_retries_, + ThreadPoolCallbackRunnerUnsafe schedule_, + QueryStatusPtr process_list_element_, + LoggerPtr log_) + : is_restore(is_restore_) + , operation_name(is_restore ? "restore" : "backup") + , current_host(current_host_) + , current_host_desc(getHostDesc(current_host)) + , all_hosts(all_hosts_) + , allow_concurrency(allow_concurrency_) , with_retries(with_retries_) + , schedule(schedule_) + , process_list_element(process_list_element_) , log(log_) + , failure_after_host_disconnected_for_seconds(with_retries.getKeeperSettings().failure_after_host_disconnected_for_seconds) + , finish_timeout_after_error(with_retries.getKeeperSettings().finish_timeout_after_error) + , sync_period_ms(with_retries.getKeeperSettings().sync_period_ms) + , max_attempts_after_bad_version(with_retries.getKeeperSettings().max_attempts_after_bad_version) + , zookeeper_path(zookeeper_path_) + , root_zookeeper_path(zookeeper_path.parent_path().parent_path()) + , operation_node_path(zookeeper_path.parent_path()) + , operation_node_name(zookeeper_path.parent_path().filename()) + , stage_node_path(zookeeper_path) + , start_node_path(zookeeper_path / ("started|" + current_host)) + , finish_node_path(zookeeper_path / ("finished|" + current_host)) + , num_hosts_node_path(zookeeper_path / "num_hosts") + , alive_node_path(zookeeper_path / ("alive|" + current_host)) + , alive_tracker_node_path(fs::path{root_zookeeper_path} / "alive_tracker") + , error_node_path(zookeeper_path / "error") + , zk_nodes_changed(std::make_shared()) { + if ((zookeeper_path.filename() != "stage") || !operation_node_name.starts_with(is_restore ? "restore-" : "backup-") + || (root_zookeeper_path == operation_node_path)) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected path in ZooKeeper specified: {}", zookeeper_path); + } + + initializeState(); createRootNodes(); + + try + { + createStartAndAliveNodes(); + startWatchingThread(); + } + catch (...) 
+ { + trySetError(std::current_exception()); + tryFinishImpl(); + throw; + } } + +BackupCoordinationStageSync::~BackupCoordinationStageSync() +{ + tryFinishImpl(); +} + + +void BackupCoordinationStageSync::initializeState() +{ + std::lock_guard lock{mutex}; + auto now = std::chrono::system_clock::now(); + auto monotonic_now = std::chrono::steady_clock::now(); + + for (const String & host : all_hosts) + state.hosts.emplace(host, HostInfo{.host = host, .last_connection_time = now, .last_connection_time_monotonic = monotonic_now}); +} + + +String BackupCoordinationStageSync::getHostDesc(const String & host) +{ + String res; + if (host.empty()) + { + res = "the initiator"; + } + else + { + try + { + res = "host "; + Poco::URI::decode(host, res); /// Append the decoded host name to `res`. + } + catch (const Poco::URISyntaxException &) + { + res = "host " + host; + } + } + return res; +} + + +String BackupCoordinationStageSync::getHostsDesc(const Strings & hosts) +{ + String res = "["; + for (const String & host : hosts) + { + if (res != "[") + res += ", "; + res += getHostDesc(host); + } + res += "]"; + return res; +} + + void BackupCoordinationStageSync::createRootNodes() { - auto holder = with_retries.createRetriesControlHolder("createRootNodes"); + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::createRootNodes", WithRetries::kInitialization); holder.retries_ctl.retryLoop( [&, &zookeeper = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zookeeper); + zookeeper->createAncestors(root_zookeeper_path); + zookeeper->createIfNotExists(root_zookeeper_path, ""); + }); +} + + +void BackupCoordinationStageSync::createStartAndAliveNodes() +{ + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::createStartAndAliveNodes", WithRetries::kInitialization); + holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() { with_retries.renewZooKeeper(zookeeper); - zookeeper->createAncestors(zookeeper_path); - zookeeper->createIfNotExists(zookeeper_path, ""); + createStartAndAliveNodes(zookeeper); }); } -void BackupCoordinationStageSync::set(const String & current_host, const String & new_stage, const String & message, const bool & all_hosts) -{ - auto holder = with_retries.createRetriesControlHolder("set"); - holder.retries_ctl.retryLoop( - [&, &zookeeper = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zookeeper); - if (all_hosts) +void BackupCoordinationStageSync::createStartAndAliveNodes(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + /// The "num_hosts" node keeps the number of hosts which started (created the "started" node) + /// but not yet finished (not created the "finished" node). + /// The number of alive hosts can be less than that. + + /// The "alive_tracker" node always keeps an empty string, we track its version only. + /// The "alive_tracker" node increases its version each time when any "alive" nodes are created + /// so we use it to check concurrent backups/restores. 
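/// The comments above describe the coordination scheme: 'num_hosts' counts hosts which have started but not yet
/// finished, and the version of the empty 'alive_tracker' node is bumped on every registration so that a
/// concurrency check made against a stale version fails and is retried. What follows is only an illustrative
/// sketch of that version-guard pattern (the helper name and paths are invented, and it assumes an already
/// connected zkutil::ZooKeeperPtr); the real registration below additionally maintains 'num_hosts' and the
/// 'start' node in the same multi-request.
bool tryRegisterAliveSketch(const zkutil::ZooKeeperPtr & zk, const std::string & alive_tracker_path, const std::string & alive_path)
{
    zk->createIfNotExists(alive_tracker_path, "");
    for (size_t attempt = 0; attempt < 10; ++attempt)
    {
        /// Remember the version of 'alive_tracker' before checking for concurrent operations.
        Coordination::Stat stat;
        zk->exists(alive_tracker_path, &stat);

        /// ... a concurrency check would scan other operations for 'alive|*' nodes here ...

        Coordination::Requests requests;
        /// Setting 'alive_tracker' with the remembered version fails with ZBADVERSION if any other
        /// host registered in the meantime, which forces the check above to be repeated.
        requests.emplace_back(zkutil::makeSetRequest(alive_tracker_path, "", stat.version));
        requests.emplace_back(zkutil::makeCreateRequest(alive_path, "", zkutil::CreateMode::Ephemeral));

        Coordination::Responses responses;
        auto code = zk->tryMulti(requests, responses);
        if (code == Coordination::Error::ZOK)
            return true;
        if (code != Coordination::Error::ZBADVERSION)
            throw zkutil::KeeperException::fromPath(code, alive_tracker_path);
    }
    return false;
}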
+ zookeeper->createIfNotExists(alive_tracker_node_path, ""); + + std::optional num_hosts; + int num_hosts_version = -1; + + bool check_concurrency = !allow_concurrency; + int alive_tracker_version = -1; + + for (size_t attempt_no = 1; attempt_no <= max_attempts_after_bad_version; ++attempt_no) + { + if (!num_hosts) { - auto code = zookeeper->trySet(zookeeper_path, new_stage); - if (code != Coordination::Error::ZOK) - throw zkutil::KeeperException::fromPath(code, zookeeper_path); + String num_hosts_str; + Coordination::Stat stat; + if (zookeeper->tryGet(num_hosts_node_path, num_hosts_str, &stat)) + { + num_hosts = parseFromString(num_hosts_str); + num_hosts_version = stat.version; + } + } + + String serialized_error; + if (zookeeper->tryGet(error_node_path, serialized_error)) + { + auto [exception, host] = parseErrorNode(serialized_error); + if (exception) + std::rethrow_exception(exception); + } + + if (check_concurrency) + { + Coordination::Stat stat; + zookeeper->exists(alive_tracker_node_path, &stat); + alive_tracker_version = stat.version; + + checkConcurrency(zookeeper); + check_concurrency = false; + } + + Coordination::Requests requests; + requests.reserve(6); + + size_t operation_node_path_pos = static_cast(-1); + if (!zookeeper->exists(operation_node_path)) + { + operation_node_path_pos = requests.size(); + requests.emplace_back(zkutil::makeCreateRequest(operation_node_path, "", zkutil::CreateMode::Persistent)); + } + + size_t stage_node_path_pos = static_cast(-1); + if (!zookeeper->exists(stage_node_path)) + { + stage_node_path_pos = requests.size(); + requests.emplace_back(zkutil::makeCreateRequest(stage_node_path, "", zkutil::CreateMode::Persistent)); + } + + size_t num_hosts_node_path_pos = requests.size(); + if (num_hosts) + requests.emplace_back(zkutil::makeSetRequest(num_hosts_node_path, toString(*num_hosts + 1), num_hosts_version)); + else + requests.emplace_back(zkutil::makeCreateRequest(num_hosts_node_path, "1", zkutil::CreateMode::Persistent)); + + size_t alive_tracker_node_path_pos = requests.size(); + requests.emplace_back(zkutil::makeSetRequest(alive_tracker_node_path, "", alive_tracker_version)); + + requests.emplace_back(zkutil::makeCreateRequest(start_node_path, std::to_string(kCurrentVersion), zkutil::CreateMode::Persistent)); + requests.emplace_back(zkutil::makeCreateRequest(alive_node_path, "", zkutil::CreateMode::Ephemeral)); + + Coordination::Responses responses; + auto code = zookeeper->tryMulti(requests, responses); + + if (code == Coordination::Error::ZOK) + { + LOG_INFO(log, "Created start node #{} in ZooKeeper for {} (coordination version: {})", + num_hosts.value_or(0) + 1, current_host_desc, kCurrentVersion); + return; + } + + auto show_error_before_next_attempt = [&](const String & message) + { + bool will_try_again = (attempt_no < max_attempts_after_bad_version); + LOG_TRACE(log, "{} (attempt #{}){}", message, attempt_no, will_try_again ? 
", will try again" : ""); + }; + + if ((responses.size() > operation_node_path_pos) && + (responses[operation_node_path_pos]->error == Coordination::Error::ZNODEEXISTS)) + { + show_error_before_next_attempt(fmt::format("Node {} in ZooKeeper already exists", operation_node_path)); + /// needs another attempt + } + else if ((responses.size() > stage_node_path_pos) && + (responses[stage_node_path_pos]->error == Coordination::Error::ZNODEEXISTS)) + { + show_error_before_next_attempt(fmt::format("Node {} in ZooKeeper already exists", stage_node_path)); + /// needs another attempt + } + else if ((responses.size() > num_hosts_node_path_pos) && num_hosts && + (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZBADVERSION)) + { + show_error_before_next_attempt("Other host changed the 'num_hosts' node in ZooKeeper"); + num_hosts.reset(); /// needs to reread 'num_hosts' again + } + else if ((responses.size() > num_hosts_node_path_pos) && num_hosts && + (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZNONODE)) + { + show_error_before_next_attempt("Other host removed the 'num_hosts' node in ZooKeeper"); + num_hosts.reset(); /// needs to reread 'num_hosts' again + } + else if ((responses.size() > num_hosts_node_path_pos) && !num_hosts && + (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZNODEEXISTS)) + { + show_error_before_next_attempt("Other host created the 'num_hosts' node in ZooKeeper"); + /// needs another attempt + } + else if ((responses.size() > alive_tracker_node_path_pos) && + (responses[alive_tracker_node_path_pos]->error == Coordination::Error::ZBADVERSION)) + { + show_error_before_next_attempt("Concurrent backup or restore changed some 'alive' nodes in ZooKeeper"); + check_concurrency = true; /// needs to recheck for concurrency again } else { - zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, ""); - zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message); + zkutil::KeeperMultiException::check(code, requests, responses); } + } + + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "Couldn't create the 'start' node in ZooKeeper for {} after {} attempts", + current_host_desc, max_attempts_after_bad_version); +} + + +void BackupCoordinationStageSync::checkConcurrency(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + if (allow_concurrency) + return; + + Strings found_operations; + auto code = zookeeper->tryGetChildren(root_zookeeper_path, found_operations); + + if (!((code == Coordination::Error::ZOK) || (code == Coordination::Error::ZNONODE))) + throw zkutil::KeeperException::fromPath(code, root_zookeeper_path); + + if (code == Coordination::Error::ZNONODE) + return; + + for (const String & found_operation : found_operations) + { + if (found_operation.starts_with(is_restore ? 
"restore-" : "backup-") && (found_operation != operation_node_name)) + { + Strings stages; + code = zookeeper->tryGetChildren(fs::path{root_zookeeper_path} / found_operation / "stage", stages); + + if (!((code == Coordination::Error::ZOK) || (code == Coordination::Error::ZNONODE))) + throw zkutil::KeeperException::fromPath(code, fs::path{root_zookeeper_path} / found_operation / "stage"); + + if (code == Coordination::Error::ZOK) + { + for (const String & stage : stages) + { + if (stage.starts_with("alive")) + BackupConcurrencyCheck::throwConcurrentOperationNotAllowed(is_restore); + } + } + } + } +} + + +void BackupCoordinationStageSync::startWatchingThread() +{ + watching_thread_future = schedule([this]() { watchingThread(); }, Priority{}); +} + + +void BackupCoordinationStageSync::stopWatchingThread() +{ + should_stop_watching_thread = true; + + /// Wake up waiting threads. + if (zk_nodes_changed) + zk_nodes_changed->set(); + state_changed.notify_all(); + + if (watching_thread_future.valid()) + watching_thread_future.wait(); +} + + +void BackupCoordinationStageSync::watchingThread() +{ + while (!should_stop_watching_thread) + { + try + { + /// Check if the current BACKUP or RESTORE command is already cancelled. + checkIfQueryCancelled(); + + /// Reset the `connected` flag for each host, we'll set them to true again after we find the 'alive' nodes. + resetConnectedFlag(); + + /// Recreate the 'alive' node if necessary and read a new state from ZooKeeper. + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::watchingThread"); + auto & zookeeper = holder.faulty_zookeeper; + with_retries.renewZooKeeper(zookeeper); + + if (should_stop_watching_thread) + return; + + /// Recreate the 'alive' node if it was removed. + createAliveNode(zookeeper); + + /// Reads the current state from nodes in ZooKeeper. + readCurrentState(zookeeper); + } + catch (...) + { + tryLogCurrentException(log, "Caugth exception while watching"); + } + + try + { + /// Cancel the query if there is an error on another host or if some host was disconnected too long. + cancelQueryIfError(); + cancelQueryIfDisconnectedTooLong(); + } + catch (...) + { + tryLogCurrentException(log, "Caugth exception while checking if the query should be cancelled"); + } + + zk_nodes_changed->tryWait(sync_period_ms.count()); + } +} + + +void BackupCoordinationStageSync::createAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + if (zookeeper->exists(alive_node_path)) + return; + + Coordination::Requests requests; + requests.emplace_back(zkutil::makeCreateRequest(alive_node_path, "", zkutil::CreateMode::Ephemeral)); + requests.emplace_back(zkutil::makeSetRequest(alive_tracker_node_path, "", -1)); + zookeeper->multi(requests); + + LOG_INFO(log, "The alive node was recreated for {}", current_host_desc); +} + + +void BackupCoordinationStageSync::resetConnectedFlag() +{ + std::lock_guard lock{mutex}; + for (auto & [_, host_info] : state.hosts) + host_info.connected = false; +} + + +void BackupCoordinationStageSync::readCurrentState(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + zk_nodes_changed->reset(); + + /// Get zk nodes and subscribe on their changes. + Strings new_zk_nodes = zookeeper->getChildren(stage_node_path, nullptr, zk_nodes_changed); + std::sort(new_zk_nodes.begin(), new_zk_nodes.end()); /// Sorting is necessary because we compare the list of zk nodes with its previous versions. 
+ + State new_state; + + { + std::lock_guard lock{mutex}; + + /// Log all changes in zookeeper nodes in the "stage" folder to make debugging easier. + Strings added_zk_nodes, removed_zk_nodes; + std::set_difference(new_zk_nodes.begin(), new_zk_nodes.end(), zk_nodes.begin(), zk_nodes.end(), back_inserter(added_zk_nodes)); + std::set_difference(zk_nodes.begin(), zk_nodes.end(), new_zk_nodes.begin(), new_zk_nodes.end(), back_inserter(removed_zk_nodes)); + if (!added_zk_nodes.empty()) + LOG_TRACE(log, "Detected new zookeeper nodes appeared in the stage folder: {}", boost::algorithm::join(added_zk_nodes, ", ")); + if (!removed_zk_nodes.empty()) + LOG_TRACE(log, "Detected that some zookeeper nodes disappeared from the stage folder: {}", boost::algorithm::join(removed_zk_nodes, ", ")); + + zk_nodes = new_zk_nodes; + new_state = state; + } + + auto get_host_info = [&](const String & host) -> HostInfo * + { + auto it = new_state.hosts.find(host); + if (it == new_state.hosts.end()) + return nullptr; + return &it->second; + }; + + auto now = std::chrono::system_clock::now(); + auto monotonic_now = std::chrono::steady_clock::now(); + + /// Read the current state from zookeeper nodes. + for (const auto & zk_node : new_zk_nodes) + { + if (zk_node == "error") + { + if (!new_state.host_with_error) + { + String serialized_error = zookeeper->get(error_node_path); + auto [exception, host] = parseErrorNode(serialized_error); + if (auto * host_info = get_host_info(host)) + { + host_info->exception = exception; + new_state.host_with_error = host; + } + } + } + else if (zk_node.starts_with("started|")) + { + String host = zk_node.substr(strlen("started|")); + if (auto * host_info = get_host_info(host)) + { + if (!host_info->started) + { + host_info->version = parseStartNode(zookeeper->get(zookeeper_path / zk_node), host); + host_info->started = true; + } + } + } + else if (zk_node.starts_with("finished|")) + { + String host = zk_node.substr(strlen("finished|")); + if (auto * host_info = get_host_info(host)) + host_info->finished = true; + } + else if (zk_node.starts_with("alive|")) + { + String host = zk_node.substr(strlen("alive|")); + if (auto * host_info = get_host_info(host)) + { + host_info->connected = true; + host_info->last_connection_time = now; + host_info->last_connection_time_monotonic = monotonic_now; + } + } + else if (zk_node.starts_with("current|")) + { + String host_and_stage = zk_node.substr(strlen("current|")); + size_t separator_pos = host_and_stage.find('|'); + if (separator_pos != String::npos) + { + String host = host_and_stage.substr(0, separator_pos); + String stage = host_and_stage.substr(separator_pos + 1); + if (auto * host_info = get_host_info(host)) + { + String result = zookeeper->get(fs::path{zookeeper_path} / zk_node); + host_info->stages[stage] = std::move(result); + + /// That old version didn't create the 'finish' node so we consider that a host finished its work + /// if it reached the "completed" stage. + if ((host_info->version == kVersionWithoutFinishNode) && (stage == BackupCoordinationStage::COMPLETED)) + host_info->finished = true; + } + } + } + } + + /// Check if the state has been just changed, and if so then wake up waiting threads (see waitHostsReachStage()). 
+ bool was_state_changed = false; + + { + std::lock_guard lock{mutex}; + was_state_changed = (new_state != state); + state = std::move(new_state); + } + + if (was_state_changed) + state_changed.notify_all(); +} + + +int BackupCoordinationStageSync::parseStartNode(const String & start_node_contents, const String & host) const +{ + int version; + if (start_node_contents.empty()) + { + version = kInitialVersion; + } + else if (!tryParse(version, start_node_contents) || (version < kInitialVersion)) + { + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "Coordination version {} used by {} is not supported", start_node_contents, getHostDesc(host)); + } + + if (version < kCurrentVersion) + LOG_WARNING(log, "Coordination version {} used by {} is outdated", version, getHostDesc(host)); + return version; +} + + +std::pair BackupCoordinationStageSync::parseErrorNode(const String & error_node_contents) +{ + ReadBufferFromOwnString buf{error_node_contents}; + String host; + readStringBinary(host, buf); + auto exception = std::make_exception_ptr(readException(buf, fmt::format("Got error from {}", getHostDesc(host)))); + return {exception, host}; +} + + +void BackupCoordinationStageSync::checkIfQueryCancelled() +{ + if (process_list_element->checkTimeLimitSoft()) + return; /// Not cancelled. + + std::lock_guard lock{mutex}; + if (state.cancelled) + return; /// Already marked as cancelled. + + state.cancelled = true; + state_changed.notify_all(); +} + + +void BackupCoordinationStageSync::cancelQueryIfError() +{ + std::exception_ptr exception; + + { + std::lock_guard lock{mutex}; + if (state.cancelled || !state.host_with_error) + return; + + state.cancelled = true; + exception = state.hosts.at(*state.host_with_error).exception; + } + + process_list_element->cancelQuery(false, exception); + state_changed.notify_all(); +} + + +void BackupCoordinationStageSync::cancelQueryIfDisconnectedTooLong() +{ + std::exception_ptr exception; + + { + std::lock_guard lock{mutex}; + if (state.cancelled || state.host_with_error || ((failure_after_host_disconnected_for_seconds.count() == 0))) + return; + + auto monotonic_now = std::chrono::steady_clock::now(); + bool info_shown = false; + + for (auto & [host, host_info] : state.hosts) + { + if (!host_info.connected && !host_info.finished && (host != current_host)) + { + auto disconnected_duration = std::chrono::duration_cast(monotonic_now - host_info.last_connection_time_monotonic); + if (disconnected_duration > failure_after_host_disconnected_for_seconds) + { + /// Host `host` was disconnected too long. + /// We can't just throw an exception here because readCurrentState() is called from a background thread. + /// So here we're writing the error to the `process_list_element` and letting it be thrown later + /// from `process_list_element->checkTimeLimit()`. + String message = fmt::format("The 'alive' node hasn't been updated in ZooKeeper for {} for {} " + "which is more than the specified timeout {}. 
Last time the 'alive' node was detected at {}", + getHostDesc(host), disconnected_duration, failure_after_host_disconnected_for_seconds, + host_info.last_connection_time); + LOG_WARNING(log, "Lost connection to {}: {}", getHostDesc(host), message); + exception = std::make_exception_ptr(Exception{ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Lost connection to {}: {}", getHostDesc(host), message}); + break; + } + + if ((disconnected_duration >= std::chrono::seconds{1}) && !info_shown) + { + LOG_TRACE(log, "The 'alive' node hasn't been updated in ZooKeeper for {} for {}", getHostDesc(host), disconnected_duration); + info_shown = true; + } + } + } + + if (!exception) + return; + + state.cancelled = true; + } + + process_list_element->cancelQuery(false, exception); + state_changed.notify_all(); +} + + +void BackupCoordinationStageSync::setStage(const String & stage, const String & stage_result) +{ + LOG_INFO(log, "{} reached stage {}", current_host_desc, stage); + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::setStage"); + holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zookeeper); + zookeeper->createIfNotExists(getStageNodePath(stage), stage_result); }); } -void BackupCoordinationStageSync::setError(const String & current_host, const Exception & exception) + +String BackupCoordinationStageSync::getStageNodePath(const String & stage) const { - auto holder = with_retries.createRetriesControlHolder("setError"); - holder.retries_ctl.retryLoop( - [&, &zookeeper = holder.faulty_zookeeper]() + return fs::path{zookeeper_path} / ("current|" + current_host + "|" + stage); +} + + +bool BackupCoordinationStageSync::trySetError(std::exception_ptr exception) noexcept +{ + try + { + std::rethrow_exception(exception); + } + catch (const Exception & e) + { + return trySetError(e); + } + catch (...) + { + return trySetError(Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode())); + } +} + + +bool BackupCoordinationStageSync::trySetError(const Exception & exception) +{ + try + { + setError(exception); + return true; + } + catch (...) + { + return false; + } +} + + +void BackupCoordinationStageSync::setError(const Exception & exception) +{ + /// Most likely this exception has been already logged so here we're logging it without stacktrace. + String exception_message = getExceptionMessage(exception, /* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true); + LOG_INFO(log, "Sending exception from {} to other hosts: {}", current_host_desc, exception_message); + + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::setError", WithRetries::kErrorHandling); + + holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() { with_retries.renewZooKeeper(zookeeper); WriteBufferFromOwnString buf; writeStringBinary(current_host, buf); writeException(exception, buf, true); - zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str()); + auto code = zookeeper->tryCreate(error_node_path, buf.str(), zkutil::CreateMode::Persistent); - /// When backup/restore fails, it removes the nodes from Zookeeper. - /// Sometimes it fails to remove all nodes. It's possible that it removes /error node, but fails to remove /stage node, - /// so the following line tries to preserve the error status. 
- auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR); - if (code != Coordination::Error::ZOK) - throw zkutil::KeeperException::fromPath(code, zookeeper_path); + if (code == Coordination::Error::ZOK) + { + LOG_TRACE(log, "Sent exception from {} to other hosts", current_host_desc); + } + else if (code == Coordination::Error::ZNODEEXISTS) + { + LOG_INFO(log, "An error has been already assigned for this {}", operation_name); + } + else + { + throw zkutil::KeeperException::fromPath(code, error_node_path); + } }); } -Strings BackupCoordinationStageSync::wait(const Strings & all_hosts, const String & stage_to_wait) + +Strings BackupCoordinationStageSync::waitForHostsToReachStage(const String & stage_to_wait, const Strings & hosts, std::optional timeout) const { - return waitImpl(all_hosts, stage_to_wait, {}); -} - -Strings BackupCoordinationStageSync::waitFor(const Strings & all_hosts, const String & stage_to_wait, std::chrono::milliseconds timeout) -{ - return waitImpl(all_hosts, stage_to_wait, timeout); -} - -namespace -{ - struct UnreadyHost - { - String host; - bool started = false; - }; -} - -struct BackupCoordinationStageSync::State -{ - std::optional results; - std::optional> error; - std::optional disconnected_host; - std::optional unready_host; -}; - -BackupCoordinationStageSync::State BackupCoordinationStageSync::readCurrentState( - WithRetries::RetriesControlHolder & retries_control_holder, - const Strings & zk_nodes, - const Strings & all_hosts, - const String & stage_to_wait) const -{ - auto zookeeper = retries_control_holder.faulty_zookeeper; - auto & retries_ctl = retries_control_holder.retries_ctl; - - std::unordered_set zk_nodes_set{zk_nodes.begin(), zk_nodes.end()}; - - State state; - if (zk_nodes_set.contains("error")) - { - String errors = zookeeper->get(zookeeper_path + "/error"); - ReadBufferFromOwnString buf{errors}; - String host; - readStringBinary(host, buf); - state.error = std::make_pair(host, readException(buf, fmt::format("Got error from {}", host))); - return state; - } - - std::optional unready_host; - - for (const auto & host : all_hosts) - { - if (!zk_nodes_set.contains("current|" + host + "|" + stage_to_wait)) - { - const String started_node_name = "started|" + host; - const String alive_node_name = "alive|" + host; - - bool started = zk_nodes_set.contains(started_node_name); - bool alive = zk_nodes_set.contains(alive_node_name); - - if (!alive) - { - /// If the "alive" node doesn't exist then we don't have connection to the corresponding host. - /// This node is ephemeral so probably it will be recreated soon. We use zookeeper retries to wait. - /// In worst case when we won't manage to see the alive node for a long time we will just abort the backup. - const auto * const suffix = retries_ctl.isLastRetry() ? 
"" : ", will retry"; - if (started) - retries_ctl.setUserError(Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, - "Lost connection to host {}{}", host, suffix)); - else - retries_ctl.setUserError(Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, - "No connection to host {} yet{}", host, suffix)); - - state.disconnected_host = host; - return state; - } - - if (!unready_host) - unready_host.emplace(UnreadyHost{.host = host, .started = started}); - } - } - - if (unready_host) - { - state.unready_host = std::move(unready_host); - return state; - } - Strings results; - for (const auto & host : all_hosts) - results.emplace_back(zookeeper->get(zookeeper_path + "/current|" + host + "|" + stage_to_wait)); - state.results = std::move(results); + results.resize(hosts.size()); - return state; + std::unique_lock lock{mutex}; + + /// TSA_NO_THREAD_SAFETY_ANALYSIS is here because Clang Thread Safety Analysis doesn't understand std::unique_lock. + auto check_if_hosts_ready = [&](bool time_is_out) TSA_NO_THREAD_SAFETY_ANALYSIS + { + return checkIfHostsReachStage(hosts, stage_to_wait, time_is_out, timeout, results); + }; + + if (timeout) + { + if (!state_changed.wait_for(lock, *timeout, [&] { return check_if_hosts_ready(/* time_is_out = */ false); })) + check_if_hosts_ready(/* time_is_out = */ true); + } + else + { + state_changed.wait(lock, [&] { return check_if_hosts_ready(/* time_is_out = */ false); }); + } + + return results; } -Strings BackupCoordinationStageSync::waitImpl( - const Strings & all_hosts, const String & stage_to_wait, std::optional timeout) const + +bool BackupCoordinationStageSync::checkIfHostsReachStage( + const Strings & hosts, + const String & stage_to_wait, + bool time_is_out, + std::optional timeout, + Strings & results) const { - if (all_hosts.empty()) - return {}; + if (should_stop_watching_thread) + throw Exception(ErrorCodes::LOGICAL_ERROR, "finish() was called while waiting for a stage"); - /// Wait until all hosts are ready or an error happens or time is out. + process_list_element->checkTimeLimit(); - bool use_timeout = timeout.has_value(); - std::chrono::steady_clock::time_point end_of_timeout; - if (use_timeout) - end_of_timeout = std::chrono::steady_clock::now() + std::chrono::duration_cast(*timeout); - - State state; - for (;;) + for (size_t i = 0; i != hosts.size(); ++i) { - LOG_INFO(log, "Waiting for the stage {}", stage_to_wait); - /// Set by ZooKepper when list of zk nodes have changed. - auto watch = std::make_shared(); - Strings zk_nodes; - { - auto holder = with_retries.createRetriesControlHolder("waitImpl"); - holder.retries_ctl.retryLoop( - [&, &zookeeper = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zookeeper); - watch->reset(); - /// Get zk nodes and subscribe on their changes. - zk_nodes = zookeeper->getChildren(zookeeper_path, nullptr, watch); + const String & host = hosts[i]; + auto it = state.hosts.find(host); - /// Read the current state of zk nodes. - state = readCurrentState(holder, zk_nodes, all_hosts, stage_to_wait); - }); + if (it == state.hosts.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "waitForHostsToReachStage() was called for unexpected {}, all hosts are {}", getHostDesc(host), getHostsDesc(all_hosts)); + + const HostInfo & host_info = it->second; + auto stage_it = host_info.stages.find(stage_to_wait); + if (stage_it != host_info.stages.end()) + { + results[i] = stage_it->second; + continue; } - /// Analyze the current state of zk nodes. 
- chassert(state.results || state.error || state.disconnected_host || state.unready_host); - - if (state.results || state.error || state.disconnected_host) - break; /// Everything is ready or error happened. - - /// Log what we will wait. - const auto & unready_host = *state.unready_host; - LOG_INFO(log, "Waiting on ZooKeeper watch for any node to be changed (currently waiting for host {}{})", - unready_host.host, - (!unready_host.started ? " which didn't start the operation yet" : "")); - - /// Wait until `watch_callback` is called by ZooKeeper meaning that zk nodes have changed. + if (host_info.finished) { - if (use_timeout) + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "{} finished without coming to stage {}", getHostDesc(host), stage_to_wait); + } + + String host_status; + if (!host_info.started) + host_status = fmt::format(": the host hasn't started working on this {} yet", operation_name); + else if (!host_info.connected) + host_status = fmt::format(": the host is currently disconnected, last connection was at {}", host_info.last_connection_time); + + if (!time_is_out) + { + LOG_TRACE(log, "Waiting for {} to reach stage {}{}", getHostDesc(host), stage_to_wait, host_status); + return false; + } + else + { + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "Waited longer than timeout {} for {} to reach stage {}{}", + *timeout, getHostDesc(host), stage_to_wait, host_status); + } + } + + LOG_INFO(log, "Hosts {} reached stage {}", getHostsDesc(hosts), stage_to_wait); + return true; +} + + +void BackupCoordinationStageSync::finish(bool & other_hosts_also_finished) +{ + tryFinishImpl(other_hosts_also_finished, /* throw_if_error = */ true, /* retries_kind = */ WithRetries::kNormal); +} + + +bool BackupCoordinationStageSync::tryFinishAfterError(bool & other_hosts_also_finished) noexcept +{ + return tryFinishImpl(other_hosts_also_finished, /* throw_if_error = */ false, /* retries_kind = */ WithRetries::kErrorHandling); +} + + +bool BackupCoordinationStageSync::tryFinishImpl() +{ + bool other_hosts_also_finished; + return tryFinishAfterError(other_hosts_also_finished); +} + + +bool BackupCoordinationStageSync::tryFinishImpl(bool & other_hosts_also_finished, bool throw_if_error, WithRetries::Kind retries_kind) +{ + auto get_value_other_hosts_also_finished = [&] TSA_REQUIRES(mutex) + { + other_hosts_also_finished = true; + for (const auto & [host, host_info] : state.hosts) + { + if ((host != current_host) && !host_info.finished) + other_hosts_also_finished = false; + } + }; + + { + std::lock_guard lock{mutex}; + if (finish_result.succeeded) + { + get_value_other_hosts_also_finished(); + return true; + } + if (finish_result.exception) + { + if (throw_if_error) + std::rethrow_exception(finish_result.exception); + return false; + } + } + + try + { + stopWatchingThread(); + + auto holder = with_retries.createRetriesControlHolder("BackupStageSync::finish", retries_kind); + holder.retries_ctl.retryLoop([&, &zookeeper = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zookeeper); + createFinishNodeAndRemoveAliveNode(zookeeper); + }); + + std::lock_guard lock{mutex}; + finish_result.succeeded = true; + get_value_other_hosts_also_finished(); + return true; + } + catch (...) 
+ { + LOG_TRACE(log, "Caught exception while creating the 'finish' node for {}: {}", + current_host_desc, + getCurrentExceptionMessage(/* with_stacktrace= */ false, /* check_embedded_stacktrace= */ true)); + + std::lock_guard lock{mutex}; + finish_result.exception = std::current_exception(); + if (throw_if_error) + throw; + return false; + } +} + + +void BackupCoordinationStageSync::createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper) +{ + if (zookeeper->exists(finish_node_path)) + return; + + /// If the initiator of the query has that old version then it doesn't expect us to create the 'finish' node and moreover + /// the initiator can start removing all the nodes immediately after all hosts report about reaching the "completed" status. + /// So to avoid weird errors in the logs we won't create the 'finish' node if the initiator of the query has that old version. + if ((getInitiatorVersion() == kVersionWithoutFinishNode) && (current_host != kInitiator)) + { + LOG_INFO(log, "Skipped creating the 'finish' node because the initiator uses outdated version {}", getInitiatorVersion()); + return; + } + + std::optional num_hosts; + int num_hosts_version = -1; + + for (size_t attempt_no = 1; attempt_no <= max_attempts_after_bad_version; ++attempt_no) + { + if (!num_hosts) + { + Coordination::Stat stat; + num_hosts = parseFromString(zookeeper->get(num_hosts_node_path, &stat)); + num_hosts_version = stat.version; + } + + Coordination::Requests requests; + requests.reserve(3); + + requests.emplace_back(zkutil::makeCreateRequest(finish_node_path, "", zkutil::CreateMode::Persistent)); + + size_t num_hosts_node_path_pos = requests.size(); + requests.emplace_back(zkutil::makeSetRequest(num_hosts_node_path, toString(*num_hosts - 1), num_hosts_version)); + + size_t alive_node_path_pos = static_cast(-1); + if (zookeeper->exists(alive_node_path)) + { + alive_node_path_pos = requests.size(); + requests.emplace_back(zkutil::makeRemoveRequest(alive_node_path, -1)); + } + + Coordination::Responses responses; + auto code = zookeeper->tryMulti(requests, responses); + + if (code == Coordination::Error::ZOK) + { + --*num_hosts; + String hosts_left_desc = ((*num_hosts == 0) ? "no hosts left" : fmt::format("{} hosts left", *num_hosts)); + LOG_INFO(log, "Created the 'finish' node in ZooKeeper for {}, {}", current_host_desc, hosts_left_desc); + return; + } + + auto show_error_before_next_attempt = [&](const String & message) + { + bool will_try_again = (attempt_no < max_attempts_after_bad_version); + LOG_TRACE(log, "{} (attempt #{}){}", message, attempt_no, will_try_again ? 
", will try again" : ""); + }; + + if ((responses.size() > num_hosts_node_path_pos) && + (responses[num_hosts_node_path_pos]->error == Coordination::Error::ZBADVERSION)) + { + show_error_before_next_attempt("Other host changed the 'num_hosts' node in ZooKeeper"); + num_hosts.reset(); /// needs to reread 'num_hosts' again + } + else if ((responses.size() > alive_node_path_pos) && + (responses[alive_node_path_pos]->error == Coordination::Error::ZNONODE)) + { + show_error_before_next_attempt(fmt::format("Node {} in ZooKeeper doesn't exist", alive_node_path_pos)); + /// needs another attempt + } + else + { + zkutil::KeeperMultiException::check(code, requests, responses); + } + } + + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "Couldn't create the 'finish' node for {} after {} attempts", + current_host_desc, max_attempts_after_bad_version); +} + + +int BackupCoordinationStageSync::getInitiatorVersion() const +{ + std::lock_guard lock{mutex}; + auto it = state.hosts.find(String{kInitiator}); + if (it == state.hosts.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no initiator of this {} query, it's a bug", operation_name); + const HostInfo & host_info = it->second; + return host_info.version; +} + + +void BackupCoordinationStageSync::waitForOtherHostsToFinish() const +{ + tryWaitForOtherHostsToFinishImpl(/* reason = */ "", /* throw_if_error = */ true, /* timeout = */ {}); +} + + +bool BackupCoordinationStageSync::tryWaitForOtherHostsToFinishAfterError() const noexcept +{ + std::optional timeout; + if (finish_timeout_after_error.count() != 0) + timeout = finish_timeout_after_error; + + String reason = fmt::format("{} needs other hosts to finish before cleanup", current_host_desc); + return tryWaitForOtherHostsToFinishImpl(reason, /* throw_if_error = */ false, timeout); +} + + +bool BackupCoordinationStageSync::tryWaitForOtherHostsToFinishImpl(const String & reason, bool throw_if_error, std::optional timeout) const +{ + std::unique_lock lock{mutex}; + + /// TSA_NO_THREAD_SAFETY_ANALYSIS is here because Clang Thread Safety Analysis doesn't understand std::unique_lock. + auto check_if_other_hosts_finish = [&](bool time_is_out) TSA_NO_THREAD_SAFETY_ANALYSIS + { + return checkIfOtherHostsFinish(reason, throw_if_error, time_is_out, timeout); + }; + + if (timeout) + { + if (state_changed.wait_for(lock, *timeout, [&] { return check_if_other_hosts_finish(/* time_is_out = */ false); })) + return true; + return check_if_other_hosts_finish(/* time_is_out = */ true); + } + else + { + state_changed.wait(lock, [&] { return check_if_other_hosts_finish(/* time_is_out = */ false); }); + return true; + } +} + + +bool BackupCoordinationStageSync::checkIfOtherHostsFinish(const String & reason, bool throw_if_error, bool time_is_out, std::optional timeout) const +{ + if (should_stop_watching_thread) + throw Exception(ErrorCodes::LOGICAL_ERROR, "finish() was called while waiting for other hosts to finish"); + + if (throw_if_error) + process_list_element->checkTimeLimit(); + + for (const auto & [host, host_info] : state.hosts) + { + if ((host == current_host) || host_info.finished) + continue; + + String host_status; + if (!host_info.started) + host_status = fmt::format(": the host hasn't started working on this {} yet", operation_name); + else if (!host_info.connected) + host_status = fmt::format(": the host is currently disconnected, last connection was at {}", host_info.last_connection_time); + + if (!time_is_out) + { + String reason_text = reason.empty() ? 
"" : (" because " + reason); + LOG_TRACE(log, "Waiting for {} to finish{}{}", getHostDesc(host), reason_text, host_status); + return false; + } + else + { + String reason_text = reason.empty() ? "" : fmt::format(" (reason of waiting: {})", reason); + if (!throw_if_error) { - auto current_time = std::chrono::steady_clock::now(); - if ((current_time > end_of_timeout) - || !watch->tryWait(std::chrono::duration_cast(end_of_timeout - current_time).count())) - break; + LOG_INFO(log, "Waited longer than timeout {} for {} to finish{}{}", + *timeout, getHostDesc(host), host_status, reason_text); + return false; } else { - watch->wait(); + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "Waited longer than timeout {} for {} to finish{}{}", + *timeout, getHostDesc(host), host_status, reason_text); } } } - /// Rethrow an error raised originally on another host. - if (state.error) - state.error->second.rethrow(); - - /// Another host terminated without errors. - if (state.disconnected_host) - throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "No connection to host {}", *state.disconnected_host); - - /// Something's unready, timeout is probably not enough. - if (state.unready_host) - { - const auto & unready_host = *state.unready_host; - throw Exception( - ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, - "Waited for host {} too long (> {}){}", - unready_host.host, - to_string(*timeout), - unready_host.started ? "" : ": Operation didn't start"); - } - - LOG_TRACE(log, "Everything is Ok. All hosts achieved stage {}", stage_to_wait); - return std::move(*state.results); + LOG_TRACE(log, "Other hosts finished working on this {}", operation_name); + return true; } } diff --git a/src/Backups/BackupCoordinationStageSync.h b/src/Backups/BackupCoordinationStageSync.h index a06c5c61041..dc0d3c3c83d 100644 --- a/src/Backups/BackupCoordinationStageSync.h +++ b/src/Backups/BackupCoordinationStageSync.h @@ -10,33 +10,193 @@ class BackupCoordinationStageSync { public: BackupCoordinationStageSync( - const String & root_zookeeper_path_, - WithRetries & with_retries_, + bool is_restore_, /// true if this is a RESTORE ON CLUSTER command, false if this is a BACKUP ON CLUSTER command + const String & zookeeper_path_, /// path to the "stage" folder in ZooKeeper + const String & current_host_, /// the current host, or an empty string if it's the initiator of the BACKUP/RESTORE ON CLUSTER command + const Strings & all_hosts_, /// all the hosts (including the initiator and the current host) performing the BACKUP/RESTORE ON CLUSTER command + bool allow_concurrency_, /// whether it's allowed to have concurrent backups or restores. + const WithRetries & with_retries_, + ThreadPoolCallbackRunnerUnsafe schedule_, + QueryStatusPtr process_list_element_, LoggerPtr log_); + ~BackupCoordinationStageSync(); + /// Sets the stage of the current host and signal other hosts if there were other hosts waiting for that. - void set(const String & current_host, const String & new_stage, const String & message, const bool & all_hosts = false); - void setError(const String & current_host, const Exception & exception); + void setStage(const String & stage, const String & stage_result = {}); - /// Sets the stage of the current host and waits until all hosts come to the same stage. - /// The function returns the messages all hosts set when they come to the required stage. - Strings wait(const Strings & all_hosts, const String & stage_to_wait); + /// Waits until all the specified hosts come to the specified stage. 
+ /// The function returns the results which specified hosts set when they came to the required stage. + /// If it doesn't happen before the timeout then the function will stop waiting and throw an exception. + Strings waitForHostsToReachStage(const String & stage_to_wait, const Strings & hosts, std::optional timeout = {}) const; - /// Almost the same as setAndWait() but this one stops waiting and throws an exception after a specific amount of time. - Strings waitFor(const Strings & all_hosts, const String & stage_to_wait, std::chrono::milliseconds timeout); + /// Waits until all the other hosts finish their work. + /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. + void waitForOtherHostsToFinish() const; + + /// Lets other hosts know that the current host has finished its work. + void finish(bool & other_hosts_also_finished); + + /// Lets other hosts know that the current host has encountered an error. + bool trySetError(std::exception_ptr exception) noexcept; + + /// Waits until all the other hosts finish their work (as part of the error-handling process). + /// Doesn't stop waiting if some host encounters an error or gets cancelled. + bool tryWaitForOtherHostsToFinishAfterError() const noexcept; + + /// Lets other hosts know that the current host has finished its work (as part of the error-handling process). + bool tryFinishAfterError(bool & other_hosts_also_finished) noexcept; + + /// Returns a printable name of a specific host. For an empty host the function returns "initiator". + static String getHostDesc(const String & host); + static String getHostsDesc(const Strings & hosts); private: + /// Initializes the state. It will then be updated by readCurrentState(). + void initializeState(); + + /// Creates the root node in ZooKeeper. void createRootNodes(); - struct State; - State readCurrentState(WithRetries::RetriesControlHolder & retries_control_holder, const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const; + /// Atomically creates both 'start' and 'alive' nodes and also checks that there is no concurrent backup or restore if `allow_concurrency` is false. + void createStartAndAliveNodes(); + void createStartAndAliveNodes(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); - Strings waitImpl(const Strings & all_hosts, const String & stage_to_wait, std::optional timeout) const; + /// Deserializes the version stored in the 'start' node. + int parseStartNode(const String & start_node_contents, const String & host) const; - String zookeeper_path; - /// A reference to the field of parent object - BackupCoordinationRemote or RestoreCoordinationRemote - WithRetries & with_retries; - LoggerPtr log; + /// Recreates the 'alive' node if it doesn't exist. It's an ephemeral node so it's removed automatically after disconnections. + void createAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); + + /// Checks that there is no concurrent backup or restore if `allow_concurrency` is false. + void checkConcurrency(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); + + /// Watching thread periodically reads the current state from ZooKeeper and recreates the 'alive' node. + void startWatchingThread(); + void stopWatchingThread(); + void watchingThread(); + + /// Reads the current state from ZooKeeper without throwing exceptions. 
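
createStartAndAliveNodes() and createFinishNodeAndRemoveAliveNode() both follow the same optimistic-concurrency loop: read a node together with its version, issue a conditional write, and on ZBADVERSION reread and retry up to max_attempts_after_bad_version times. Below is a minimal standalone sketch of that loop, with an in-memory stand-in for the versioned ZooKeeper node; VersionedValue and decrementWithRetries are hypothetical names used only for illustration.

#include <cstddef>
#include <iostream>
#include <mutex>
#include <optional>
#include <stdexcept>

/// A tiny stand-in for a versioned ZooKeeper node: every successful write bumps the version,
/// and a write with a stale expected version is rejected (the moral equivalent of ZBADVERSION).
class VersionedValue
{
public:
    struct Snapshot { int value; int version; };

    Snapshot get() const
    {
        std::lock_guard lock{mutex};
        return {value, version};
    }

    bool trySet(int new_value, int expected_version)
    {
        std::lock_guard lock{mutex};
        if (version != expected_version)
            return false;          /// Someone else changed the value since we read it.
        value = new_value;
        ++version;
        return true;
    }

private:
    mutable std::mutex mutex;
    int value = 0;
    int version = 0;
};

/// Decrements the counter with the same read / conditional-write / retry loop
/// that the coordination code uses for the 'num_hosts' node.
void decrementWithRetries(VersionedValue & num_hosts, size_t max_attempts)
{
    std::optional<VersionedValue::Snapshot> snapshot;
    for (size_t attempt = 1; attempt <= max_attempts; ++attempt)
    {
        if (!snapshot)
            snapshot = num_hosts.get();       /// (Re)read the current value and its version.

        if (num_hosts.trySet(snapshot->value - 1, snapshot->version))
            return;                           /// Conditional write succeeded.

        snapshot.reset();                     /// Stale version: reread and try again.
    }
    throw std::runtime_error("Couldn't decrement the counter: too many concurrent changes");
}

int main()
{
    VersionedValue num_hosts;
    num_hosts.trySet(3, 0);
    decrementWithRetries(num_hosts, /* max_attempts = */ 10);
    std::cout << "num_hosts is now " << num_hosts.get().value << '\n';   /// Prints 2.
}
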
+ void readCurrentState(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); + String getStageNodePath(const String & stage) const; + + /// Lets other hosts know that the current host has encountered an error. + bool trySetError(const Exception & exception); + void setError(const Exception & exception); + + /// Deserializes an error stored in the error node. + static std::pair parseErrorNode(const String & error_node_contents); + + /// Reset the `connected` flag for each host. + void resetConnectedFlag(); + + /// Checks if the current query is cancelled, and if so then the function sets the `cancelled` flag in the current state. + void checkIfQueryCancelled(); + + /// Checks if the current state contains an error, and if so then the function passes this error to the query status + /// to cancel the current BACKUP or RESTORE command. + void cancelQueryIfError(); + + /// Checks if some host was disconnected for too long, and if so then the function generates an error and pass it to the query status + /// to cancel the current BACKUP or RESTORE command. + void cancelQueryIfDisconnectedTooLong(); + + /// Used by waitForHostsToReachStage() to check if everything is ready to return. + bool checkIfHostsReachStage(const Strings & hosts, const String & stage_to_wait, bool time_is_out, std::optional timeout, Strings & results) const TSA_REQUIRES(mutex); + + /// Creates the 'finish' node. + bool tryFinishImpl(); + bool tryFinishImpl(bool & other_hosts_also_finished, bool throw_if_error, WithRetries::Kind retries_kind); + void createFinishNodeAndRemoveAliveNode(Coordination::ZooKeeperWithFaultInjection::Ptr zookeeper); + + /// Returns the version used by the initiator. + int getInitiatorVersion() const; + + /// Waits until all the other hosts finish their work. + bool tryWaitForOtherHostsToFinishImpl(const String & reason, bool throw_if_error, std::optional timeout) const; + bool checkIfOtherHostsFinish(const String & reason, bool throw_if_error, bool time_is_out, std::optional timeout) const TSA_REQUIRES(mutex); + + const bool is_restore; + const String operation_name; + const String current_host; + const String current_host_desc; + const Strings all_hosts; + const bool allow_concurrency; + + /// A reference to a field of the parent object which is either BackupCoordinationOnCluster or RestoreCoordinationOnCluster. + const WithRetries & with_retries; + + const ThreadPoolCallbackRunnerUnsafe schedule; + const QueryStatusPtr process_list_element; + const LoggerPtr log; + + const std::chrono::seconds failure_after_host_disconnected_for_seconds; + const std::chrono::seconds finish_timeout_after_error; + const std::chrono::milliseconds sync_period_ms; + const size_t max_attempts_after_bad_version; + + /// Paths in ZooKeeper. + const std::filesystem::path zookeeper_path; + const String root_zookeeper_path; + const String operation_node_path; + const String operation_node_name; + const String stage_node_path; + const String start_node_path; + const String finish_node_path; + const String num_hosts_node_path; + const String alive_node_path; + const String alive_tracker_node_path; + const String error_node_path; + + std::shared_ptr zk_nodes_changed; + + /// We store list of previously found ZooKeeper nodes to show better logging messages. + Strings zk_nodes; + + /// Information about one host read from ZooKeeper. 
+ struct HostInfo + { + String host; + bool started = false; + bool connected = false; + bool finished = false; + int version = 1; + std::map stages = {}; /// std::map because we need to compare states + std::exception_ptr exception = nullptr; + + std::chrono::time_point last_connection_time = {}; + std::chrono::time_point last_connection_time_monotonic = {}; + + bool operator ==(const HostInfo & other) const; + bool operator !=(const HostInfo & other) const; + }; + + /// Information about all the host participating in the current BACKUP or RESTORE operation. + struct State + { + std::map hosts; /// std::map because we need to compare states + std::optional host_with_error; + bool cancelled = false; + + bool operator ==(const State & other) const; + bool operator !=(const State & other) const; + }; + + State state TSA_GUARDED_BY(mutex); + mutable std::condition_variable state_changed; + + std::future watching_thread_future; + std::atomic should_stop_watching_thread = false; + + struct FinishResult + { + bool succeeded = false; + std::exception_ptr exception; + bool other_hosts_also_finished = false; + }; + FinishResult finish_result TSA_GUARDED_BY(mutex); + + mutable std::mutex mutex; }; } diff --git a/src/Backups/BackupEntriesCollector.cpp b/src/Backups/BackupEntriesCollector.cpp index ae73630d41c..00a4471d994 100644 --- a/src/Backups/BackupEntriesCollector.cpp +++ b/src/Backups/BackupEntriesCollector.cpp @@ -102,7 +102,6 @@ BackupEntriesCollector::BackupEntriesCollector( , read_settings(read_settings_) , context(context_) , process_list_element(context->getProcessListElement()) - , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000)) , collect_metadata_timeout(context->getConfigRef().getUInt64( "backups.collect_metadata_timeout", context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000))) , attempts_to_collect_metadata_before_sleep(context->getConfigRef().getUInt("backups.attempts_to_collect_metadata_before_sleep", 2)) @@ -176,21 +175,7 @@ Strings BackupEntriesCollector::setStage(const String & new_stage, const String checkIsQueryCancelled(); current_stage = new_stage; - backup_coordination->setStage(new_stage, message); - - if (new_stage == Stage::formatGatheringMetadata(0)) - { - return backup_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout); - } - if (new_stage.starts_with(Stage::GATHERING_METADATA)) - { - auto current_time = std::chrono::steady_clock::now(); - auto end_of_timeout = std::max(current_time, collect_metadata_end_time); - return backup_coordination->waitForStage( - new_stage, std::chrono::duration_cast(end_of_timeout - current_time)); - } - - return backup_coordination->waitForStage(new_stage); + return backup_coordination->setStage(new_stage, message, /* sync = */ true); } void BackupEntriesCollector::checkIsQueryCancelled() const diff --git a/src/Backups/BackupEntriesCollector.h b/src/Backups/BackupEntriesCollector.h index ae076a84c8b..504489cce6b 100644 --- a/src/Backups/BackupEntriesCollector.h +++ b/src/Backups/BackupEntriesCollector.h @@ -111,10 +111,6 @@ private: ContextPtr context; QueryStatusPtr process_list_element; - /// The time a BACKUP ON CLUSTER or RESTORE ON CLUSTER command will wait until all the nodes receive the BACKUP (or RESTORE) query and start working. - /// This setting is similar to `distributed_ddl_task_timeout`. 
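
The header keeps all per-host information in a mutex-guarded State, pairs it with a condition_variable, and has a watching thread refresh that state every sync_period_ms until should_stop_watching_thread is set. The following is a reduced, standard-library-only sketch of that thread/flag/condition-variable arrangement; StateWatcher is an illustrative class, and the real readCurrentState()/createAliveNode() work is replaced by a simple counter.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

/// Periodically refreshes some shared state and wakes up anyone waiting on it.
class StateWatcher
{
public:
    void start()
    {
        watching_thread = std::thread([this] { watchingThread(); });
    }

    void stop()
    {
        should_stop_watching_thread = true;
        state_changed.notify_all();          /// Wake up waiters so they notice the stop flag too.
        if (watching_thread.joinable())
            watching_thread.join();
    }

    /// Blocks until the state reaches the target value (or the watcher is stopped).
    void waitForValue(int target) const
    {
        std::unique_lock lock{mutex};
        state_changed.wait(lock, [&] { return should_stop_watching_thread || state >= target; });
    }

private:
    void watchingThread()
    {
        while (!should_stop_watching_thread)
        {
            {
                std::lock_guard lock{mutex};
                ++state;                     /// The real code would re-read ZooKeeper and recreate the 'alive' node here.
            }
            state_changed.notify_all();
            std::this_thread::sleep_for(sync_period);
        }
    }

    const std::chrono::milliseconds sync_period{100};
    mutable std::mutex mutex;
    mutable std::condition_variable state_changed;
    int state = 0;
    std::atomic<bool> should_stop_watching_thread = false;
    std::thread watching_thread;
};

int main()
{
    StateWatcher watcher;
    watcher.start();
    watcher.waitForValue(3);
    std::cout << "state reached the target\n";
    watcher.stop();          /// Always stop before destruction so the thread is joined.
}
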
- const std::chrono::milliseconds on_cluster_first_sync_timeout; - /// The time a BACKUP command will try to collect the metadata of tables & databases. const std::chrono::milliseconds collect_metadata_timeout; diff --git a/src/Backups/BackupIO.h b/src/Backups/BackupIO.h index ee2f38c785b..c9e0f25f9a0 100644 --- a/src/Backups/BackupIO.h +++ b/src/Backups/BackupIO.h @@ -5,6 +5,7 @@ namespace DB { + class IDisk; using DiskPtr = std::shared_ptr; class SeekableReadBuffer; @@ -63,9 +64,13 @@ public: virtual void copyFile(const String & destination, const String & source, size_t size) = 0; + /// Removes a file written to the backup, if it still exists. virtual void removeFile(const String & file_name) = 0; virtual void removeFiles(const Strings & file_names) = 0; + /// Removes the backup folder if it's empty or contains empty subfolders. + virtual void removeEmptyDirectories() = 0; + virtual const ReadSettings & getReadSettings() const = 0; virtual const WriteSettings & getWriteSettings() const = 0; virtual size_t getWriteBufferSize() const = 0; diff --git a/src/Backups/BackupIO_AzureBlobStorage.h b/src/Backups/BackupIO_AzureBlobStorage.h index c3b88f245ab..c90a030a1e7 100644 --- a/src/Backups/BackupIO_AzureBlobStorage.h +++ b/src/Backups/BackupIO_AzureBlobStorage.h @@ -81,6 +81,7 @@ public: void removeFile(const String & file_name) override; void removeFiles(const Strings & file_names) override; + void removeEmptyDirectories() override {} private: std::unique_ptr readFile(const String & file_name, size_t expected_file_size) override; diff --git a/src/Backups/BackupIO_Disk.cpp b/src/Backups/BackupIO_Disk.cpp index aeb07b154f5..794fb5be936 100644 --- a/src/Backups/BackupIO_Disk.cpp +++ b/src/Backups/BackupIO_Disk.cpp @@ -91,16 +91,36 @@ std::unique_ptr BackupWriterDisk::writeFile(const String & file_nam void BackupWriterDisk::removeFile(const String & file_name) { disk->removeFileIfExists(root_path / file_name); - if (disk->existsDirectory(root_path) && disk->isDirectoryEmpty(root_path)) - disk->removeDirectory(root_path); } void BackupWriterDisk::removeFiles(const Strings & file_names) { for (const auto & file_name : file_names) disk->removeFileIfExists(root_path / file_name); - if (disk->existsDirectory(root_path) && disk->isDirectoryEmpty(root_path)) - disk->removeDirectory(root_path); +} + +void BackupWriterDisk::removeEmptyDirectories() +{ + removeEmptyDirectoriesImpl(root_path); +} + +void BackupWriterDisk::removeEmptyDirectoriesImpl(const fs::path & current_dir) +{ + if (!disk->existsDirectory(current_dir)) + return; + + if (disk->isDirectoryEmpty(current_dir)) + { + disk->removeDirectory(current_dir); + return; + } + + /// Backups are not too deep, so recursion is good enough here. 
+ for (auto it = disk->iterateDirectory(current_dir); it->isValid(); it->next()) + removeEmptyDirectoriesImpl(current_dir / it->name()); + + if (disk->isDirectoryEmpty(current_dir)) + disk->removeDirectory(current_dir); } void BackupWriterDisk::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path, diff --git a/src/Backups/BackupIO_Disk.h b/src/Backups/BackupIO_Disk.h index 3d3253877bd..c77513935a9 100644 --- a/src/Backups/BackupIO_Disk.h +++ b/src/Backups/BackupIO_Disk.h @@ -50,9 +50,11 @@ public: void removeFile(const String & file_name) override; void removeFiles(const Strings & file_names) override; + void removeEmptyDirectories() override; private: std::unique_ptr readFile(const String & file_name, size_t expected_file_size) override; + void removeEmptyDirectoriesImpl(const std::filesystem::path & current_dir); const DiskPtr disk; const std::filesystem::path root_path; diff --git a/src/Backups/BackupIO_File.cpp b/src/Backups/BackupIO_File.cpp index 681513bf7ce..80f084d241c 100644 --- a/src/Backups/BackupIO_File.cpp +++ b/src/Backups/BackupIO_File.cpp @@ -106,16 +106,36 @@ std::unique_ptr BackupWriterFile::writeFile(const String & file_nam void BackupWriterFile::removeFile(const String & file_name) { (void)fs::remove(root_path / file_name); - if (fs::is_directory(root_path) && fs::is_empty(root_path)) - (void)fs::remove(root_path); } void BackupWriterFile::removeFiles(const Strings & file_names) { for (const auto & file_name : file_names) (void)fs::remove(root_path / file_name); - if (fs::is_directory(root_path) && fs::is_empty(root_path)) - (void)fs::remove(root_path); +} + +void BackupWriterFile::removeEmptyDirectories() +{ + removeEmptyDirectoriesImpl(root_path); +} + +void BackupWriterFile::removeEmptyDirectoriesImpl(const fs::path & current_dir) +{ + if (!fs::is_directory(current_dir)) + return; + + if (fs::is_empty(current_dir)) + { + (void)fs::remove(current_dir); + return; + } + + /// Backups are not too deep, so recursion is good enough here. 
+ for (const auto & it : std::filesystem::directory_iterator{current_dir}) + removeEmptyDirectoriesImpl(it.path()); + + if (fs::is_empty(current_dir)) + (void)fs::remove(current_dir); } void BackupWriterFile::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path, diff --git a/src/Backups/BackupIO_File.h b/src/Backups/BackupIO_File.h index ebe9a0f02cb..a2169ac7b4b 100644 --- a/src/Backups/BackupIO_File.h +++ b/src/Backups/BackupIO_File.h @@ -42,9 +42,11 @@ public: void removeFile(const String & file_name) override; void removeFiles(const Strings & file_names) override; + void removeEmptyDirectories() override; private: std::unique_ptr readFile(const String & file_name, size_t expected_file_size) override; + void removeEmptyDirectoriesImpl(const std::filesystem::path & current_dir); const std::filesystem::path root_path; const DataSourceDescription data_source_description; diff --git a/src/Backups/BackupIO_S3.h b/src/Backups/BackupIO_S3.h index a04f1c915b9..4ccf477b369 100644 --- a/src/Backups/BackupIO_S3.h +++ b/src/Backups/BackupIO_S3.h @@ -74,6 +74,7 @@ public: void removeFile(const String & file_name) override; void removeFiles(const Strings & file_names) override; + void removeEmptyDirectories() override {} private: std::unique_ptr readFile(const String & file_name, size_t expected_file_size) override; diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index b95a2e10b4d..af3fa5531b8 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -147,11 +147,11 @@ BackupImpl::BackupImpl( BackupImpl::~BackupImpl() { - if ((open_mode == OpenMode::WRITE) && !is_internal_backup && !writing_finalized && !std::uncaught_exceptions() && !std::current_exception()) + if ((open_mode == OpenMode::WRITE) && !writing_finalized && !corrupted) { /// It is suspicious to destroy BackupImpl without finalization while writing a backup when there is no exception. - LOG_ERROR(log, "BackupImpl is not finalized when destructor is called. Stack trace: {}", StackTrace().toString()); - chassert(false && "BackupImpl is not finalized when destructor is called."); + LOG_ERROR(log, "BackupImpl is not finalized or marked as corrupted when destructor is called. Stack trace: {}", StackTrace().toString()); + chassert(false, "BackupImpl is not finalized or marked as corrupted when destructor is called."); } try @@ -196,9 +196,6 @@ void BackupImpl::open() if (open_mode == OpenMode::READ) readBackupMetadata(); - - if ((open_mode == OpenMode::WRITE) && base_backup_info) - base_backup_uuid = getBaseBackupUnlocked()->getUUID(); } void BackupImpl::close() @@ -280,6 +277,8 @@ std::shared_ptr BackupImpl::getBaseBackupUnlocked() const toString(base_backup->getUUID()), (base_backup_uuid ? toString(*base_backup_uuid) : "")); } + + base_backup_uuid = base_backup->getUUID(); } return base_backup; } @@ -369,7 +368,7 @@ void BackupImpl::writeBackupMetadata() if (base_backup_in_use) { *out << "" << xml << base_backup_info->toString() << ""; - *out << "" << toString(*base_backup_uuid) << ""; + *out << "" << getBaseBackupUnlocked()->getUUID() << ""; } } @@ -594,9 +593,6 @@ bool BackupImpl::checkLockFile(bool throw_if_failed) const void BackupImpl::removeLockFile() { - if (is_internal_backup) - return; /// Internal backup must not remove the lock file (it's still used by the initiator). 
- if (checkLockFile(false)) writer->removeFile(lock_file_name); } @@ -989,8 +985,11 @@ void BackupImpl::finalizeWriting() if (open_mode != OpenMode::WRITE) throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for writing"); + if (corrupted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup can't be finalized after an error happened"); + if (writing_finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is already finalized"); + return; if (!is_internal_backup) { @@ -1015,20 +1014,58 @@ void BackupImpl::setCompressedSize() } -void BackupImpl::tryRemoveAllFiles() +bool BackupImpl::setIsCorrupted() noexcept { - if (open_mode != OpenMode::WRITE) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for writing"); - - if (is_internal_backup) - return; - try { - LOG_INFO(log, "Removing all files of backup {}", backup_name_for_logging); + std::lock_guard lock{mutex}; + if (open_mode != OpenMode::WRITE) + { + LOG_ERROR(log, "Backup is not opened for writing. Stack trace: {}", StackTrace().toString()); + chassert(false, "Backup is not opened for writing when setIsCorrupted() is called"); + return false; + } + + if (writing_finalized) + { + LOG_WARNING(log, "An error happened after the backup was completed successfully, the backup must be correct!"); + return false; + } + + if (corrupted) + return true; + + LOG_WARNING(log, "An error happened, the backup won't be completed"); + closeArchive(/* finalize= */ false); + corrupted = true; + return true; + } + catch (...) + { + DB::tryLogCurrentException(log, "Caught exception while setting that the backup was corrupted"); + return false; + } +} + + +bool BackupImpl::tryRemoveAllFiles() noexcept +{ + try + { + std::lock_guard lock{mutex}; + if (!corrupted) + { + LOG_ERROR(log, "Backup is not set as corrupted. Stack trace: {}", StackTrace().toString()); + chassert(false, "Backup is not set as corrupted when tryRemoveAllFiles() is called"); + return false; + } + + LOG_INFO(log, "Removing all files of backup {}", backup_name_for_logging); + Strings files_to_remove; + if (use_archive) { files_to_remove.push_back(archive_params.archive_name); @@ -1041,14 +1078,17 @@ void BackupImpl::tryRemoveAllFiles() } if (!checkLockFile(false)) - return; + return false; writer->removeFiles(files_to_remove); removeLockFile(); + writer->removeEmptyDirectories(); + return true; } catch (...) 
{ - DB::tryLogCurrentException(__PRETTY_FUNCTION__); + DB::tryLogCurrentException(log, "Caught exception while removing files of a corrupted backup"); + return false; } } diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h index d7846104c4c..4b0f9f879ec 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -86,7 +86,8 @@ public: void writeFile(const BackupFileInfo & info, BackupEntryPtr entry) override; bool supportsWritingInMultipleThreads() const override { return !use_archive; } void finalizeWriting() override; - void tryRemoveAllFiles() override; + bool setIsCorrupted() noexcept override; + bool tryRemoveAllFiles() noexcept override; private: void open(); @@ -146,13 +147,14 @@ private: int version; mutable std::optional base_backup_info; mutable std::shared_ptr base_backup; - std::optional base_backup_uuid; + mutable std::optional base_backup_uuid; std::shared_ptr archive_reader; std::shared_ptr archive_writer; String lock_file_name; std::atomic lock_file_before_first_file_checked = false; bool writing_finalized = false; + bool corrupted = false; bool deduplicate_files = true; bool use_same_s3_credentials_for_base_backup = false; bool use_same_password_for_base_backup = false; diff --git a/src/Backups/BackupKeeperSettings.cpp b/src/Backups/BackupKeeperSettings.cpp new file mode 100644 index 00000000000..180633cea1f --- /dev/null +++ b/src/Backups/BackupKeeperSettings.cpp @@ -0,0 +1,58 @@ +#include + +#include +#include +#include + + +namespace DB +{ + +namespace Setting +{ + extern const SettingsUInt64 backup_restore_keeper_max_retries; + extern const SettingsUInt64 backup_restore_keeper_retry_initial_backoff_ms; + extern const SettingsUInt64 backup_restore_keeper_retry_max_backoff_ms; + extern const SettingsUInt64 backup_restore_failure_after_host_disconnected_for_seconds; + extern const SettingsUInt64 backup_restore_keeper_max_retries_while_initializing; + extern const SettingsUInt64 backup_restore_keeper_max_retries_while_handling_error; + extern const SettingsUInt64 backup_restore_finish_timeout_after_error_sec; + extern const SettingsUInt64 backup_restore_keeper_value_max_size; + extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multi; + extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multiread; + extern const SettingsFloat backup_restore_keeper_fault_injection_probability; + extern const SettingsUInt64 backup_restore_keeper_fault_injection_seed; +} + +BackupKeeperSettings BackupKeeperSettings::fromContext(const ContextPtr & context) +{ + BackupKeeperSettings keeper_settings; + + const auto & settings = context->getSettingsRef(); + const auto & config = context->getConfigRef(); + + keeper_settings.max_retries = settings[Setting::backup_restore_keeper_max_retries]; + keeper_settings.retry_initial_backoff_ms = std::chrono::milliseconds{settings[Setting::backup_restore_keeper_retry_initial_backoff_ms]}; + keeper_settings.retry_max_backoff_ms = std::chrono::milliseconds{settings[Setting::backup_restore_keeper_retry_max_backoff_ms]}; + + keeper_settings.failure_after_host_disconnected_for_seconds = std::chrono::seconds{settings[Setting::backup_restore_failure_after_host_disconnected_for_seconds]}; + keeper_settings.max_retries_while_initializing = settings[Setting::backup_restore_keeper_max_retries_while_initializing]; + keeper_settings.max_retries_while_handling_error = settings[Setting::backup_restore_keeper_max_retries_while_handling_error]; + keeper_settings.finish_timeout_after_error = 
std::chrono::seconds(settings[Setting::backup_restore_finish_timeout_after_error_sec]); + + if (config.has("backups.sync_period_ms")) + keeper_settings.sync_period_ms = std::chrono::milliseconds{config.getUInt64("backups.sync_period_ms")}; + + if (config.has("backups.max_attempts_after_bad_version")) + keeper_settings.max_attempts_after_bad_version = config.getUInt64("backups.max_attempts_after_bad_version"); + + keeper_settings.value_max_size = settings[Setting::backup_restore_keeper_value_max_size]; + keeper_settings.batch_size_for_multi = settings[Setting::backup_restore_batch_size_for_keeper_multi]; + keeper_settings.batch_size_for_multiread = settings[Setting::backup_restore_batch_size_for_keeper_multiread]; + keeper_settings.fault_injection_probability = settings[Setting::backup_restore_keeper_fault_injection_probability]; + keeper_settings.fault_injection_seed = settings[Setting::backup_restore_keeper_fault_injection_seed]; + + return keeper_settings; +} + +} diff --git a/src/Backups/BackupKeeperSettings.h b/src/Backups/BackupKeeperSettings.h new file mode 100644 index 00000000000..6c4b2187094 --- /dev/null +++ b/src/Backups/BackupKeeperSettings.h @@ -0,0 +1,64 @@ +#pragma once + +#include + + +namespace DB +{ + +/// Settings for [Zoo]Keeper-related works during BACKUP or RESTORE. +struct BackupKeeperSettings +{ + /// Maximum number of retries in the middle of a BACKUP ON CLUSTER or RESTORE ON CLUSTER operation. + /// Should be big enough so the whole operation won't be cancelled in the middle of it because of a temporary ZooKeeper failure. + UInt64 max_retries{1000}; + + /// Initial backoff timeout for ZooKeeper operations during backup or restore. + std::chrono::milliseconds retry_initial_backoff_ms{100}; + + /// Max backoff timeout for ZooKeeper operations during backup or restore. + std::chrono::milliseconds retry_max_backoff_ms{5000}; + + /// If a host during BACKUP ON CLUSTER or RESTORE ON CLUSTER doesn't recreate its 'alive' node in ZooKeeper + /// for this amount of time then the whole backup or restore is considered as failed. + /// Should be bigger than any reasonable time for a host to reconnect to ZooKeeper after a failure. + /// Set to zero to disable (if it's zero and some host crashed then BACKUP ON CLUSTER or RESTORE ON CLUSTER will be waiting + /// for the crashed host forever until the operation is explicitly cancelled with KILL QUERY). + std::chrono::seconds failure_after_host_disconnected_for_seconds{3600}; + + /// Maximum number of retries during the initialization of a BACKUP ON CLUSTER or RESTORE ON CLUSTER operation. + /// Shouldn't be too big because if the operation is going to fail then it's better if it fails faster. + UInt64 max_retries_while_initializing{20}; + + /// Maximum number of retries while handling an error of a BACKUP ON CLUSTER or RESTORE ON CLUSTER operation. + /// Shouldn't be too big because those retries are just for cleanup after the operation has failed already. + UInt64 max_retries_while_handling_error{20}; + + /// How long the initiator should wait for other host to handle the 'error' node and finish their work. + std::chrono::seconds finish_timeout_after_error{180}; + + /// How often the "stage" folder in ZooKeeper must be scanned in a background thread to track changes done by other hosts. + std::chrono::milliseconds sync_period_ms{5000}; + + /// Number of attempts after getting error ZBADVERSION from ZooKeeper. + size_t max_attempts_after_bad_version{10}; + + /// Maximum size of data of a ZooKeeper's node during backup. 
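
BackupKeeperSettings keeps its defaults in the struct itself, and fromContext() overrides only the values that the query settings or the server configuration actually provide. A compact sketch of that defaults-plus-optional-override pattern, assuming a plain string map in place of the real configuration object; KeeperRetrySettings is an illustrative name.

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

/// Defaults live in the struct; fromConfig() only overrides what the configuration provides,
/// mirroring how sync_period_ms and max_attempts_after_bad_version are handled above.
struct KeeperRetrySettings
{
    uint64_t max_retries = 1000;
    std::chrono::milliseconds sync_period{5000};
    size_t max_attempts_after_bad_version = 10;

    static KeeperRetrySettings fromConfig(const std::map<std::string, std::string> & config)
    {
        KeeperRetrySettings settings;

        if (auto it = config.find("backups.sync_period_ms"); it != config.end())
            settings.sync_period = std::chrono::milliseconds{std::stoull(it->second)};

        if (auto it = config.find("backups.max_attempts_after_bad_version"); it != config.end())
            settings.max_attempts_after_bad_version = std::stoull(it->second);

        return settings;
    }
};

int main()
{
    std::map<std::string, std::string> config{{"backups.sync_period_ms", "1000"}};
    auto settings = KeeperRetrySettings::fromConfig(config);
    std::cout << settings.sync_period.count() << ' '                    /// 1000 (overridden)
              << settings.max_attempts_after_bad_version << '\n';       /// 10 (default kept)
}
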
+ UInt64 value_max_size{1048576}; + + /// Maximum size of a batch for a multi request. + UInt64 batch_size_for_multi{1000}; + + /// Maximum size of a batch for a multiread request. + UInt64 batch_size_for_multiread{10000}; + + /// Approximate probability of failure for a keeper request during backup or restore. Valid value is in interval [0.0f, 1.0f]. + Float64 fault_injection_probability{0}; + + /// Seed for `fault_injection_probability`: 0 - random seed, otherwise the setting value. + UInt64 fault_injection_seed{0}; + + static BackupKeeperSettings fromContext(const ContextPtr & context); +}; + +} diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index 9b8117c6587..915989735c3 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -74,6 +74,17 @@ BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query) return res; } +bool BackupSettings::isAsync(const ASTBackupQuery & query) +{ + if (query.settings) + { + const auto * field = query.settings->as().changes.tryGet("async"); + if (field) + return field->safeGet(); + } + return false; /// `async` is false by default. +} + void BackupSettings::copySettingsToQuery(ASTBackupQuery & query) const { auto query_settings = std::make_shared(); diff --git a/src/Backups/BackupSettings.h b/src/Backups/BackupSettings.h index 8c2ea21df01..fa1e5025935 100644 --- a/src/Backups/BackupSettings.h +++ b/src/Backups/BackupSettings.h @@ -101,6 +101,8 @@ struct BackupSettings static BackupSettings fromBackupQuery(const ASTBackupQuery & query); void copySettingsToQuery(ASTBackupQuery & query) const; + static bool isAsync(const ASTBackupQuery & query); + struct Util { static std::vector clusterHostIDsFromAST(const IAST & ast); diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index d3889295598..8480dc5d64d 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -1,4 +1,6 @@ #include + +#include #include #include #include @@ -6,9 +8,9 @@ #include #include #include -#include +#include #include -#include +#include #include #include #include @@ -43,21 +45,11 @@ namespace CurrentMetrics namespace DB { -namespace Setting -{ - extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multiread; - extern const SettingsUInt64 backup_restore_keeper_max_retries; - extern const SettingsUInt64 backup_restore_keeper_retry_initial_backoff_ms; - extern const SettingsUInt64 backup_restore_keeper_retry_max_backoff_ms; - extern const SettingsUInt64 backup_restore_keeper_fault_injection_seed; - extern const SettingsFloat backup_restore_keeper_fault_injection_probability; -} namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int LOGICAL_ERROR; - extern const int CONCURRENT_ACCESS_NOT_SUPPORTED; extern const int QUERY_WAS_CANCELLED; } @@ -66,102 +58,6 @@ namespace Stage = BackupCoordinationStage; namespace { - std::shared_ptr makeBackupCoordination(const ContextPtr & context, const BackupSettings & backup_settings, bool remote) - { - if (remote) - { - String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups"); - - auto get_zookeeper = [global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); }; - - BackupCoordinationRemote::BackupKeeperSettings keeper_settings = WithRetries::KeeperSettings::fromContext(context); - - auto all_hosts = BackupSettings::Util::filterHostIDs( - backup_settings.cluster_host_ids, backup_settings.shard_num, backup_settings.replica_num); - - 
return std::make_shared( - get_zookeeper, - root_zk_path, - keeper_settings, - toString(*backup_settings.backup_uuid), - all_hosts, - backup_settings.host_id, - !backup_settings.deduplicate_files, - backup_settings.internal, - context->getProcessListElement()); - } - - return std::make_shared(!backup_settings.deduplicate_files); - } - - std::shared_ptr - makeRestoreCoordination(const ContextPtr & context, const RestoreSettings & restore_settings, bool remote) - { - if (remote) - { - String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups"); - - auto get_zookeeper = [global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); }; - - RestoreCoordinationRemote::RestoreKeeperSettings keeper_settings - { - .keeper_max_retries = context->getSettingsRef()[Setting::backup_restore_keeper_max_retries], - .keeper_retry_initial_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_initial_backoff_ms], - .keeper_retry_max_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_max_backoff_ms], - .batch_size_for_keeper_multiread = context->getSettingsRef()[Setting::backup_restore_batch_size_for_keeper_multiread], - .keeper_fault_injection_probability = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_probability], - .keeper_fault_injection_seed = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_seed] - }; - - auto all_hosts = BackupSettings::Util::filterHostIDs( - restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num); - - return std::make_shared( - get_zookeeper, - root_zk_path, - keeper_settings, - toString(*restore_settings.restore_uuid), - all_hosts, - restore_settings.host_id, - restore_settings.internal, - context->getProcessListElement()); - } - - return std::make_shared(); - } - - /// Sends information about an exception to IBackupCoordination or IRestoreCoordination. - template - void sendExceptionToCoordination(std::shared_ptr coordination, const Exception & exception) - { - try - { - if (coordination) - coordination->setError(exception); - } - catch (...) // NOLINT(bugprone-empty-catch) - { - } - } - - /// Sends information about the current exception to IBackupCoordination or IRestoreCoordination. - template - void sendCurrentExceptionToCoordination(std::shared_ptr coordination) - { - try - { - throw; - } - catch (const Exception & e) - { - sendExceptionToCoordination(coordination, e); - } - catch (...) - { - sendExceptionToCoordination(coordination, Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode())); - } - } - bool isFinishedSuccessfully(BackupStatus status) { return (status == BackupStatus::BACKUP_CREATED) || (status == BackupStatus::RESTORED); @@ -262,24 +158,27 @@ namespace /// while the thread pool is still occupied with the waiting task then a scheduled task can be never executed). enum class BackupsWorker::ThreadPoolId : uint8_t { - /// "BACKUP ON CLUSTER ASYNC" waits in background while "BACKUP ASYNC" is finished on the nodes of the cluster, then finalizes the backup. - BACKUP_ASYNC_ON_CLUSTER = 0, + /// Making a list of files to copy or copying those files. + BACKUP, - /// "BACKUP ASYNC" waits in background while all file infos are built and then it copies the backup's files. - BACKUP_ASYNC = 1, + /// Creating of tables and databases during RESTORE and filling them with data. 
+ RESTORE, - /// Making a list of files to copy and copying of those files is always sequential, so those operations can share one thread pool. - BACKUP_MAKE_FILES_LIST = 2, - BACKUP_COPY_FILES = BACKUP_MAKE_FILES_LIST, + /// We need background threads for ASYNC backups and restores. + ASYNC_BACKGROUND_BACKUP, + ASYNC_BACKGROUND_RESTORE, - /// "RESTORE ON CLUSTER ASYNC" waits in background while "BACKUP ASYNC" is finished on the nodes of the cluster, then finalizes the backup. - RESTORE_ASYNC_ON_CLUSTER = 3, + /// We need background threads for coordination workers (see BackgroundCoordinationStageSync). + ON_CLUSTER_COORDINATION_BACKUP, + ON_CLUSTER_COORDINATION_RESTORE, - /// "RESTORE ASYNC" waits in background while the data of all tables are restored. - RESTORE_ASYNC = 4, - - /// Restores from backups. - RESTORE = 5, + /// We need separate threads for internal backups and restores. + /// An internal backup is a helper backup invoked on some shard and replica by a BACKUP ON CLUSTER command, + /// (see BackupSettings.internal); and the same for restores. + ASYNC_BACKGROUND_INTERNAL_BACKUP, + ASYNC_BACKGROUND_INTERNAL_RESTORE, + ON_CLUSTER_COORDINATION_INTERNAL_BACKUP, + ON_CLUSTER_COORDINATION_INTERNAL_RESTORE, }; @@ -312,22 +211,26 @@ public: switch (thread_pool_id) { - case ThreadPoolId::BACKUP_ASYNC: - case ThreadPoolId::BACKUP_ASYNC_ON_CLUSTER: - case ThreadPoolId::BACKUP_COPY_FILES: + case ThreadPoolId::BACKUP: + case ThreadPoolId::ASYNC_BACKGROUND_BACKUP: + case ThreadPoolId::ON_CLUSTER_COORDINATION_BACKUP: + case ThreadPoolId::ASYNC_BACKGROUND_INTERNAL_BACKUP: + case ThreadPoolId::ON_CLUSTER_COORDINATION_INTERNAL_BACKUP: { metric_threads = CurrentMetrics::BackupsThreads; metric_active_threads = CurrentMetrics::BackupsThreadsActive; metric_active_threads = CurrentMetrics::BackupsThreadsScheduled; max_threads = num_backup_threads; /// We don't use thread pool queues for thread pools with a lot of tasks otherwise that queue could be memory-wasting. - use_queue = (thread_pool_id != ThreadPoolId::BACKUP_COPY_FILES); + use_queue = (thread_pool_id != ThreadPoolId::BACKUP); break; } - case ThreadPoolId::RESTORE_ASYNC: - case ThreadPoolId::RESTORE_ASYNC_ON_CLUSTER: case ThreadPoolId::RESTORE: + case ThreadPoolId::ASYNC_BACKGROUND_RESTORE: + case ThreadPoolId::ON_CLUSTER_COORDINATION_RESTORE: + case ThreadPoolId::ASYNC_BACKGROUND_INTERNAL_RESTORE: + case ThreadPoolId::ON_CLUSTER_COORDINATION_INTERNAL_RESTORE: { metric_threads = CurrentMetrics::RestoreThreads; metric_active_threads = CurrentMetrics::RestoreThreadsActive; @@ -352,12 +255,20 @@ public: void wait() { auto wait_sequence = { - ThreadPoolId::RESTORE_ASYNC_ON_CLUSTER, - ThreadPoolId::RESTORE_ASYNC, + /// ASYNC_BACKGROUND_BACKUP must be before ASYNC_BACKGROUND_INTERNAL_BACKUP, + /// ASYNC_BACKGROUND_RESTORE must be before ASYNC_BACKGROUND_INTERNAL_RESTORE, + /// and everything else is after those ones. 
+ ThreadPoolId::ASYNC_BACKGROUND_BACKUP, + ThreadPoolId::ASYNC_BACKGROUND_RESTORE, + ThreadPoolId::ASYNC_BACKGROUND_INTERNAL_BACKUP, + ThreadPoolId::ASYNC_BACKGROUND_INTERNAL_RESTORE, + /// Others: + ThreadPoolId::BACKUP, ThreadPoolId::RESTORE, - ThreadPoolId::BACKUP_ASYNC_ON_CLUSTER, - ThreadPoolId::BACKUP_ASYNC, - ThreadPoolId::BACKUP_COPY_FILES, + ThreadPoolId::ON_CLUSTER_COORDINATION_BACKUP, + ThreadPoolId::ON_CLUSTER_COORDINATION_INTERNAL_BACKUP, + ThreadPoolId::ON_CLUSTER_COORDINATION_RESTORE, + ThreadPoolId::ON_CLUSTER_COORDINATION_INTERNAL_RESTORE, }; for (auto thread_pool_id : wait_sequence) @@ -392,6 +303,7 @@ BackupsWorker::BackupsWorker(ContextMutablePtr global_context, size_t num_backup , log(getLogger("BackupsWorker")) , backup_log(global_context->getBackupLog()) , process_list(global_context->getProcessList()) + , concurrency_counters(std::make_unique()) { } @@ -405,7 +317,7 @@ ThreadPool & BackupsWorker::getThreadPool(ThreadPoolId thread_pool_id) } -OperationID BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutablePtr context) +std::pair BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutablePtr context) { const ASTBackupQuery & backup_query = typeid_cast(*backup_or_restore_query); if (backup_query.kind == ASTBackupQuery::Kind::BACKUP) @@ -414,180 +326,147 @@ OperationID BackupsWorker::start(const ASTPtr & backup_or_restore_query, Context } -OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & context) +struct BackupsWorker::BackupStarter { - auto backup_query = std::static_pointer_cast(query->clone()); - auto backup_settings = BackupSettings::fromBackupQuery(*backup_query); - - auto backup_info = BackupInfo::fromAST(*backup_query->backup_name); - String backup_name_for_logging = backup_info.toStringForLogging(); - - if (!backup_settings.backup_uuid) - backup_settings.backup_uuid = UUIDHelpers::generateV4(); - - /// `backup_id` will be used as a key to the `infos` map, so it should be unique. - OperationID backup_id; - if (backup_settings.internal) - backup_id = "internal-" + toString(UUIDHelpers::generateV4()); /// Always generate `backup_id` for internal backup to avoid collision if both internal and non-internal backups are on the same host - else if (!backup_settings.id.empty()) - backup_id = backup_settings.id; - else - backup_id = toString(*backup_settings.backup_uuid); - + BackupsWorker & backups_worker; + std::shared_ptr backup_query; + ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using. + ContextMutablePtr backup_context; + BackupSettings backup_settings; + BackupInfo backup_info; + String backup_id; + String backup_name_for_logging; + bool on_cluster; + bool is_internal_backup; std::shared_ptr backup_coordination; + ClusterPtr cluster; BackupMutablePtr backup; + std::shared_ptr process_list_element_holder; - /// Called in exception handlers below. This lambda function can be called on a separate thread, so it can't capture local variables by reference. 
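
The wait sequence above drains the pools for asynchronous background operations before the pools those operations schedule work onto, so that a background task cannot queue more work into a pool that has already been drained. A toy illustration of draining pools in a fixed dependency order; Pool and Pools here are simplified stand-ins, not the real ThreadPool class.

#include <future>
#include <iostream>
#include <map>
#include <vector>

enum class PoolId { AsyncBackgroundBackup, Backup };

/// A toy pool: just a bag of futures that can be drained.
struct Pool
{
    std::vector<std::future<void>> tasks;

    void wait()
    {
        for (auto & task : tasks)
            task.get();
        tasks.clear();
    }
};

struct Pools
{
    std::map<PoolId, Pool> pools;

    /// Drain in dependency order: a background BACKUP task may still be scheduling
    /// work onto the BACKUP pool, so the background pool has to be drained first.
    void waitAll()
    {
        for (PoolId id : {PoolId::AsyncBackgroundBackup, PoolId::Backup})
            pools[id].wait();
    }
};

int main()
{
    Pools pools;
    pools.pools[PoolId::Backup].tasks.push_back(std::async(std::launch::async, [] {}));
    pools.pools[PoolId::AsyncBackgroundBackup].tasks.push_back(std::async(std::launch::async, [] {}));
    pools.waitAll();
    std::cout << "all pools drained\n";
}
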
- auto on_exception = [this](BackupMutablePtr & backup_, const OperationID & backup_id_, const String & backup_name_for_logging_, - const BackupSettings & backup_settings_, const std::shared_ptr & backup_coordination_) + BackupStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_) + : backups_worker(backups_worker_) + , backup_query(std::static_pointer_cast(query_->clone())) + , query_context(context_) + , backup_context(Context::createCopy(query_context)) { - /// Something bad happened, the backup has not built. - tryLogCurrentException(log, fmt::format("Failed to make {} {}", (backup_settings_.internal ? "internal backup" : "backup"), backup_name_for_logging_)); - setStatusSafe(backup_id_, getBackupStatusFromCurrentException()); - sendCurrentExceptionToCoordination(backup_coordination_); + backup_context->makeQueryContext(); + backup_settings = BackupSettings::fromBackupQuery(*backup_query); + backup_info = BackupInfo::fromAST(*backup_query->backup_name); + backup_name_for_logging = backup_info.toStringForLogging(); + is_internal_backup = backup_settings.internal; + on_cluster = !backup_query->cluster.empty() || is_internal_backup; - if (backup_ && remove_backup_files_after_failure) - backup_->tryRemoveAllFiles(); - backup_.reset(); - }; + if (!backup_settings.backup_uuid) + backup_settings.backup_uuid = UUIDHelpers::generateV4(); + + /// `backup_id` will be used as a key to the `infos` map, so it should be unique. + if (is_internal_backup) + backup_id = "internal-" + toString(UUIDHelpers::generateV4()); /// Always generate `backup_id` for internal backup to avoid collision if both internal and non-internal backups are on the same host + else if (!backup_settings.id.empty()) + backup_id = backup_settings.id; + else + backup_id = toString(*backup_settings.backup_uuid); - try - { String base_backup_name; if (backup_settings.base_backup_info) base_backup_name = backup_settings.base_backup_info->toStringForLogging(); - addInfo(backup_id, + /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously. + auto process_list_element = backup_context->getProcessListElement(); + if (process_list_element) + process_list_element_holder = process_list_element->getProcessListEntry(); + + backups_worker.addInfo(backup_id, backup_name_for_logging, base_backup_name, - context->getCurrentQueryId(), - backup_settings.internal, - context->getProcessListElement(), + backup_context->getCurrentQueryId(), + is_internal_backup, + process_list_element, BackupStatus::CREATING_BACKUP); + } - if (backup_settings.internal) + void doBackup() + { + chassert(!backup_coordination); + if (on_cluster && !is_internal_backup) { - /// The following call of makeBackupCoordination() is not essential because doBackup() will later create a backup coordination - /// if it's not created here. However to handle errors better it's better to make a coordination here because this way - /// if an exception will be thrown in startMakingBackup() other hosts will know about that. 
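
The new BackupStarter/RestoreStarter design bundles all per-operation state into one object and schedules it through a callback runner, keeping the object alive by capturing a shared_ptr in the scheduled task and routing any failure into onException(). A minimal sketch of that lifetime-and-error-handling pattern, with std::async standing in for the thread pool; Starter and startAsync are illustrative names.

#include <future>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

/// All the state of one asynchronous operation lives in one object that the scheduled task
/// keeps alive through a shared_ptr, so nothing dangles after the caller returns.
struct Starter
{
    std::string id = "backup-42";

    void doWork()
    {
        throw std::runtime_error("simulated failure while writing the backup");
    }

    void onException()
    {
        /// In the real code this is where the coordination is notified and the operation
        /// status is set from the current exception; here we only log it.
        try
        {
            throw;   /// Rethrow the exception currently being handled.
        }
        catch (const std::exception & e)
        {
            std::cout << "operation " << id << " failed: " << e.what() << '\n';
        }
    }
};

std::future<void> startAsync(std::shared_ptr<Starter> starter)
{
    /// The lambda copies the shared_ptr, so `starter` stays alive until the task finishes.
    return std::async(std::launch::async, [starter]
    {
        try
        {
            starter->doWork();
        }
        catch (...)
        {
            starter->onException();
        }
    });
}

int main()
{
    auto future = startAsync(std::make_shared<Starter>());
    future.wait();   /// Errors were already handled inside the task, so wait() is enough.
}
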
- backup_coordination = makeBackupCoordination(context, backup_settings, /* remote= */ true); + backup_query->cluster = backup_context->getMacros()->expand(backup_query->cluster); + cluster = backup_context->getCluster(backup_query->cluster); + backup_settings.cluster_host_ids = cluster->getHostIDs(); + } + backup_coordination = backups_worker.makeBackupCoordination(on_cluster, backup_settings, backup_context); + + chassert(!backup); + backup = backups_worker.openBackupForWriting(backup_info, backup_settings, backup_coordination, backup_context); + + backups_worker.doBackup( + backup, backup_query, backup_id, backup_name_for_logging, backup_settings, backup_coordination, backup_context, + on_cluster, cluster); + } + + void onException() + { + /// Something bad happened, the backup has not built. + tryLogCurrentException(backups_worker.log, fmt::format("Failed to make {} {}", + (is_internal_backup ? "internal backup" : "backup"), + backup_name_for_logging)); + + bool should_remove_files_in_backup = backup && !is_internal_backup && backups_worker.remove_backup_files_after_failure; + + if (backup && !backup->setIsCorrupted()) + should_remove_files_in_backup = false; + + if (backup_coordination && backup_coordination->trySetError(std::current_exception())) + { + bool other_hosts_finished = backup_coordination->tryWaitForOtherHostsToFinishAfterError(); + + if (should_remove_files_in_backup && other_hosts_finished) + backup->tryRemoveAllFiles(); + + backup_coordination->tryFinishAfterError(); } - /// Prepare context to use. - ContextPtr context_in_use = context; - ContextMutablePtr mutable_context; - bool on_cluster = !backup_query->cluster.empty(); - if (on_cluster || backup_settings.async) - { - /// We have to clone the query context here because: - /// if this is an "ON CLUSTER" query we need to change some settings, and - /// if this is an "ASYNC" query it's going to be executed in another thread. - context_in_use = mutable_context = Context::createCopy(context); - mutable_context->makeQueryContext(); - } + backups_worker.setStatusSafe(backup_id, getBackupStatusFromCurrentException()); + } +}; - if (backup_settings.async) - { - auto & thread_pool = getThreadPool(on_cluster ? ThreadPoolId::BACKUP_ASYNC_ON_CLUSTER : ThreadPoolId::BACKUP_ASYNC); - /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously. - auto process_list_element = context_in_use->getProcessListElement(); +std::pair BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & context) +{ + auto starter = std::make_shared(*this, query, context); - thread_pool.scheduleOrThrowOnError( - [this, - backup_query, - backup_id, - backup_name_for_logging, - backup_info, - backup_settings, - backup_coordination, - context_in_use, - mutable_context, - on_exception, - process_list_element_holder = process_list_element ? process_list_element->getProcessListEntry() : nullptr] + try + { + auto thread_pool_id = starter->is_internal_backup ? ThreadPoolId::ASYNC_BACKGROUND_INTERNAL_BACKUP: ThreadPoolId::ASYNC_BACKGROUND_BACKUP; + String thread_name = starter->is_internal_backup ? 
"BackupAsyncInt" : "BackupAsync"; + auto schedule = threadPoolCallbackRunnerUnsafe(thread_pools->getThreadPool(thread_pool_id), thread_name); + + schedule([starter] + { + try { - BackupMutablePtr backup_async; - try - { - setThreadName("BackupWorker"); - CurrentThread::QueryScope query_scope(context_in_use); - doBackup( - backup_async, - backup_query, - backup_id, - backup_name_for_logging, - backup_info, - backup_settings, - backup_coordination, - context_in_use, - mutable_context); - } - catch (...) - { - on_exception(backup_async, backup_id, backup_name_for_logging, backup_settings, backup_coordination); - } - }); - } - else - { - doBackup( - backup, - backup_query, - backup_id, - backup_name_for_logging, - backup_info, - backup_settings, - backup_coordination, - context_in_use, - mutable_context); - } + starter->doBackup(); + } + catch (...) + { + starter->onException(); + } + }, + Priority{}); - return backup_id; + return {starter->backup_id, BackupStatus::CREATING_BACKUP}; } catch (...) { - on_exception(backup, backup_id, backup_name_for_logging, backup_settings, backup_coordination); + starter->onException(); throw; } } -void BackupsWorker::doBackup( - BackupMutablePtr & backup, - const std::shared_ptr & backup_query, - const OperationID & backup_id, - const String & backup_name_for_logging, - const BackupInfo & backup_info, - BackupSettings backup_settings, - std::shared_ptr backup_coordination, - const ContextPtr & context, - ContextMutablePtr mutable_context) +BackupMutablePtr BackupsWorker::openBackupForWriting(const BackupInfo & backup_info, const BackupSettings & backup_settings, std::shared_ptr backup_coordination, const ContextPtr & context) const { - bool on_cluster = !backup_query->cluster.empty(); - assert(!on_cluster || mutable_context); - - /// Checks access rights if this is not ON CLUSTER query. - /// (If this is ON CLUSTER query executeDDLQueryOnCluster() will check access rights later.) - auto required_access = BackupUtils::getRequiredAccessToBackup(backup_query->elements); - if (!on_cluster) - context->checkAccess(required_access); - - ClusterPtr cluster; - if (on_cluster) - { - backup_query->cluster = context->getMacros()->expand(backup_query->cluster); - cluster = context->getCluster(backup_query->cluster); - backup_settings.cluster_host_ids = cluster->getHostIDs(); - } - - /// Make a backup coordination. - if (!backup_coordination) - backup_coordination = makeBackupCoordination(context, backup_settings, /* remote= */ on_cluster); - - if (!allow_concurrent_backups && backup_coordination->hasConcurrentBackups(std::ref(num_active_backups))) - throw Exception(ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED, "Concurrent backups not supported, turn on setting 'allow_concurrent_backups'"); - - /// Opens a backup for writing. 
+ LOG_TRACE(log, "Opening backup for writing"); BackupFactory::CreateParams backup_create_params; backup_create_params.open_mode = IBackup::OpenMode::WRITE; backup_create_params.context = context; @@ -608,37 +487,57 @@ void BackupsWorker::doBackup( backup_create_params.azure_attempt_to_create_container = backup_settings.azure_attempt_to_create_container; backup_create_params.read_settings = getReadSettingsForBackup(context, backup_settings); backup_create_params.write_settings = getWriteSettingsForBackup(context); - backup = BackupFactory::instance().createBackup(backup_create_params); + auto backup = BackupFactory::instance().createBackup(backup_create_params); + LOG_INFO(log, "Opened backup for writing"); + return backup; +} + + +void BackupsWorker::doBackup( + BackupMutablePtr backup, + const std::shared_ptr & backup_query, + const OperationID & backup_id, + const String & backup_name_for_logging, + const BackupSettings & backup_settings, + std::shared_ptr backup_coordination, + ContextMutablePtr context, + bool on_cluster, + const ClusterPtr & cluster) +{ + bool is_internal_backup = backup_settings.internal; + + /// Checks access rights if this is not ON CLUSTER query. + /// (If this is ON CLUSTER query executeDDLQueryOnCluster() will check access rights later.) + auto required_access = BackupUtils::getRequiredAccessToBackup(backup_query->elements); + if (!on_cluster) + context->checkAccess(required_access); + + maybeSleepForTesting(); /// Write the backup. - if (on_cluster) + if (on_cluster && !is_internal_backup) { - DDLQueryOnClusterParams params; - params.cluster = cluster; - params.only_shard_num = backup_settings.shard_num; - params.only_replica_num = backup_settings.replica_num; - params.access_to_check = required_access; + /// Send the BACKUP query to other hosts. backup_settings.copySettingsToQuery(*backup_query); - - // executeDDLQueryOnCluster() will return without waiting for completion - mutable_context->setSetting("distributed_ddl_task_timeout", Field{0}); - mutable_context->setSetting("distributed_ddl_output_mode", Field{"none"}); - executeDDLQueryOnCluster(backup_query, mutable_context, params); + sendQueryToOtherHosts(*backup_query, cluster, backup_settings.shard_num, backup_settings.replica_num, + context, required_access, backup_coordination->getOnClusterInitializationKeeperRetriesInfo()); + backup_coordination->setBackupQueryWasSentToOtherHosts(); /// Wait until all the hosts have written their backup entries. - backup_coordination->waitForStage(Stage::COMPLETED); - backup_coordination->setStage(Stage::COMPLETED,""); + backup_coordination->waitForOtherHostsToFinish(); } else { backup_query->setCurrentDatabase(context->getCurrentDatabase()); + auto read_settings = getReadSettingsForBackup(context, backup_settings); + /// Prepare backup entries. 
BackupEntries backup_entries; { BackupEntriesCollector backup_entries_collector( backup_query->elements, backup_settings, backup_coordination, - backup_create_params.read_settings, context, getThreadPool(ThreadPoolId::BACKUP_MAKE_FILES_LIST)); + read_settings, context, getThreadPool(ThreadPoolId::BACKUP)); backup_entries = backup_entries_collector.run(); } @@ -646,11 +545,11 @@ void BackupsWorker::doBackup( chassert(backup); chassert(backup_coordination); chassert(context); - buildFileInfosForBackupEntries(backup, backup_entries, backup_create_params.read_settings, backup_coordination, context->getProcessListElement()); - writeBackupEntries(backup, std::move(backup_entries), backup_id, backup_coordination, backup_settings.internal, context->getProcessListElement()); + buildFileInfosForBackupEntries(backup, backup_entries, read_settings, backup_coordination, context->getProcessListElement()); + writeBackupEntries(backup, std::move(backup_entries), backup_id, backup_coordination, is_internal_backup, context->getProcessListElement()); - /// We have written our backup entries, we need to tell other hosts (they could be waiting for it). - backup_coordination->setStage(Stage::COMPLETED,""); + /// We have written our backup entries (there is no need to sync it with other hosts because it's the last stage). + backup_coordination->setStage(Stage::COMPLETED, "", /* sync = */ false); } size_t num_files = 0; @@ -660,9 +559,9 @@ void BackupsWorker::doBackup( UInt64 compressed_size = 0; /// Finalize backup (write its metadata). - if (!backup_settings.internal) + backup->finalizeWriting(); + if (!is_internal_backup) { - backup->finalizeWriting(); num_files = backup->getNumFiles(); total_size = backup->getTotalSize(); num_entries = backup->getNumEntries(); @@ -673,19 +572,22 @@ void BackupsWorker::doBackup( /// Close the backup. backup.reset(); - LOG_INFO(log, "{} {} was created successfully", (backup_settings.internal ? "Internal backup" : "Backup"), backup_name_for_logging); + /// The backup coordination is not needed anymore. + backup_coordination->finish(); + /// NOTE: we need to update metadata again after backup->finalizeWriting(), because backup metadata is written there. setNumFilesAndSize(backup_id, num_files, total_size, num_entries, uncompressed_size, compressed_size, 0, 0); + /// NOTE: setStatus is called after setNumFilesAndSize in order to have actual information in a backup log record + LOG_INFO(log, "{} {} was created successfully", (is_internal_backup ? 
"Internal backup" : "Backup"), backup_name_for_logging); setStatus(backup_id, BackupStatus::BACKUP_CREATED); } void BackupsWorker::buildFileInfosForBackupEntries(const BackupPtr & backup, const BackupEntries & backup_entries, const ReadSettings & read_settings, std::shared_ptr backup_coordination, QueryStatusPtr process_list_element) { - backup_coordination->setStage(Stage::BUILDING_FILE_INFOS, ""); - backup_coordination->waitForStage(Stage::BUILDING_FILE_INFOS); - backup_coordination->addFileInfos(::DB::buildFileInfosForBackupEntries(backup_entries, backup->getBaseBackup(), read_settings, getThreadPool(ThreadPoolId::BACKUP_MAKE_FILES_LIST), process_list_element)); + backup_coordination->setStage(Stage::BUILDING_FILE_INFOS, "", /* sync = */ true); + backup_coordination->addFileInfos(::DB::buildFileInfosForBackupEntries(backup_entries, backup->getBaseBackup(), read_settings, getThreadPool(ThreadPoolId::BACKUP), process_list_element)); } @@ -694,12 +596,11 @@ void BackupsWorker::writeBackupEntries( BackupEntries && backup_entries, const OperationID & backup_id, std::shared_ptr backup_coordination, - bool internal, + bool is_internal_backup, QueryStatusPtr process_list_element) { LOG_TRACE(log, "{}, num backup entries={}", Stage::WRITING_BACKUP, backup_entries.size()); - backup_coordination->setStage(Stage::WRITING_BACKUP, ""); - backup_coordination->waitForStage(Stage::WRITING_BACKUP); + backup_coordination->setStage(Stage::WRITING_BACKUP, "", /* sync = */ true); auto file_infos = backup_coordination->getFileInfos(); if (file_infos.size() != backup_entries.size()) @@ -715,7 +616,7 @@ void BackupsWorker::writeBackupEntries( std::atomic_bool failed = false; bool always_single_threaded = !backup->supportsWritingInMultipleThreads(); - auto & thread_pool = getThreadPool(ThreadPoolId::BACKUP_COPY_FILES); + auto & thread_pool = getThreadPool(ThreadPoolId::BACKUP); std::vector writing_order; if (test_randomize_order) @@ -751,7 +652,7 @@ void BackupsWorker::writeBackupEntries( maybeSleepForTesting(); // Update metadata - if (!internal) + if (!is_internal_backup) { setNumFilesAndSize( backup_id, @@ -783,142 +684,139 @@ void BackupsWorker::writeBackupEntries( } -OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePtr context) +struct BackupsWorker::RestoreStarter { - auto restore_query = std::static_pointer_cast(query->clone()); - auto restore_settings = RestoreSettings::fromRestoreQuery(*restore_query); - - auto backup_info = BackupInfo::fromAST(*restore_query->backup_name); - String backup_name_for_logging = backup_info.toStringForLogging(); - - if (!restore_settings.restore_uuid) - restore_settings.restore_uuid = UUIDHelpers::generateV4(); - - /// `restore_id` will be used as a key to the `infos` map, so it should be unique. - OperationID restore_id; - if (restore_settings.internal) - restore_id = "internal-" + toString(UUIDHelpers::generateV4()); /// Always generate `restore_id` for internal restore to avoid collision if both internal and non-internal restores are on the same host - else if (!restore_settings.id.empty()) - restore_id = restore_settings.id; - else - restore_id = toString(*restore_settings.restore_uuid); - + BackupsWorker & backups_worker; + std::shared_ptr restore_query; + ContextPtr query_context; /// We have to keep `query_context` until the end of the operation because a pointer to it is stored inside the ThreadGroup we're using. 
+ ContextMutablePtr restore_context; + RestoreSettings restore_settings; + BackupInfo backup_info; + String restore_id; + String backup_name_for_logging; + bool on_cluster; + bool is_internal_restore; std::shared_ptr restore_coordination; + ClusterPtr cluster; + std::shared_ptr process_list_element_holder; - /// Called in exception handlers below. This lambda function can be called on a separate thread, so it can't capture local variables by reference. - auto on_exception = [this](const OperationID & restore_id_, const String & backup_name_for_logging_, - const RestoreSettings & restore_settings_, const std::shared_ptr & restore_coordination_) + RestoreStarter(BackupsWorker & backups_worker_, const ASTPtr & query_, const ContextPtr & context_) + : backups_worker(backups_worker_) + , restore_query(std::static_pointer_cast(query_->clone())) + , query_context(context_) + , restore_context(Context::createCopy(query_context)) { - /// Something bad happened, some data were not restored. - tryLogCurrentException(log, fmt::format("Failed to restore from {} {}", (restore_settings_.internal ? "internal backup" : "backup"), backup_name_for_logging_)); - setStatusSafe(restore_id_, getRestoreStatusFromCurrentException()); - sendCurrentExceptionToCoordination(restore_coordination_); - }; + restore_context->makeQueryContext(); + restore_settings = RestoreSettings::fromRestoreQuery(*restore_query); + backup_info = BackupInfo::fromAST(*restore_query->backup_name); + backup_name_for_logging = backup_info.toStringForLogging(); + is_internal_restore = restore_settings.internal; + on_cluster = !restore_query->cluster.empty() || is_internal_restore; + + if (!restore_settings.restore_uuid) + restore_settings.restore_uuid = UUIDHelpers::generateV4(); + + /// `restore_id` will be used as a key to the `infos` map, so it should be unique. + if (is_internal_restore) + restore_id = "internal-" + toString(UUIDHelpers::generateV4()); /// Always generate `restore_id` for internal restore to avoid collision if both internal and non-internal restores are on the same host + else if (!restore_settings.id.empty()) + restore_id = restore_settings.id; + else + restore_id = toString(*restore_settings.restore_uuid); - try - { String base_backup_name; if (restore_settings.base_backup_info) base_backup_name = restore_settings.base_backup_info->toStringForLogging(); - addInfo(restore_id, + /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously. + auto process_list_element = restore_context->getProcessListElement(); + if (process_list_element) + process_list_element_holder = process_list_element->getProcessListEntry(); + + backups_worker.addInfo(restore_id, backup_name_for_logging, base_backup_name, - context->getCurrentQueryId(), - restore_settings.internal, - context->getProcessListElement(), + restore_context->getCurrentQueryId(), + is_internal_restore, + process_list_element, BackupStatus::RESTORING); + } - if (restore_settings.internal) + void doRestore() + { + chassert(!restore_coordination); + if (on_cluster && !is_internal_restore) { - /// The following call of makeRestoreCoordination() is not essential because doRestore() will later create a restore coordination - /// if it's not created here. However to handle errors better it's better to make a coordination here because this way - /// if an exception will be thrown in startRestoring() other hosts will know about that. 
- restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ true); + restore_query->cluster = restore_context->getMacros()->expand(restore_query->cluster); + cluster = restore_context->getCluster(restore_query->cluster); + restore_settings.cluster_host_ids = cluster->getHostIDs(); + } + restore_coordination = backups_worker.makeRestoreCoordination(on_cluster, restore_settings, restore_context); + + backups_worker.doRestore( + restore_query, + restore_id, + backup_name_for_logging, + backup_info, + restore_settings, + restore_coordination, + restore_context, + on_cluster, + cluster); + } + + void onException() + { + /// Something bad happened, some data were not restored. + tryLogCurrentException(backups_worker.log, fmt::format("Failed to restore from {} {}", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging)); + + if (restore_coordination && restore_coordination->trySetError(std::current_exception())) + { + restore_coordination->tryWaitForOtherHostsToFinishAfterError(); + restore_coordination->tryFinishAfterError(); } - /// Prepare context to use. - ContextMutablePtr context_in_use = context; - bool on_cluster = !restore_query->cluster.empty(); - if (restore_settings.async || on_cluster) - { - /// We have to clone the query context here because: - /// if this is an "ON CLUSTER" query we need to change some settings, and - /// if this is an "ASYNC" query it's going to be executed in another thread. - context_in_use = Context::createCopy(context); - context_in_use->makeQueryContext(); - } + backups_worker.setStatusSafe(restore_id, getRestoreStatusFromCurrentException()); + } +}; - if (restore_settings.async) - { - auto & thread_pool = getThreadPool(on_cluster ? ThreadPoolId::RESTORE_ASYNC_ON_CLUSTER : ThreadPoolId::RESTORE_ASYNC); - /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously. - auto process_list_element = context_in_use->getProcessListElement(); +std::pair BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePtr context) +{ + auto starter = std::make_shared(*this, query, context); - thread_pool.scheduleOrThrowOnError( - [this, - restore_query, - restore_id, - backup_name_for_logging, - backup_info, - restore_settings, - restore_coordination, - context_in_use, - on_exception, - process_list_element_holder = process_list_element ? process_list_element->getProcessListEntry() : nullptr] + try + { + auto thread_pool_id = starter->is_internal_restore ? ThreadPoolId::ASYNC_BACKGROUND_INTERNAL_RESTORE : ThreadPoolId::ASYNC_BACKGROUND_RESTORE; + String thread_name = starter->is_internal_restore ? "RestoreAsyncInt" : "RestoreAsync"; + auto schedule = threadPoolCallbackRunnerUnsafe(thread_pools->getThreadPool(thread_pool_id), thread_name); + + schedule([starter] + { + try { - try - { - setThreadName("RestorerWorker"); - CurrentThread::QueryScope query_scope(context_in_use); - doRestore( - restore_query, - restore_id, - backup_name_for_logging, - backup_info, - restore_settings, - restore_coordination, - context_in_use); - } - catch (...) - { - on_exception(restore_id, backup_name_for_logging, restore_settings, restore_coordination); - } - }); - } - else - { - doRestore( - restore_query, - restore_id, - backup_name_for_logging, - backup_info, - restore_settings, - restore_coordination, - context_in_use); - } + starter->doRestore(); + } + catch (...) 
+ { + starter->onException(); + } + }, + Priority{}); - return restore_id; + return {starter->restore_id, BackupStatus::RESTORING}; } catch (...) { - on_exception(restore_id, backup_name_for_logging, restore_settings, restore_coordination); + starter->onException(); throw; } } -void BackupsWorker::doRestore( - const std::shared_ptr & restore_query, - const OperationID & restore_id, - const String & backup_name_for_logging, - const BackupInfo & backup_info, - RestoreSettings restore_settings, - std::shared_ptr restore_coordination, - ContextMutablePtr context) +BackupPtr BackupsWorker::openBackupForReading(const BackupInfo & backup_info, const RestoreSettings & restore_settings, const ContextPtr & context) const { - /// Open the backup for reading. + LOG_TRACE(log, "Opening backup for reading"); BackupFactory::CreateParams backup_open_params; backup_open_params.open_mode = IBackup::OpenMode::READ; backup_open_params.context = context; @@ -931,32 +829,35 @@ void BackupsWorker::doRestore( backup_open_params.read_settings = getReadSettingsForRestore(context); backup_open_params.write_settings = getWriteSettingsForRestore(context); backup_open_params.is_internal_backup = restore_settings.internal; - BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params); + auto backup = BackupFactory::instance().createBackup(backup_open_params); + LOG_TRACE(log, "Opened backup for reading"); + return backup; +} + + +void BackupsWorker::doRestore( + const std::shared_ptr & restore_query, + const OperationID & restore_id, + const String & backup_name_for_logging, + const BackupInfo & backup_info, + RestoreSettings restore_settings, + std::shared_ptr restore_coordination, + ContextMutablePtr context, + bool on_cluster, + const ClusterPtr & cluster) +{ + bool is_internal_restore = restore_settings.internal; + + maybeSleepForTesting(); + + /// Open the backup for reading. + BackupPtr backup = openBackupForReading(backup_info, restore_settings, context); String current_database = context->getCurrentDatabase(); + /// Checks access rights if this is ON CLUSTER query. /// (If this isn't ON CLUSTER query RestorerFromBackup will check access rights later.) - ClusterPtr cluster; - bool on_cluster = !restore_query->cluster.empty(); - - if (on_cluster) - { - restore_query->cluster = context->getMacros()->expand(restore_query->cluster); - cluster = context->getCluster(restore_query->cluster); - restore_settings.cluster_host_ids = cluster->getHostIDs(); - } - - /// Make a restore coordination. - if (!restore_coordination) - restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ on_cluster); - - if (!allow_concurrent_restores && restore_coordination->hasConcurrentRestores(std::ref(num_active_restores))) - throw Exception( - ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED, - "Concurrent restores not supported, turn on setting 'allow_concurrent_restores'"); - - - if (on_cluster) + if (on_cluster && !is_internal_restore) { /// We cannot just use access checking provided by the function executeDDLQueryOnCluster(): it would be incorrect /// because different replicas can contain different set of tables and so the required access rights can differ too. @@ -975,27 +876,21 @@ void BackupsWorker::doRestore( } /// Do RESTORE. 
- if (on_cluster) + if (on_cluster && !is_internal_restore) { - - DDLQueryOnClusterParams params; - params.cluster = cluster; - params.only_shard_num = restore_settings.shard_num; - params.only_replica_num = restore_settings.replica_num; + /// Send the RESTORE query to other hosts. restore_settings.copySettingsToQuery(*restore_query); + sendQueryToOtherHosts(*restore_query, cluster, restore_settings.shard_num, restore_settings.replica_num, + context, {}, restore_coordination->getOnClusterInitializationKeeperRetriesInfo()); + restore_coordination->setRestoreQueryWasSentToOtherHosts(); - // executeDDLQueryOnCluster() will return without waiting for completion - context->setSetting("distributed_ddl_task_timeout", Field{0}); - context->setSetting("distributed_ddl_output_mode", Field{"none"}); - - executeDDLQueryOnCluster(restore_query, context, params); - - /// Wait until all the hosts have written their backup entries. - restore_coordination->waitForStage(Stage::COMPLETED); - restore_coordination->setStage(Stage::COMPLETED,""); + /// Wait until all the hosts have done with their restoring work. + restore_coordination->waitForOtherHostsToFinish(); } else { + maybeSleepForTesting(); + restore_query->setCurrentDatabase(current_database); auto after_task_callback = [&] @@ -1011,11 +906,115 @@ void BackupsWorker::doRestore( restorer.run(RestorerFromBackup::RESTORE); } - LOG_INFO(log, "Restored from {} {} successfully", (restore_settings.internal ? "internal backup" : "backup"), backup_name_for_logging); + /// The restore coordination is not needed anymore. + restore_coordination->finish(); + + LOG_INFO(log, "Restored from {} {} successfully", (is_internal_restore ? "internal backup" : "backup"), backup_name_for_logging); setStatus(restore_id, BackupStatus::RESTORED); } +void BackupsWorker::sendQueryToOtherHosts(const ASTBackupQuery & backup_or_restore_query, const ClusterPtr & cluster, + size_t only_shard_num, size_t only_replica_num, ContextMutablePtr context, const AccessRightsElements & access_to_check, + const ZooKeeperRetriesInfo & retries_info) const +{ + chassert(cluster); + + DDLQueryOnClusterParams params; + params.cluster = cluster; + params.only_shard_num = only_shard_num; + params.only_replica_num = only_replica_num; + params.access_to_check = access_to_check; + params.retries_info = retries_info; + + context->setSetting("distributed_ddl_task_timeout", Field{0}); + context->setSetting("distributed_ddl_output_mode", Field{"never_throw"}); + + // executeDDLQueryOnCluster() will return without waiting for completion + executeDDLQueryOnCluster(backup_or_restore_query.clone(), context, params); + + maybeSleepForTesting(); +} + + +std::shared_ptr +BackupsWorker::makeBackupCoordination(bool on_cluster, const BackupSettings & backup_settings, const ContextPtr & context) const +{ + if (!on_cluster) + { + return std::make_shared( + *backup_settings.backup_uuid, !backup_settings.deduplicate_files, allow_concurrent_backups, *concurrency_counters); + } + + bool is_internal_backup = backup_settings.internal; + + String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups"); + auto get_zookeeper = [global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); }; + auto keeper_settings = BackupKeeperSettings::fromContext(context); + + auto all_hosts = BackupSettings::Util::filterHostIDs( + backup_settings.cluster_host_ids, backup_settings.shard_num, backup_settings.replica_num); + 
all_hosts.emplace_back(BackupCoordinationOnCluster::kInitiator); + + String current_host = is_internal_backup ? backup_settings.host_id : String{BackupCoordinationOnCluster::kInitiator}; + + auto thread_pool_id = is_internal_backup ? ThreadPoolId::ON_CLUSTER_COORDINATION_INTERNAL_BACKUP : ThreadPoolId::ON_CLUSTER_COORDINATION_BACKUP; + String thread_name = is_internal_backup ? "BackupCoordInt" : "BackupCoord"; + auto schedule = threadPoolCallbackRunnerUnsafe(thread_pools->getThreadPool(thread_pool_id), thread_name); + + return std::make_shared( + *backup_settings.backup_uuid, + !backup_settings.deduplicate_files, + root_zk_path, + get_zookeeper, + keeper_settings, + current_host, + all_hosts, + allow_concurrent_backups, + *concurrency_counters, + schedule, + context->getProcessListElement()); +} + +std::shared_ptr +BackupsWorker::makeRestoreCoordination(bool on_cluster, const RestoreSettings & restore_settings, const ContextPtr & context) const +{ + if (!on_cluster) + { + return std::make_shared( + *restore_settings.restore_uuid, allow_concurrent_restores, *concurrency_counters); + } + + bool is_internal_restore = restore_settings.internal; + + String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups"); + auto get_zookeeper = [global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); }; + auto keeper_settings = BackupKeeperSettings::fromContext(context); + + auto all_hosts = BackupSettings::Util::filterHostIDs( + restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num); + all_hosts.emplace_back(BackupCoordinationOnCluster::kInitiator); + + String current_host = is_internal_restore ? restore_settings.host_id : String{RestoreCoordinationOnCluster::kInitiator}; + + auto thread_pool_id = is_internal_restore ? ThreadPoolId::ON_CLUSTER_COORDINATION_INTERNAL_RESTORE : ThreadPoolId::ON_CLUSTER_COORDINATION_RESTORE; + String thread_name = is_internal_restore ? "RestoreCoordInt" : "RestoreCoord"; + auto schedule = threadPoolCallbackRunnerUnsafe(thread_pools->getThreadPool(thread_pool_id), thread_name); + + return std::make_shared( + *restore_settings.restore_uuid, + root_zk_path, + get_zookeeper, + keeper_settings, + current_host, + all_hosts, + allow_concurrent_restores, + *concurrency_counters, + schedule, + context->getProcessListElement()); +} + + void BackupsWorker::addInfo(const OperationID & id, const String & name, const String & base_backup_name, const String & query_id, bool internal, QueryStatusPtr process_list_element, BackupStatus status) { @@ -1135,23 +1134,25 @@ void BackupsWorker::maybeSleepForTesting() const } -void BackupsWorker::wait(const OperationID & backup_or_restore_id, bool rethrow_exception) +BackupStatus BackupsWorker::wait(const OperationID & backup_or_restore_id, bool rethrow_exception) { std::unique_lock lock{infos_mutex}; + BackupStatus current_status; status_changed.wait(lock, [&] { auto it = infos.find(backup_or_restore_id); if (it == infos.end()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown backup ID {}", backup_or_restore_id); const auto & info = it->second.info; - auto current_status = info.status; + current_status = info.status; if (rethrow_exception && isFailedOrCancelled(current_status)) std::rethrow_exception(info.exception); if (isFinalStatus(current_status)) return true; - LOG_INFO(log, "Waiting {} {}", isBackupStatus(info.status) ? "backup" : "restore", info.name); + LOG_INFO(log, "Waiting {} {} to complete", isBackupStatus(current_status) ? 
"backup" : "restore", info.name); return false; }); + return current_status; } void BackupsWorker::waitAll() @@ -1175,9 +1176,11 @@ void BackupsWorker::waitAll() LOG_INFO(log, "Backups and restores finished"); } -void BackupsWorker::cancel(const BackupOperationID & backup_or_restore_id, bool wait_) +BackupStatus BackupsWorker::cancel(const BackupOperationID & backup_or_restore_id, bool wait_) { QueryStatusPtr process_list_element; + BackupStatus current_status; + { std::unique_lock lock{infos_mutex}; auto it = infos.find(backup_or_restore_id); @@ -1186,17 +1189,20 @@ void BackupsWorker::cancel(const BackupOperationID & backup_or_restore_id, bool const auto & extended_info = it->second; const auto & info = extended_info.info; - if (isFinalStatus(info.status) || !extended_info.process_list_element) - return; + current_status = info.status; + if (isFinalStatus(current_status) || !extended_info.process_list_element) + return current_status; - LOG_INFO(log, "Cancelling {} {}", isBackupStatus(info.status) ? "backup" : "restore", info.name); + LOG_INFO(log, "Cancelling {} {}", isBackupStatus(current_status) ? "backup" : "restore", info.name); process_list_element = extended_info.process_list_element; } process_list.sendCancelToQuery(process_list_element); - if (wait_) - wait(backup_or_restore_id, /* rethrow_exception= */ false); + if (!wait_) + return current_status; + + return wait(backup_or_restore_id, /* rethrow_exception= */ false); } diff --git a/src/Backups/BackupsWorker.h b/src/Backups/BackupsWorker.h index 946562b575f..37f91e269a9 100644 --- a/src/Backups/BackupsWorker.h +++ b/src/Backups/BackupsWorker.h @@ -23,6 +23,7 @@ using BackupMutablePtr = std::shared_ptr; using BackupPtr = std::shared_ptr; class IBackupEntry; using BackupEntries = std::vector>>; +class BackupConcurrencyCounters; using DataRestoreTasks = std::vector>; struct ReadSettings; class BackupLog; @@ -31,6 +32,10 @@ using ThreadGroupPtr = std::shared_ptr; class QueryStatus; using QueryStatusPtr = std::shared_ptr; class ProcessList; +class Cluster; +using ClusterPtr = std::shared_ptr; +class AccessRightsElements; +struct ZooKeeperRetriesInfo; /// Manager of backups and restores: executes backups and restores' threads in the background. @@ -47,18 +52,18 @@ public: /// Starts executing a BACKUP or RESTORE query. Returns ID of the operation. /// For asynchronous operations the function throws no exceptions on failure usually, /// call getInfo() on a returned operation id to check for errors. - BackupOperationID start(const ASTPtr & backup_or_restore_query, ContextMutablePtr context); + std::pair start(const ASTPtr & backup_or_restore_query, ContextMutablePtr context); /// Waits until the specified backup or restore operation finishes or stops. /// The function returns immediately if the operation is already finished. - void wait(const BackupOperationID & backup_or_restore_id, bool rethrow_exception = true); + BackupStatus wait(const BackupOperationID & backup_or_restore_id, bool rethrow_exception = true); /// Waits until all running backup and restore operations finish or stop. void waitAll(); /// Cancels the specified backup or restore operation. /// The function does nothing if this operation has already finished. - void cancel(const BackupOperationID & backup_or_restore_id, bool wait_ = true); + BackupStatus cancel(const BackupOperationID & backup_or_restore_id, bool wait_ = true); /// Cancels all running backup and restore operations. 
void cancelAll(bool wait_ = true); @@ -67,26 +72,32 @@ public: std::vector getAllInfos() const; private: - BackupOperationID startMakingBackup(const ASTPtr & query, const ContextPtr & context); + std::pair startMakingBackup(const ASTPtr & query, const ContextPtr & context); + struct BackupStarter; + + BackupMutablePtr openBackupForWriting(const BackupInfo & backup_info, const BackupSettings & backup_settings, std::shared_ptr backup_coordination, const ContextPtr & context) const; void doBackup( - BackupMutablePtr & backup, + BackupMutablePtr backup, const std::shared_ptr & backup_query, const BackupOperationID & backup_id, const String & backup_name_for_logging, - const BackupInfo & backup_info, - BackupSettings backup_settings, + const BackupSettings & backup_settings, std::shared_ptr backup_coordination, - const ContextPtr & context, - ContextMutablePtr mutable_context); + ContextMutablePtr context, + bool on_cluster, + const ClusterPtr & cluster); /// Builds file infos for specified backup entries. void buildFileInfosForBackupEntries(const BackupPtr & backup, const BackupEntries & backup_entries, const ReadSettings & read_settings, std::shared_ptr backup_coordination, QueryStatusPtr process_list_element); /// Write backup entries to an opened backup. - void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries, const BackupOperationID & backup_id, std::shared_ptr backup_coordination, bool internal, QueryStatusPtr process_list_element); + void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries, const BackupOperationID & backup_id, std::shared_ptr backup_coordination, bool is_internal_backup, QueryStatusPtr process_list_element); - BackupOperationID startRestoring(const ASTPtr & query, ContextMutablePtr context); + std::pair startRestoring(const ASTPtr & query, ContextMutablePtr context); + struct RestoreStarter; + + BackupPtr openBackupForReading(const BackupInfo & backup_info, const RestoreSettings & restore_settings, const ContextPtr & context) const; void doRestore( const std::shared_ptr & restore_query, @@ -95,7 +106,17 @@ private: const BackupInfo & backup_info, RestoreSettings restore_settings, std::shared_ptr restore_coordination, - ContextMutablePtr context); + ContextMutablePtr context, + bool on_cluster, + const ClusterPtr & cluster); + + std::shared_ptr makeBackupCoordination(bool on_cluster, const BackupSettings & backup_settings, const ContextPtr & context) const; + std::shared_ptr makeRestoreCoordination(bool on_cluster, const RestoreSettings & restore_settings, const ContextPtr & context) const; + + /// Sends a BACKUP or RESTORE query to other hosts. + void sendQueryToOtherHosts(const ASTBackupQuery & backup_or_restore_query, const ClusterPtr & cluster, + size_t only_shard_num, size_t only_replica_num, ContextMutablePtr context, const AccessRightsElements & access_to_check, + const ZooKeeperRetriesInfo & retries_info) const; /// Run data restoring tasks which insert data to tables. 
void restoreTablesData(const BackupOperationID & restore_id, BackupPtr backup, DataRestoreTasks && tasks, ThreadPool & thread_pool, QueryStatusPtr process_list_element); @@ -139,6 +160,8 @@ private: std::shared_ptr backup_log; ProcessList & process_list; + + std::unique_ptr concurrency_counters; }; } diff --git a/src/Backups/IBackup.h b/src/Backups/IBackup.h index 0aa2d34657f..126b4d764da 100644 --- a/src/Backups/IBackup.h +++ b/src/Backups/IBackup.h @@ -121,8 +121,13 @@ public: /// Finalizes writing the backup, should be called after all entries have been successfully written. virtual void finalizeWriting() = 0; - /// Try to remove all files copied to the backup. Used after an exception or it the backup was cancelled. - virtual void tryRemoveAllFiles() = 0; + /// Sets that a non-retriable error happened while the backup was being written which means that + /// the backup is most likely corrupted and it can't be finalized. + /// This function is called while handling an exception or if the backup was cancelled. + virtual bool setIsCorrupted() noexcept = 0; + + /// Try to remove all files copied to the backup. Could be used after setIsCorrupted(). + virtual bool tryRemoveAllFiles() noexcept = 0; }; using BackupPtr = std::shared_ptr; diff --git a/src/Backups/IBackupCoordination.h b/src/Backups/IBackupCoordination.h index 166a2c5bbbc..c0eb90de89b 100644 --- a/src/Backups/IBackupCoordination.h +++ b/src/Backups/IBackupCoordination.h @@ -5,26 +5,44 @@ namespace DB { -class Exception; struct BackupFileInfo; using BackupFileInfos = std::vector; enum class AccessEntityType : uint8_t; enum class UserDefinedSQLObjectType : uint8_t; +struct ZooKeeperRetriesInfo; /// Replicas use this class to coordinate what they're writing to a backup while executing BACKUP ON CLUSTER. -/// There are two implementation of this interface: BackupCoordinationLocal and BackupCoordinationRemote. +/// There are two implementation of this interface: BackupCoordinationLocal and BackupCoordinationOnCluster. /// BackupCoordinationLocal is used while executing BACKUP without ON CLUSTER and performs coordination in memory. -/// BackupCoordinationRemote is used while executing BACKUP with ON CLUSTER and performs coordination via ZooKeeper. +/// BackupCoordinationOnCluster is used while executing BACKUP with ON CLUSTER and performs coordination via ZooKeeper. class IBackupCoordination { public: virtual ~IBackupCoordination() = default; /// Sets the current stage and waits for other hosts to come to this stage too. - virtual void setStage(const String & new_stage, const String & message) = 0; - virtual void setError(const Exception & exception) = 0; - virtual Strings waitForStage(const String & stage_to_wait) = 0; - virtual Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) = 0; + virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0; + + /// Sets that the backup query was sent to other hosts. + /// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not. + virtual void setBackupQueryWasSentToOtherHosts() = 0; + + /// Lets other hosts know that the current host has encountered an error. + virtual bool trySetError(std::exception_ptr exception) = 0; + + /// Lets other hosts know that the current host has finished its work. + virtual void finish() = 0; + + /// Lets other hosts know that the current host has finished its work (as a part of error-handling process). 
+ virtual bool tryFinishAfterError() noexcept = 0; + + /// Waits until all the other hosts finish their work. + /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. + virtual void waitForOtherHostsToFinish() = 0; + + /// Waits until all the other hosts finish their work (as a part of error-handling process). + /// Doesn't stops waiting if some host encounters an error or gets cancelled. + virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0; struct PartNameAndChecksum { @@ -87,9 +105,7 @@ public: /// Starts writing a specified file, the function returns false if that file is already being written concurrently. virtual bool startWritingFile(size_t data_file_index) = 0; - /// This function is used to check if concurrent backups are running - /// other than the backup passed to the function - virtual bool hasConcurrentBackups(const std::atomic & num_active_backups) const = 0; + virtual ZooKeeperRetriesInfo getOnClusterInitializationKeeperRetriesInfo() const = 0; }; } diff --git a/src/Backups/IRestoreCoordination.h b/src/Backups/IRestoreCoordination.h index 37229534286..daabf1745f3 100644 --- a/src/Backups/IRestoreCoordination.h +++ b/src/Backups/IRestoreCoordination.h @@ -5,26 +5,42 @@ namespace DB { -class Exception; enum class UserDefinedSQLObjectType : uint8_t; class ASTCreateQuery; +struct ZooKeeperRetriesInfo; /// Replicas use this class to coordinate what they're reading from a backup while executing RESTORE ON CLUSTER. -/// There are two implementation of this interface: RestoreCoordinationLocal and RestoreCoordinationRemote. +/// There are two implementation of this interface: RestoreCoordinationLocal and RestoreCoordinationOnCluster. /// RestoreCoordinationLocal is used while executing RESTORE without ON CLUSTER and performs coordination in memory. -/// RestoreCoordinationRemote is used while executing RESTORE with ON CLUSTER and performs coordination via ZooKeeper. +/// RestoreCoordinationOnCluster is used while executing RESTORE with ON CLUSTER and performs coordination via ZooKeeper. class IRestoreCoordination { public: virtual ~IRestoreCoordination() = default; /// Sets the current stage and waits for other hosts to come to this stage too. - virtual void setStage(const String & new_stage, const String & message) = 0; - virtual void setError(const Exception & exception) = 0; - virtual Strings waitForStage(const String & stage_to_wait) = 0; - virtual Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) = 0; + virtual Strings setStage(const String & new_stage, const String & message, bool sync) = 0; - static constexpr const char * kErrorStatus = "error"; + /// Sets that the restore query was sent to other hosts. + /// Function waitForOtherHostsToFinish() will check that to find out if it should really wait or not. + virtual void setRestoreQueryWasSentToOtherHosts() = 0; + + /// Lets other hosts know that the current host has encountered an error. + virtual bool trySetError(std::exception_ptr exception) = 0; + + /// Lets other hosts know that the current host has finished its work. + virtual void finish() = 0; + + /// Lets other hosts know that the current host has finished its work (as a part of error-handling process). + virtual bool tryFinishAfterError() noexcept = 0; + + /// Waits until all the other hosts finish their work. + /// Stops waiting and throws an exception if another host encounters an error or if some host gets cancelled. 
+ virtual void waitForOtherHostsToFinish() = 0; + + /// Waits until all the other hosts finish their work (as a part of error-handling process). + /// Doesn't stops waiting if some host encounters an error or gets cancelled. + virtual bool tryWaitForOtherHostsToFinishAfterError() noexcept = 0; /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table. virtual bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) = 0; @@ -49,9 +65,7 @@ public: /// (because otherwise the macro "{uuid}" in the ZooKeeper path will not work correctly). virtual void generateUUIDForTable(ASTCreateQuery & create_query) = 0; - /// This function is used to check if concurrent restores are running - /// other than the restore passed to the function - virtual bool hasConcurrentRestores(const std::atomic & num_active_restores) const = 0; + virtual ZooKeeperRetriesInfo getOnClusterInitializationKeeperRetriesInfo() const = 0; }; } diff --git a/src/Backups/RestoreCoordinationLocal.cpp b/src/Backups/RestoreCoordinationLocal.cpp index 9fe22f874b4..569f58f1909 100644 --- a/src/Backups/RestoreCoordinationLocal.cpp +++ b/src/Backups/RestoreCoordinationLocal.cpp @@ -1,32 +1,24 @@ #include + #include #include +#include #include namespace DB { -RestoreCoordinationLocal::RestoreCoordinationLocal() : log(getLogger("RestoreCoordinationLocal")) +RestoreCoordinationLocal::RestoreCoordinationLocal( + const UUID & restore_uuid, bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_) + : log(getLogger("RestoreCoordinationLocal")) + , concurrency_check(restore_uuid, /* is_restore = */ true, /* on_cluster = */ false, allow_concurrent_restore_, concurrency_counters_) { } RestoreCoordinationLocal::~RestoreCoordinationLocal() = default; -void RestoreCoordinationLocal::setStage(const String &, const String &) -{ -} - -void RestoreCoordinationLocal::setError(const Exception &) -{ -} - -Strings RestoreCoordinationLocal::waitForStage(const String &) -{ - return {}; -} - -Strings RestoreCoordinationLocal::waitForStage(const String &, std::chrono::milliseconds) +ZooKeeperRetriesInfo RestoreCoordinationLocal::getOnClusterInitializationKeeperRetriesInfo() const { return {}; } @@ -63,7 +55,7 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer { String query_str = serializeAST(create_query); - auto find_in_map = [&] + auto find_in_map = [&]() TSA_REQUIRES(mutex) { auto it = create_query_uuids.find(query_str); if (it != create_query_uuids.end()) @@ -91,14 +83,4 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer } } -bool RestoreCoordinationLocal::hasConcurrentRestores(const std::atomic & num_active_restores) const -{ - if (num_active_restores > 1) - { - LOG_WARNING(log, "Found concurrent backups: num_active_restores={}", num_active_restores); - return true; - } - return false; -} - } diff --git a/src/Backups/RestoreCoordinationLocal.h b/src/Backups/RestoreCoordinationLocal.h index 35f93574b68..6be357c4b7e 100644 --- a/src/Backups/RestoreCoordinationLocal.h +++ b/src/Backups/RestoreCoordinationLocal.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -12,19 +13,20 @@ namespace DB { class ASTCreateQuery; - /// Implementation of the IRestoreCoordination interface performing coordination in memory. 
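// Illustrative sketch, not part of the patch: the coordination interfaces above separate the
// happy path (waitForOtherHostsToFinish(), finish()) from best-effort error handling
// (trySetError(), tryWaitForOtherHostsToFinishAfterError(), tryFinishAfterError()).
// The invented function below compresses how the RestoreStarter code earlier in this diff
// drives an IRestoreCoordination instance; it assumes the interface declared above is available.
#include <exception>

void driveRestoreCoordinationSketch(IRestoreCoordination & coordination)
{
    try
    {
        /// ... the local part of the RESTORE runs here ...

        /// A no-op unless the restore query was actually sent to the other hosts
        /// (see setRestoreQueryWasSentToOtherHosts()); throws if another host failed.
        coordination.waitForOtherHostsToFinish();

        /// Marks this host as finished; in the on-cluster implementation the initiator
        /// also cleans up the coordination nodes here.
        coordination.finish();
    }
    catch (...)
    {
        /// Best-effort error propagation, mirroring RestoreStarter::onException above.
        if (coordination.trySetError(std::current_exception()))
        {
            coordination.tryWaitForOtherHostsToFinishAfterError();
            coordination.tryFinishAfterError();
        }
        throw;
    }
}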
class RestoreCoordinationLocal : public IRestoreCoordination { public: - RestoreCoordinationLocal(); + RestoreCoordinationLocal(const UUID & restore_uuid_, bool allow_concurrent_restore_, BackupConcurrencyCounters & concurrency_counters_); ~RestoreCoordinationLocal() override; - /// Sets the current stage and waits for other hosts to come to this stage too. - void setStage(const String & new_stage, const String & message) override; - void setError(const Exception & exception) override; - Strings waitForStage(const String & stage_to_wait) override; - Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) override; + Strings setStage(const String &, const String &, bool) override { return {}; } + void setRestoreQueryWasSentToOtherHosts() override {} + bool trySetError(std::exception_ptr) override { return true; } + void finish() override {} + bool tryFinishAfterError() noexcept override { return true; } + void waitForOtherHostsToFinish() override {} + bool tryWaitForOtherHostsToFinishAfterError() noexcept override { return true; } /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table. bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override; @@ -49,15 +51,16 @@ public: /// (because otherwise the macro "{uuid}" in the ZooKeeper path will not work correctly). void generateUUIDForTable(ASTCreateQuery & create_query) override; - bool hasConcurrentRestores(const std::atomic & num_active_restores) const override; + ZooKeeperRetriesInfo getOnClusterInitializationKeeperRetriesInfo() const override; private: LoggerPtr const log; + BackupConcurrencyCheck concurrency_check; - std::set> acquired_tables_in_replicated_databases; - std::unordered_set acquired_data_in_replicated_tables; - std::unordered_map create_query_uuids; - std::unordered_set acquired_data_in_keeper_map_tables; + std::set> acquired_tables_in_replicated_databases TSA_GUARDED_BY(mutex); + std::unordered_set acquired_data_in_replicated_tables TSA_GUARDED_BY(mutex); + std::unordered_map create_query_uuids TSA_GUARDED_BY(mutex); + std::unordered_set acquired_data_in_keeper_map_tables TSA_GUARDED_BY(mutex); mutable std::mutex mutex; }; diff --git a/src/Backups/RestoreCoordinationOnCluster.cpp b/src/Backups/RestoreCoordinationOnCluster.cpp new file mode 100644 index 00000000000..2029ad8b072 --- /dev/null +++ b/src/Backups/RestoreCoordinationOnCluster.cpp @@ -0,0 +1,318 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +RestoreCoordinationOnCluster::RestoreCoordinationOnCluster( + const UUID & restore_uuid_, + const String & root_zookeeper_path_, + zkutil::GetZooKeeper get_zookeeper_, + const BackupKeeperSettings & keeper_settings_, + const String & current_host_, + const Strings & all_hosts_, + bool allow_concurrent_restore_, + BackupConcurrencyCounters & concurrency_counters_, + ThreadPoolCallbackRunnerUnsafe schedule_, + QueryStatusPtr process_list_element_) + : root_zookeeper_path(root_zookeeper_path_) + , keeper_settings(keeper_settings_) + , restore_uuid(restore_uuid_) + , zookeeper_path(root_zookeeper_path_ + "/restore-" + toString(restore_uuid_)) + , all_hosts(all_hosts_) + , all_hosts_without_initiator(BackupCoordinationOnCluster::excludeInitiator(all_hosts)) + , current_host(current_host_) + , current_host_index(BackupCoordinationOnCluster::findCurrentHostIndex(current_host, all_hosts)) + , 
log(getLogger("RestoreCoordinationOnCluster")) + , with_retries(log, get_zookeeper_, keeper_settings, process_list_element_, [root_zookeeper_path_](Coordination::ZooKeeperWithFaultInjection::Ptr zk) { zk->sync(root_zookeeper_path_); }) + , concurrency_check(restore_uuid_, /* is_restore = */ true, /* on_cluster = */ true, allow_concurrent_restore_, concurrency_counters_) + , stage_sync(/* is_restore = */ true, fs::path{zookeeper_path} / "stage", current_host, all_hosts, allow_concurrent_restore_, with_retries, schedule_, process_list_element_, log) + , cleaner(zookeeper_path, with_retries, log) +{ + createRootNodes(); +} + +RestoreCoordinationOnCluster::~RestoreCoordinationOnCluster() +{ + tryFinishImpl(); +} + +void RestoreCoordinationOnCluster::createRootNodes() +{ + auto holder = with_retries.createRetriesControlHolder("createRootNodes", WithRetries::kInitialization); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + zk->createAncestors(zookeeper_path); + zk->createIfNotExists(zookeeper_path, ""); + zk->createIfNotExists(zookeeper_path + "/repl_databases_tables_acquired", ""); + zk->createIfNotExists(zookeeper_path + "/repl_tables_data_acquired", ""); + zk->createIfNotExists(zookeeper_path + "/repl_access_storages_acquired", ""); + zk->createIfNotExists(zookeeper_path + "/repl_sql_objects_acquired", ""); + zk->createIfNotExists(zookeeper_path + "/keeper_map_tables", ""); + zk->createIfNotExists(zookeeper_path + "/table_uuids", ""); + }); +} + +Strings RestoreCoordinationOnCluster::setStage(const String & new_stage, const String & message, bool sync) +{ + stage_sync.setStage(new_stage, message); + + if (!sync) + return {}; + + return stage_sync.waitForHostsToReachStage(new_stage, all_hosts_without_initiator); +} + +void RestoreCoordinationOnCluster::setRestoreQueryWasSentToOtherHosts() +{ + restore_query_was_sent_to_other_hosts = true; +} + +bool RestoreCoordinationOnCluster::trySetError(std::exception_ptr exception) +{ + return stage_sync.trySetError(exception); +} + +void RestoreCoordinationOnCluster::finish() +{ + bool other_hosts_also_finished = false; + stage_sync.finish(other_hosts_also_finished); + + if ((current_host == kInitiator) && (other_hosts_also_finished || !restore_query_was_sent_to_other_hosts)) + cleaner.cleanup(); +} + +bool RestoreCoordinationOnCluster::tryFinishAfterError() noexcept +{ + return tryFinishImpl(); +} + +bool RestoreCoordinationOnCluster::tryFinishImpl() noexcept +{ + bool other_hosts_also_finished = false; + if (!stage_sync.tryFinishAfterError(other_hosts_also_finished)) + return false; + + if ((current_host == kInitiator) && (other_hosts_also_finished || !restore_query_was_sent_to_other_hosts)) + { + if (!cleaner.tryCleanupAfterError()) + return false; + } + + return true; +} + +void RestoreCoordinationOnCluster::waitForOtherHostsToFinish() +{ + if ((current_host != kInitiator) || !restore_query_was_sent_to_other_hosts) + return; + stage_sync.waitForOtherHostsToFinish(); +} + +bool RestoreCoordinationOnCluster::tryWaitForOtherHostsToFinishAfterError() noexcept +{ + if (current_host != kInitiator) + return false; + if (!restore_query_was_sent_to_other_hosts) + return true; + return stage_sync.tryWaitForOtherHostsToFinishAfterError(); +} + +ZooKeeperRetriesInfo RestoreCoordinationOnCluster::getOnClusterInitializationKeeperRetriesInfo() const +{ + return ZooKeeperRetriesInfo{keeper_settings.max_retries_while_initializing, + static_cast(keeper_settings.retry_initial_backoff_ms.count()), + 
static_cast(keeper_settings.retry_max_backoff_ms.count())}; +} + +bool RestoreCoordinationOnCluster::acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) +{ + bool result = false; + auto holder = with_retries.createRetriesControlHolder("acquireCreatingTableInReplicatedDatabase"); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + String path = zookeeper_path + "/repl_databases_tables_acquired/" + escapeForFileName(database_zk_path); + zk->createIfNotExists(path, ""); + + path += "/" + escapeForFileName(table_name); + auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent); + if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) + throw zkutil::KeeperException::fromPath(code, path); + + if (code == Coordination::Error::ZOK) + { + result = true; + return; + } + + /// We need to check who created that node + result = zk->get(path) == toString(current_host_index); + }); + return result; +} + +bool RestoreCoordinationOnCluster::acquireInsertingDataIntoReplicatedTable(const String & table_zk_path) +{ + bool result = false; + auto holder = with_retries.createRetriesControlHolder("acquireInsertingDataIntoReplicatedTable"); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + String path = zookeeper_path + "/repl_tables_data_acquired/" + escapeForFileName(table_zk_path); + auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent); + if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) + throw zkutil::KeeperException::fromPath(code, path); + + if (code == Coordination::Error::ZOK) + { + result = true; + return; + } + + /// We need to check who created that node + result = zk->get(path) == toString(current_host_index); + }); + return result; +} + +bool RestoreCoordinationOnCluster::acquireReplicatedAccessStorage(const String & access_storage_zk_path) +{ + bool result = false; + auto holder = with_retries.createRetriesControlHolder("acquireReplicatedAccessStorage"); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + String path = zookeeper_path + "/repl_access_storages_acquired/" + escapeForFileName(access_storage_zk_path); + auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent); + if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) + throw zkutil::KeeperException::fromPath(code, path); + + if (code == Coordination::Error::ZOK) + { + result = true; + return; + } + + /// We need to check who created that node + result = zk->get(path) == toString(current_host_index); + }); + return result; +} + +bool RestoreCoordinationOnCluster::acquireReplicatedSQLObjects(const String & loader_zk_path, UserDefinedSQLObjectType object_type) +{ + bool result = false; + auto holder = with_retries.createRetriesControlHolder("acquireReplicatedSQLObjects"); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + String path = zookeeper_path + "/repl_sql_objects_acquired/" + escapeForFileName(loader_zk_path); + zk->createIfNotExists(path, ""); + + path += "/"; + switch (object_type) + { + case UserDefinedSQLObjectType::Function: + path += "functions"; + break; + } + + auto code = zk->tryCreate(path, "", 
zkutil::CreateMode::Persistent); + if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) + throw zkutil::KeeperException::fromPath(code, path); + + if (code == Coordination::Error::ZOK) + { + result = true; + return; + } + + /// We need to check who created that node + result = zk->get(path) == toString(current_host_index); + }); + return result; +} + +bool RestoreCoordinationOnCluster::acquireInsertingDataForKeeperMap(const String & root_zk_path, const String & table_unique_id) +{ + bool lock_acquired = false; + auto holder = with_retries.createRetriesControlHolder("acquireInsertingDataForKeeperMap"); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + /// we need to remove leading '/' from root_zk_path + auto normalized_root_zk_path = root_zk_path.substr(1); + std::string restore_lock_path = fs::path(zookeeper_path) / "keeper_map_tables" / escapeForFileName(normalized_root_zk_path); + zk->createAncestors(restore_lock_path); + auto code = zk->tryCreate(restore_lock_path, table_unique_id, zkutil::CreateMode::Persistent); + + if (code == Coordination::Error::ZOK) + { + lock_acquired = true; + return; + } + + if (code == Coordination::Error::ZNODEEXISTS) + lock_acquired = table_unique_id == zk->get(restore_lock_path); + else + zkutil::KeeperException::fromPath(code, restore_lock_path); + }); + return lock_acquired; +} + +void RestoreCoordinationOnCluster::generateUUIDForTable(ASTCreateQuery & create_query) +{ + String query_str = serializeAST(create_query); + CreateQueryUUIDs new_uuids{create_query, /* generate_random= */ true, /* force_random= */ true}; + String new_uuids_str = new_uuids.toString(); + + auto holder = with_retries.createRetriesControlHolder("generateUUIDForTable"); + holder.retries_ctl.retryLoop( + [&, &zk = holder.faulty_zookeeper]() + { + with_retries.renewZooKeeper(zk); + + String path = zookeeper_path + "/table_uuids/" + escapeForFileName(query_str); + Coordination::Error res = zk->tryCreate(path, new_uuids_str, zkutil::CreateMode::Persistent); + + if (res == Coordination::Error::ZOK) + { + new_uuids.copyToQuery(create_query); + return; + } + + if (res == Coordination::Error::ZNODEEXISTS) + { + CreateQueryUUIDs::fromString(zk->get(path)).copyToQuery(create_query); + return; + } + + zkutil::KeeperException::fromPath(res, path); + }); +} + +} diff --git a/src/Backups/RestoreCoordinationRemote.h b/src/Backups/RestoreCoordinationOnCluster.h similarity index 62% rename from src/Backups/RestoreCoordinationRemote.h rename to src/Backups/RestoreCoordinationOnCluster.h index a3d57e9a4d0..87a8dd3ce83 100644 --- a/src/Backups/RestoreCoordinationRemote.h +++ b/src/Backups/RestoreCoordinationOnCluster.h @@ -1,6 +1,8 @@ #pragma once #include +#include +#include #include #include @@ -9,28 +11,33 @@ namespace DB { /// Implementation of the IRestoreCoordination interface performing coordination via ZooKeeper. It's necessary for "RESTORE ON CLUSTER". -class RestoreCoordinationRemote : public IRestoreCoordination +class RestoreCoordinationOnCluster : public IRestoreCoordination { public: - using RestoreKeeperSettings = WithRetries::KeeperSettings; + /// Empty string as the current host is used to mark the initiator of a RESTORE ON CLUSTER query. 
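// Illustrative sketch, not part of the patch: every acquire*() method above follows the same
// idempotent ZooKeeper idiom. The host tries to create a persistent node whose value identifies
// it; ZOK means "acquired", ZNODEEXISTS means somebody was first, and the host re-reads the node
// to check whether that somebody was itself, so a retry by the same host still reports success.
// The toy in-memory version below (invented names, no ZooKeeper) captures just that logic.
#include <map>
#include <string>

/// Returns true if `host_id` owns `path`, creating the entry if nobody owns it yet.
bool tryAcquireSketch(std::map<std::string, std::string> & nodes, const std::string & path, const std::string & host_id)
{
    auto [it, inserted] = nodes.emplace(path, host_id);
    if (inserted)
        return true;                  /// analogue of Coordination::Error::ZOK
    return it->second == host_id;     /// analogue of ZNODEEXISTS followed by reading the node's value
}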
+ static const constexpr std::string_view kInitiator; - RestoreCoordinationRemote( - zkutil::GetZooKeeper get_zookeeper_, + RestoreCoordinationOnCluster( + const UUID & restore_uuid_, const String & root_zookeeper_path_, - const RestoreKeeperSettings & keeper_settings_, - const String & restore_uuid_, - const Strings & all_hosts_, + zkutil::GetZooKeeper get_zookeeper_, + const BackupKeeperSettings & keeper_settings_, const String & current_host_, - bool is_internal_, + const Strings & all_hosts_, + bool allow_concurrent_restore_, + BackupConcurrencyCounters & concurrency_counters_, + ThreadPoolCallbackRunnerUnsafe schedule_, QueryStatusPtr process_list_element_); - ~RestoreCoordinationRemote() override; + ~RestoreCoordinationOnCluster() override; - /// Sets the current stage and waits for other hosts to come to this stage too. - void setStage(const String & new_stage, const String & message) override; - void setError(const Exception & exception) override; - Strings waitForStage(const String & stage_to_wait) override; - Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) override; + Strings setStage(const String & new_stage, const String & message, bool sync) override; + void setRestoreQueryWasSentToOtherHosts() override; + bool trySetError(std::exception_ptr exception) override; + void finish() override; + bool tryFinishAfterError() noexcept override; + void waitForOtherHostsToFinish() override; + bool tryWaitForOtherHostsToFinishAfterError() noexcept override; /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table. bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override; @@ -55,27 +62,27 @@ public: /// (because otherwise the macro "{uuid}" in the ZooKeeper path will not work correctly). 
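// Illustrative sketch, not part of the patch: generateUUIDForTable() has to make every host use
// the same UUIDs for a restored table, otherwise ZooKeeper paths built from the "{uuid}" macro
// would diverge between replicas. The on-cluster implementation earlier in this diff does it with
// a first-writer-wins node keyed by the serialized CREATE query: the first host stores the UUIDs
// it generated and every other host reads them back. A toy single-process analogue (invented
// names) of that agreement step:
#include <map>
#include <string>

/// Returns the UUID string every host should use for `create_query_text`:
/// the first caller's `proposed_uuids` wins and later callers adopt the stored value.
std::string agreeOnUuidsSketch(std::map<std::string, std::string> & stored, const std::string & create_query_text, const std::string & proposed_uuids)
{
    auto [it, inserted] = stored.emplace(create_query_text, proposed_uuids);
    return it->second;   /// either our own proposal (if we inserted it) or the one stored earlier
}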
void generateUUIDForTable(ASTCreateQuery & create_query) override; - bool hasConcurrentRestores(const std::atomic & num_active_restores) const override; + ZooKeeperRetriesInfo getOnClusterInitializationKeeperRetriesInfo() const override; private: void createRootNodes(); - void removeAllNodes(); + bool tryFinishImpl() noexcept; - /// get_zookeeper will provide a zookeeper client without any fault injection - const zkutil::GetZooKeeper get_zookeeper; const String root_zookeeper_path; - const RestoreKeeperSettings keeper_settings; - const String restore_uuid; + const BackupKeeperSettings keeper_settings; + const UUID restore_uuid; const String zookeeper_path; const Strings all_hosts; + const Strings all_hosts_without_initiator; const String current_host; const size_t current_host_index; - const bool is_internal; LoggerPtr const log; - mutable WithRetries with_retries; - std::optional stage_sync; - mutable std::mutex mutex; + const WithRetries with_retries; + BackupConcurrencyCheck concurrency_check; + BackupCoordinationStageSync stage_sync; + BackupCoordinationCleaner cleaner; + std::atomic restore_query_was_sent_to_other_hosts = false; }; } diff --git a/src/Backups/RestoreCoordinationRemote.cpp b/src/Backups/RestoreCoordinationRemote.cpp deleted file mode 100644 index 0a69bc0eafb..00000000000 --- a/src/Backups/RestoreCoordinationRemote.cpp +++ /dev/null @@ -1,379 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace Stage = BackupCoordinationStage; - -RestoreCoordinationRemote::RestoreCoordinationRemote( - zkutil::GetZooKeeper get_zookeeper_, - const String & root_zookeeper_path_, - const RestoreKeeperSettings & keeper_settings_, - const String & restore_uuid_, - const Strings & all_hosts_, - const String & current_host_, - bool is_internal_, - QueryStatusPtr process_list_element_) - : get_zookeeper(get_zookeeper_) - , root_zookeeper_path(root_zookeeper_path_) - , keeper_settings(keeper_settings_) - , restore_uuid(restore_uuid_) - , zookeeper_path(root_zookeeper_path_ + "/restore-" + restore_uuid_) - , all_hosts(all_hosts_) - , current_host(current_host_) - , current_host_index(BackupCoordinationRemote::findCurrentHostIndex(all_hosts, current_host)) - , is_internal(is_internal_) - , log(getLogger("RestoreCoordinationRemote")) - , with_retries( - log, - get_zookeeper_, - keeper_settings, - process_list_element_, - [my_zookeeper_path = zookeeper_path, my_current_host = current_host, my_is_internal = is_internal] - (WithRetries::FaultyKeeper & zk) - { - /// Recreate this ephemeral node to signal that we are alive. - if (my_is_internal) - { - String alive_node_path = my_zookeeper_path + "/stage/alive|" + my_current_host; - - /// Delete the ephemeral node from the previous connection so we don't have to wait for keeper to do it automatically. - zk->tryRemove(alive_node_path); - - zk->createAncestors(alive_node_path); - zk->create(alive_node_path, "", zkutil::CreateMode::Ephemeral); - } - }) -{ - createRootNodes(); - - stage_sync.emplace( - zookeeper_path, - with_retries, - log); -} - -RestoreCoordinationRemote::~RestoreCoordinationRemote() -{ - try - { - if (!is_internal) - removeAllNodes(); - } - catch (...) 
- { - tryLogCurrentException(__PRETTY_FUNCTION__); - } -} - -void RestoreCoordinationRemote::createRootNodes() -{ - auto holder = with_retries.createRetriesControlHolder("createRootNodes"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - zk->createAncestors(zookeeper_path); - - Coordination::Requests ops; - Coordination::Responses responses; - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path, "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_databases_tables_acquired", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_tables_data_acquired", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_access_storages_acquired", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/repl_sql_objects_acquired", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/keeper_map_tables", "", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/table_uuids", "", zkutil::CreateMode::Persistent)); - zk->tryMulti(ops, responses); - }); -} - -void RestoreCoordinationRemote::setStage(const String & new_stage, const String & message) -{ - if (is_internal) - stage_sync->set(current_host, new_stage, message); - else - stage_sync->set(current_host, new_stage, /* message */ "", /* all_hosts */ true); -} - -void RestoreCoordinationRemote::setError(const Exception & exception) -{ - stage_sync->setError(current_host, exception); -} - -Strings RestoreCoordinationRemote::waitForStage(const String & stage_to_wait) -{ - return stage_sync->wait(all_hosts, stage_to_wait); -} - -Strings RestoreCoordinationRemote::waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) -{ - return stage_sync->waitFor(all_hosts, stage_to_wait, timeout); -} - -bool RestoreCoordinationRemote::acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) -{ - bool result = false; - auto holder = with_retries.createRetriesControlHolder("acquireCreatingTableInReplicatedDatabase"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - String path = zookeeper_path + "/repl_databases_tables_acquired/" + escapeForFileName(database_zk_path); - zk->createIfNotExists(path, ""); - - path += "/" + escapeForFileName(table_name); - auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent); - if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) - throw zkutil::KeeperException::fromPath(code, path); - - if (code == Coordination::Error::ZOK) - { - result = true; - return; - } - - /// We need to check who created that node - result = zk->get(path) == toString(current_host_index); - }); - return result; -} - -bool RestoreCoordinationRemote::acquireInsertingDataIntoReplicatedTable(const String & table_zk_path) -{ - bool result = false; - auto holder = with_retries.createRetriesControlHolder("acquireInsertingDataIntoReplicatedTable"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - String path = zookeeper_path + "/repl_tables_data_acquired/" + escapeForFileName(table_zk_path); - auto code = zk->tryCreate(path, toString(current_host_index), 
zkutil::CreateMode::Persistent); - if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) - throw zkutil::KeeperException::fromPath(code, path); - - if (code == Coordination::Error::ZOK) - { - result = true; - return; - } - - /// We need to check who created that node - result = zk->get(path) == toString(current_host_index); - }); - return result; -} - -bool RestoreCoordinationRemote::acquireReplicatedAccessStorage(const String & access_storage_zk_path) -{ - bool result = false; - auto holder = with_retries.createRetriesControlHolder("acquireReplicatedAccessStorage"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - String path = zookeeper_path + "/repl_access_storages_acquired/" + escapeForFileName(access_storage_zk_path); - auto code = zk->tryCreate(path, toString(current_host_index), zkutil::CreateMode::Persistent); - if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) - throw zkutil::KeeperException::fromPath(code, path); - - if (code == Coordination::Error::ZOK) - { - result = true; - return; - } - - /// We need to check who created that node - result = zk->get(path) == toString(current_host_index); - }); - return result; -} - -bool RestoreCoordinationRemote::acquireReplicatedSQLObjects(const String & loader_zk_path, UserDefinedSQLObjectType object_type) -{ - bool result = false; - auto holder = with_retries.createRetriesControlHolder("acquireReplicatedSQLObjects"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - String path = zookeeper_path + "/repl_sql_objects_acquired/" + escapeForFileName(loader_zk_path); - zk->createIfNotExists(path, ""); - - path += "/"; - switch (object_type) - { - case UserDefinedSQLObjectType::Function: - path += "functions"; - break; - } - - auto code = zk->tryCreate(path, "", zkutil::CreateMode::Persistent); - if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS)) - throw zkutil::KeeperException::fromPath(code, path); - - if (code == Coordination::Error::ZOK) - { - result = true; - return; - } - - /// We need to check who created that node - result = zk->get(path) == toString(current_host_index); - }); - return result; -} - -bool RestoreCoordinationRemote::acquireInsertingDataForKeeperMap(const String & root_zk_path, const String & table_unique_id) -{ - bool lock_acquired = false; - auto holder = with_retries.createRetriesControlHolder("acquireInsertingDataForKeeperMap"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - /// we need to remove leading '/' from root_zk_path - auto normalized_root_zk_path = root_zk_path.substr(1); - std::string restore_lock_path = fs::path(zookeeper_path) / "keeper_map_tables" / escapeForFileName(normalized_root_zk_path); - zk->createAncestors(restore_lock_path); - auto code = zk->tryCreate(restore_lock_path, table_unique_id, zkutil::CreateMode::Persistent); - - if (code == Coordination::Error::ZOK) - { - lock_acquired = true; - return; - } - - if (code == Coordination::Error::ZNODEEXISTS) - lock_acquired = table_unique_id == zk->get(restore_lock_path); - else - zkutil::KeeperException::fromPath(code, restore_lock_path); - }); - return lock_acquired; -} - -void RestoreCoordinationRemote::generateUUIDForTable(ASTCreateQuery & create_query) -{ - String query_str = serializeAST(create_query); - CreateQueryUUIDs new_uuids{create_query, /* 
generate_random= */ true, /* force_random= */ true}; - String new_uuids_str = new_uuids.toString(); - - auto holder = with_retries.createRetriesControlHolder("generateUUIDForTable"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - String path = zookeeper_path + "/table_uuids/" + escapeForFileName(query_str); - Coordination::Error res = zk->tryCreate(path, new_uuids_str, zkutil::CreateMode::Persistent); - - if (res == Coordination::Error::ZOK) - { - new_uuids.copyToQuery(create_query); - return; - } - - if (res == Coordination::Error::ZNODEEXISTS) - { - CreateQueryUUIDs::fromString(zk->get(path)).copyToQuery(create_query); - return; - } - - zkutil::KeeperException::fromPath(res, path); - }); -} - -void RestoreCoordinationRemote::removeAllNodes() -{ - /// Usually this function is called by the initiator when a restore operation is complete so we don't need the coordination anymore. - /// - /// However there can be a rare situation when this function is called after an error occurs on the initiator of a query - /// while some hosts are still restoring something. Removing all the nodes will remove the parent node of the restore coordination - /// at `zookeeper_path` which might cause such hosts to stop with exception "ZNONODE". Or such hosts might still do some part - /// of their restore work before that. - - auto holder = with_retries.createRetriesControlHolder("removeAllNodes"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - zk->removeRecursive(zookeeper_path); - }); -} - -bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic &) const -{ - /// If its internal concurrency will be checked for the base restore - if (is_internal) - return false; - - bool result = false; - std::string path = zookeeper_path + "/stage"; - - auto holder = with_retries.createRetriesControlHolder("createRootNodes"); - holder.retries_ctl.retryLoop( - [&, &zk = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zk); - - if (! 
zk->exists(root_zookeeper_path)) - zk->createAncestors(root_zookeeper_path); - - for (size_t attempt = 0; attempt < MAX_ZOOKEEPER_ATTEMPTS; ++attempt) - { - Coordination::Stat stat; - zk->get(root_zookeeper_path, &stat); - Strings existing_restore_paths = zk->getChildren(root_zookeeper_path); - for (const auto & existing_restore_path : existing_restore_paths) - { - if (startsWith(existing_restore_path, "backup-")) - continue; - - String existing_restore_uuid = existing_restore_path; - existing_restore_uuid.erase(0, String("restore-").size()); - - if (existing_restore_uuid == toString(restore_uuid)) - continue; - - String status; - if (zk->tryGet(root_zookeeper_path + "/" + existing_restore_path + "/stage", status)) - { - /// Check if some other restore is in progress - if (status == Stage::SCHEDULED_TO_START) - { - LOG_WARNING(log, "Found a concurrent restore: {}, current restore: {}", existing_restore_uuid, toString(restore_uuid)); - result = true; - return; - } - } - } - - zk->createIfNotExists(path, ""); - auto code = zk->trySet(path, Stage::SCHEDULED_TO_START, stat.version); - if (code == Coordination::Error::ZOK) - break; - bool is_last_attempt = (attempt == MAX_ZOOKEEPER_ATTEMPTS - 1); - if ((code != Coordination::Error::ZBADVERSION) || is_last_attempt) - throw zkutil::KeeperException::fromPath(code, path); - } - }); - - return result; -} - -} diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index eb4ba9424ff..29579aa7348 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -100,7 +100,6 @@ RestorerFromBackup::RestorerFromBackup( , context(context_) , process_list_element(context->getProcessListElement()) , after_task_callback(after_task_callback_) - , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000)) , create_table_timeout(context->getConfigRef().getUInt64("backups.create_table_timeout", 300000)) , log(getLogger("RestorerFromBackup")) , tables_dependencies("RestorerFromBackup") @@ -119,12 +118,14 @@ RestorerFromBackup::~RestorerFromBackup() } } -void RestorerFromBackup::run(Mode mode) +void RestorerFromBackup::run(Mode mode_) { /// run() can be called onle once. if (!current_stage.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Already restoring"); + mode = mode_; + /// Find other hosts working along with us to execute this ON CLUSTER query. all_hosts = BackupSettings::Util::filterHostIDs( restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num); @@ -139,6 +140,7 @@ void RestorerFromBackup::run(Mode mode) setStage(Stage::FINDING_TABLES_IN_BACKUP); findDatabasesAndTablesInBackup(); waitFutures(); + logNumberOfDatabasesAndTablesToRestore(); /// Check access rights. setStage(Stage::CHECKING_ACCESS_RIGHTS); @@ -228,20 +230,8 @@ void RestorerFromBackup::setStage(const String & new_stage, const String & messa if (restore_coordination) { - restore_coordination->setStage(new_stage, message); - - /// The initiator of a RESTORE ON CLUSTER query waits for other hosts to complete their work (see waitForStage(Stage::COMPLETED) in BackupsWorker::doRestore), - /// but other hosts shouldn't wait for each others' completion. (That's simply unnecessary and also - /// the initiator may start cleaning up (e.g. removing restore-coordination ZooKeeper nodes) once all other hosts are in Stage::COMPLETED.) 
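
Editorial aside: the removed acquire* helpers above all rely on the same idempotent-acquisition idea — try to create a persistent node whose value identifies the acquiring host, and on ZNODEEXISTS treat the acquisition as successful only if the existing node was written by the same host index. A minimal standalone sketch of that pattern follows; an in-memory map stands in for [Zoo]Keeper and all names are illustrative, not ClickHouse API.

```cpp
#include <iostream>
#include <map>
#include <string>

/// Stand-in for a [Zoo]Keeper namespace: path -> value. In the real code the value
/// is the index of the host that created the node (toString(current_host_index)).
using FakeKeeper = std::map<std::string, std::string>;

/// Create `path` owned by `owner`, or, if it already exists, report success only
/// when the existing node was created by the same owner. A host that already holds
/// the "lock" re-acquires it without error; a different host is rejected.
bool acquireOrVerifyOwner(FakeKeeper & keeper, const std::string & path, const std::string & owner)
{
    auto [it, inserted] = keeper.emplace(path, owner);
    if (inserted)
        return true;                 /// corresponds to Coordination::Error::ZOK
    return it->second == owner;      /// corresponds to the ZNODEEXISTS + get() check
}

int main()
{
    FakeKeeper keeper;
    std::cout << acquireOrVerifyOwner(keeper, "/repl_tables_data_acquired/t1", "0") << '\n'; /// 1: first acquisition
    std::cout << acquireOrVerifyOwner(keeper, "/repl_tables_data_acquired/t1", "0") << '\n'; /// 1: same host retries
    std::cout << acquireOrVerifyOwner(keeper, "/repl_tables_data_acquired/t1", "1") << '\n'; /// 0: another host loses
}
```

Because a retried attempt simply re-creates or re-reads the same node, the operation stays safe to repeat, which is what the surrounding retryLoop depends on.
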
- bool need_wait = (new_stage != Stage::COMPLETED); - - if (need_wait) - { - if (new_stage == Stage::FINDING_TABLES_IN_BACKUP) - restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout); - else - restore_coordination->waitForStage(new_stage); - } + /// There is no need to sync Stage::COMPLETED with other hosts because it's the last stage. + restore_coordination->setStage(new_stage, message, /* sync = */ (new_stage != Stage::COMPLETED)); } } @@ -384,8 +374,12 @@ void RestorerFromBackup::findDatabasesAndTablesInBackup() } } } +} - LOG_INFO(log, "Will restore {} databases and {} tables", getNumDatabases(), getNumTables()); +void RestorerFromBackup::logNumberOfDatabasesAndTablesToRestore() const +{ + std::string_view action = (mode == CHECK_ACCESS_ONLY) ? "check access rights for restoring" : "restore"; + LOG_INFO(log, "Will {} {} databases and {} tables", action, getNumDatabases(), getNumTables()); } void RestorerFromBackup::findTableInBackup(const QualifiedTableName & table_name_in_backup, bool skip_if_inner_table, const std::optional & partitions) diff --git a/src/Backups/RestorerFromBackup.h b/src/Backups/RestorerFromBackup.h index e0130ccfcb4..87290618487 100644 --- a/src/Backups/RestorerFromBackup.h +++ b/src/Backups/RestorerFromBackup.h @@ -53,7 +53,7 @@ public: using DataRestoreTasks = std::vector; /// Restores the metadata of databases and tables and returns tasks to restore the data of tables. - void run(Mode mode); + void run(Mode mode_); BackupPtr getBackup() const { return backup; } const RestoreSettings & getRestoreSettings() const { return restore_settings; } @@ -80,10 +80,10 @@ private: ContextMutablePtr context; QueryStatusPtr process_list_element; std::function after_task_callback; - std::chrono::milliseconds on_cluster_first_sync_timeout; std::chrono::milliseconds create_table_timeout; LoggerPtr log; + Mode mode = Mode::RESTORE; Strings all_hosts; DDLRenamingMap renaming_map; std::vector root_paths_in_backup; @@ -97,6 +97,7 @@ private: void findDatabaseInBackupImpl(const String & database_name_in_backup, const std::set & except_table_names); void findEverythingInBackup(const std::set & except_database_names, const std::set & except_table_names); + void logNumberOfDatabasesAndTablesToRestore() const; size_t getNumDatabases() const; size_t getNumTables() const; diff --git a/src/Backups/WithRetries.cpp b/src/Backups/WithRetries.cpp index 772f746e40a..9c18be3ca9e 100644 --- a/src/Backups/WithRetries.cpp +++ b/src/Backups/WithRetries.cpp @@ -1,57 +1,34 @@ #include -#include #include + namespace DB { -namespace Setting -{ - extern const SettingsUInt64 backup_restore_keeper_max_retries; - extern const SettingsUInt64 backup_restore_keeper_retry_initial_backoff_ms; - extern const SettingsUInt64 backup_restore_keeper_retry_max_backoff_ms; - extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multiread; - extern const SettingsFloat backup_restore_keeper_fault_injection_probability; - extern const SettingsUInt64 backup_restore_keeper_fault_injection_seed; - extern const SettingsUInt64 backup_restore_keeper_value_max_size; - extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multi; -} - -WithRetries::KeeperSettings WithRetries::KeeperSettings::fromContext(ContextPtr context) -{ - return - { - .keeper_max_retries = context->getSettingsRef()[Setting::backup_restore_keeper_max_retries], - .keeper_retry_initial_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_initial_backoff_ms], - .keeper_retry_max_backoff_ms = 
context->getSettingsRef()[Setting::backup_restore_keeper_retry_max_backoff_ms], - .batch_size_for_keeper_multiread = context->getSettingsRef()[Setting::backup_restore_batch_size_for_keeper_multiread], - .keeper_fault_injection_probability = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_probability], - .keeper_fault_injection_seed = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_seed], - .keeper_value_max_size = context->getSettingsRef()[Setting::backup_restore_keeper_value_max_size], - .batch_size_for_keeper_multi = context->getSettingsRef()[Setting::backup_restore_batch_size_for_keeper_multi], - }; -} WithRetries::WithRetries( - LoggerPtr log_, zkutil::GetZooKeeper get_zookeeper_, const KeeperSettings & settings_, QueryStatusPtr process_list_element_, RenewerCallback callback_) + LoggerPtr log_, zkutil::GetZooKeeper get_zookeeper_, const BackupKeeperSettings & settings_, QueryStatusPtr process_list_element_, RenewerCallback callback_) : log(log_) , get_zookeeper(get_zookeeper_) , settings(settings_) , process_list_element(process_list_element_) , callback(callback_) - , global_zookeeper_retries_info( - settings.keeper_max_retries, settings.keeper_retry_initial_backoff_ms, settings.keeper_retry_max_backoff_ms) {} -WithRetries::RetriesControlHolder::RetriesControlHolder(const WithRetries * parent, const String & name) - : info(parent->global_zookeeper_retries_info) - , retries_ctl(name, parent->log, info, parent->process_list_element) +WithRetries::RetriesControlHolder::RetriesControlHolder(const WithRetries * parent, const String & name, Kind kind) + : info( (kind == kInitialization) ? parent->settings.max_retries_while_initializing + : (kind == kErrorHandling) ? parent->settings.max_retries_while_handling_error + : parent->settings.max_retries, + parent->settings.retry_initial_backoff_ms.count(), + parent->settings.retry_max_backoff_ms.count()) + /// We don't use process_list_element while handling an error because the error handling can't be cancellable. + , retries_ctl(name, parent->log, info, (kind == kErrorHandling) ? nullptr : parent->process_list_element) , faulty_zookeeper(parent->getFaultyZooKeeper()) {} -WithRetries::RetriesControlHolder WithRetries::createRetriesControlHolder(const String & name) +WithRetries::RetriesControlHolder WithRetries::createRetriesControlHolder(const String & name, Kind kind) const { - return RetriesControlHolder(this, name); + return RetriesControlHolder(this, name, kind); } void WithRetries::renewZooKeeper(FaultyKeeper my_faulty_zookeeper) const @@ -62,8 +39,8 @@ void WithRetries::renewZooKeeper(FaultyKeeper my_faulty_zookeeper) const { zookeeper = get_zookeeper(); my_faulty_zookeeper->setKeeper(zookeeper); - - callback(my_faulty_zookeeper); + if (callback) + callback(my_faulty_zookeeper); } else { @@ -71,7 +48,7 @@ void WithRetries::renewZooKeeper(FaultyKeeper my_faulty_zookeeper) const } } -const WithRetries::KeeperSettings & WithRetries::getKeeperSettings() const +const BackupKeeperSettings & WithRetries::getKeeperSettings() const { return settings; } @@ -88,8 +65,8 @@ WithRetries::FaultyKeeper WithRetries::getFaultyZooKeeper() const /// The reason is that ZooKeeperWithFaultInjection may reset the underlying pointer and there could be a race condition /// when the same object is used from multiple threads. 
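
Editorial aside: the faulty [Zoo]Keeper wrapper created just below injects artificial failures with a configured probability and seed so that the retry machinery can be exercised deterministically. A small self-contained sketch of that idea, using plain std::mt19937_64 and std::bernoulli_distribution rather than the real ZooKeeperWithFaultInjection API; the class and setting names here are illustrative only.

```cpp
#include <iostream>
#include <random>
#include <stdexcept>

/// Wraps an operation and makes it fail with the given probability.
/// A fixed seed makes the failure pattern reproducible, which is the point of the
/// *_fault_injection_probability / *_fault_injection_seed pair of settings.
class FaultInjector
{
public:
    FaultInjector(double probability, uint64_t seed) : gen(seed), fail(probability) {}

    template <typename Op>
    auto run(Op && op)
    {
        if (fail(gen))
            throw std::runtime_error("injected fault");
        return op();
    }

private:
    std::mt19937_64 gen;
    std::bernoulli_distribution fail;
};

int main()
{
    FaultInjector injector(/* probability = */ 0.3, /* seed = */ 42);
    int ok = 0, failed = 0;
    for (int i = 0; i < 100; ++i)
    {
        try { ok += injector.run([] { return 1; }); }
        catch (const std::exception &) { ++failed; }
    }
    std::cout << ok << " succeeded, " << failed << " failed\n"; /// roughly 70 / 30
}
```
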
auto faulty_zookeeper = ZooKeeperWithFaultInjection::createInstance( - settings.keeper_fault_injection_probability, - settings.keeper_fault_injection_seed, + settings.fault_injection_probability, + settings.fault_injection_seed, current_zookeeper, log->name(), log); diff --git a/src/Backups/WithRetries.h b/src/Backups/WithRetries.h index f795a963911..e465fbb1e50 100644 --- a/src/Backups/WithRetries.h +++ b/src/Backups/WithRetries.h @@ -1,9 +1,11 @@ #pragma once -#include +#include #include +#include #include + namespace DB { @@ -15,20 +17,13 @@ class WithRetries { public: using FaultyKeeper = Coordination::ZooKeeperWithFaultInjection::Ptr; - using RenewerCallback = std::function; + using RenewerCallback = std::function; - struct KeeperSettings + enum Kind { - UInt64 keeper_max_retries{0}; - UInt64 keeper_retry_initial_backoff_ms{0}; - UInt64 keeper_retry_max_backoff_ms{0}; - UInt64 batch_size_for_keeper_multiread{10000}; - Float64 keeper_fault_injection_probability{0}; - UInt64 keeper_fault_injection_seed{42}; - UInt64 keeper_value_max_size{1048576}; - UInt64 batch_size_for_keeper_multi{1000}; - - static KeeperSettings fromContext(ContextPtr context); + kNormal, + kInitialization, + kErrorHandling, }; /// For simplicity a separate ZooKeeperRetriesInfo and a faulty [Zoo]Keeper client @@ -48,23 +43,23 @@ public: private: friend class WithRetries; - RetriesControlHolder(const WithRetries * parent, const String & name); + RetriesControlHolder(const WithRetries * parent, const String & name, Kind kind); }; - RetriesControlHolder createRetriesControlHolder(const String & name); - WithRetries(LoggerPtr log, zkutil::GetZooKeeper get_zookeeper_, const KeeperSettings & settings, QueryStatusPtr process_list_element_, RenewerCallback callback); + RetriesControlHolder createRetriesControlHolder(const String & name, Kind kind = Kind::kNormal) const; + WithRetries(LoggerPtr log, zkutil::GetZooKeeper get_zookeeper_, const BackupKeeperSettings & settings, QueryStatusPtr process_list_element_, RenewerCallback callback = {}); /// Used to re-establish new connection inside a retry loop. void renewZooKeeper(FaultyKeeper my_faulty_zookeeper) const; - const KeeperSettings & getKeeperSettings() const; + const BackupKeeperSettings & getKeeperSettings() const; private: /// This will provide a special wrapper which is useful for testing FaultyKeeper getFaultyZooKeeper() const; LoggerPtr log; zkutil::GetZooKeeper get_zookeeper; - KeeperSettings settings; + BackupKeeperSettings settings; QueryStatusPtr process_list_element; /// This callback is called each time when a new [Zoo]Keeper session is created. @@ -76,7 +71,6 @@ private: /// it could lead just to a failed backup which could possibly be successful /// if there were a little bit more retries. 
RenewerCallback callback; - ZooKeeperRetriesInfo global_zookeeper_retries_info; /// This is needed only to protect zookeeper object mutable std::mutex zookeeper_mutex; diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index f7d2d0035d9..f506d7c99f5 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -167,7 +167,8 @@ void ClientApplicationBase::init(int argc, char ** argv) ("query_kind", po::value()->default_value("initial_query"), "One of initial_query/secondary_query/no_query") ("query_id", po::value(), "query_id") - ("history_file", po::value(), "path to history file") + ("history_file", po::value(), "Path to a file containing command history.") + ("history_max_entries", po::value()->default_value(1000000), "Maximum number of entries in the history file.") ("stage", po::value()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit") ("progress", po::value()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::DEFAULT, "default"), "Print progress of queries execution - to TTY: tty|on|1|true|yes; to STDERR non-interactive mode: err; OFF: off|0|false|no; DEFAULT - interactive to TTY, non-interactive is off") @@ -350,6 +351,8 @@ void ClientApplicationBase::init(int argc, char ** argv) getClientConfiguration().setBool("highlight", options["highlight"].as()); if (options.count("history_file")) getClientConfiguration().setString("history_file", options["history_file"].as()); + if (options.count("history_max_entries")) + getClientConfiguration().setUInt("history_max_entries", options["history_max_entries"].as()); if (options.count("interactive")) getClientConfiguration().setBool("interactive", true); if (options.count("pager")) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index b6bf637ab44..0a824753dc0 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2665,6 +2665,8 @@ void ClientBase::runInteractive() } } + history_max_entries = getClientConfiguration().getUInt("history_max_entries"); + LineReader::Patterns query_extenders = {"\\"}; LineReader::Patterns query_delimiters = {";", "\\G", "\\G;"}; char word_break_characters[] = " \t\v\f\a\b\r\n`~!@#$%^&*()-=+[{]}\\|;:'\",<.>/?"; @@ -2677,6 +2679,7 @@ void ClientBase::runInteractive() ReplxxLineReader lr( *suggest, history_file, + history_max_entries, getClientConfiguration().has("multiline"), getClientConfiguration().getBool("ignore_shell_suspend", true), query_extenders, diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 75f09e1d0a2..6b261714ff6 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -328,6 +328,7 @@ protected: String home_path; String history_file; /// Path to a file containing command history. + UInt32 history_max_entries; /// Maximum number of entries in the history file. 
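
Editorial aside: the new client option can be passed on the command line (for example `clickhouse-client --history_max_entries 10000`) or via the `history_max_entries` configuration key read by getClientConfiguration(); as the ReplxxLineReader change below shows, the value is ultimately handed to replxx. A minimal sketch of capping history with replxx's public API, assuming set_max_history_size/history_add/history_save are available as in upstream replxx; the file path is illustrative, not the client's real default.

```cpp
#include <replxx.hxx>

int main()
{
    replxx::Replxx rx;

    /// Cap both the in-memory history and what gets persisted to the history file;
    /// this is the knob the new history_max_entries option feeds into.
    rx.set_max_history_size(10000);

    rx.history_add("SELECT 1");
    rx.history_add("SELECT version()");
    rx.history_save("client-history.txt");   /// illustrative path
    return 0;
}
```
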
String current_profile; diff --git a/src/Client/ReplxxLineReader.cpp b/src/Client/ReplxxLineReader.cpp index 37ceb471e5b..ee90a6cc7b7 100644 --- a/src/Client/ReplxxLineReader.cpp +++ b/src/Client/ReplxxLineReader.cpp @@ -293,6 +293,7 @@ void ReplxxLineReader::setLastIsDelimiter(bool flag) ReplxxLineReader::ReplxxLineReader( Suggest & suggest, const String & history_file_path_, + UInt32 history_max_entries_, bool multiline_, bool ignore_shell_suspend, Patterns extenders_, @@ -313,6 +314,8 @@ ReplxxLineReader::ReplxxLineReader( { using Replxx = replxx::Replxx; + rx.set_max_history_size(static_cast(history_max_entries_)); + if (!history_file_path.empty()) { history_file_fd = open(history_file_path.c_str(), O_RDWR); diff --git a/src/Client/ReplxxLineReader.h b/src/Client/ReplxxLineReader.h index 1dbad2c70dd..ccda47170e6 100644 --- a/src/Client/ReplxxLineReader.h +++ b/src/Client/ReplxxLineReader.h @@ -14,6 +14,7 @@ public: ( Suggest & suggest, const String & history_file_path, + UInt32 history_max_entries, bool multiline, bool ignore_shell_suspend, Patterns extenders_, diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index ec14b096055..a66f9041213 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -196,6 +196,13 @@ public: bool hasDynamicStructure() const override { return getData().hasDynamicStructure(); } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; + bool dynamicStructureEquals(const IColumn & rhs) const override + { + if (const auto * rhs_concrete = typeid_cast(&rhs)) + return data->dynamicStructureEquals(*rhs_concrete->data); + return false; + } + private: WrappedPtr data; WrappedPtr offsets; diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 41a9096bc0c..6eb22a8bdf7 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -1208,6 +1208,15 @@ void ColumnDynamic::prepareVariantsForSquashing(const Columns & source_columns) } } +bool ColumnDynamic::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_concrete = typeid_cast(&rhs)) + return max_dynamic_types == rhs_concrete->max_dynamic_types && global_max_dynamic_types == rhs_concrete->global_max_dynamic_types + && variant_info.variant_name == rhs_concrete->variant_info.variant_name + && variant_column->dynamicStructureEquals(*rhs_concrete->variant_column); + return false; +} + void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 57a1545a832..fbab4d5da4c 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -376,6 +376,7 @@ public: bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } bool hasDynamicStructure() const override { return true; } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; const StatisticsPtr & getStatistics() const { return statistics; } diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 7ebbed930d8..a5511dfeeb4 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -345,6 +345,13 @@ bool ColumnMap::structureEquals(const IColumn & rhs) const return false; } +bool ColumnMap::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_map = typeid_cast(&rhs)) + return 
nested->dynamicStructureEquals(*rhs_map->nested); + return false; +} + ColumnPtr ColumnMap::compress() const { auto compressed = nested->compress(); diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 575114f8d3a..8dfa5bb5845 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -123,6 +123,7 @@ public: ColumnPtr compress() const override; bool hasDynamicStructure() const override { return nested->hasDynamicStructure(); } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; }; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 18ba8ed36ee..f4121435be9 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1415,6 +1415,31 @@ void ColumnObject::prepareForSquashing(const std::vector & source_col } } +bool ColumnObject::dynamicStructureEquals(const IColumn & rhs) const +{ + const auto * rhs_object = typeid_cast(&rhs); + if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() + || global_max_dynamic_paths != rhs_object->global_max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types + || dynamic_paths.size() != rhs_object->dynamic_paths.size()) + return false; + + for (const auto & [path, column] : typed_paths) + { + auto it = rhs_object->typed_paths.find(path); + if (it == rhs_object->typed_paths.end() || !it->second->dynamicStructureEquals(*column)) + return false; + } + + for (const auto & [path, column] : dynamic_paths) + { + auto it = rhs_object->dynamic_paths.find(path); + if (it == rhs_object->dynamic_paths.end() || !it->second->dynamicStructureEquals(*column)) + return false; + } + + return true; +} + void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 74ae7e136ce..7b8a381d571 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -177,6 +177,7 @@ public: bool isFinalized() const override; bool hasDynamicStructure() const override { return true; } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; const PathToColumnMap & getTypedPaths() const { return typed_paths; } @@ -227,6 +228,7 @@ public: void setDynamicPaths(const std::vector & paths); void setDynamicPaths(const std::vector> & paths); void setMaxDynamicPaths(size_t max_dynamic_paths_); + void setGlobalMaxDynamicPaths(size_t global_max_dynamic_paths_); void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n); diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index c3f7d10f650..28e5f03cc3c 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -757,6 +757,26 @@ bool ColumnTuple::hasDynamicStructure() const return false; } +bool ColumnTuple::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_tuple = typeid_cast(&rhs)) + { + const size_t tuple_size = columns.size(); + if (tuple_size != rhs_tuple->columns.size()) + return false; + + for (size_t i = 0; i < tuple_size; ++i) + if (!columns[i]->dynamicStructureEquals(*rhs_tuple->columns[i])) + return false; + + return true; + } + else + { + return false; + } +} + void 
ColumnTuple::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { std::vector nested_source_columns; diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index c73f90f13d9..d5eee911edc 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -141,6 +141,7 @@ public: ColumnPtr & getColumnPtr(size_t idx) { return columns[idx]; } bool hasDynamicStructure() const override; + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; /// Empty tuple needs a public method to manage its size. diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index 564b60e1c1d..2fa59b8e33c 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -952,7 +952,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const if (hasOnlyNulls()) { if (limit) - return cloneResized(limit); + return cloneResized(limit ? std::min(size(), limit) : size()); /// If no limit, we can just return current immutable column. return this->getPtr(); @@ -1409,6 +1409,23 @@ bool ColumnVariant::structureEquals(const IColumn & rhs) const return true; } +bool ColumnVariant::dynamicStructureEquals(const IColumn & rhs) const +{ + const auto * rhs_variant = typeid_cast(&rhs); + if (!rhs_variant) + return false; + + const size_t num_variants = variants.size(); + if (num_variants != rhs_variant->variants.size()) + return false; + + for (size_t i = 0; i < num_variants; ++i) + if (!variants[i]->dynamicStructureEquals(rhs_variant->getVariantByGlobalDiscriminator(globalDiscriminatorByLocal(i)))) + return false; + + return true; +} + ColumnPtr ColumnVariant::compress() const { ColumnPtr local_discriminators_compressed = local_discriminators->compress(); diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index f90a812703d..a68a961169c 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -336,6 +336,7 @@ public: void extend(const std::vector & old_to_new_global_discriminators, std::vector> && new_variants_and_discriminators); bool hasDynamicStructure() const override; + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; private: diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index 95becba3fdb..c77b089812e 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -635,6 +635,9 @@ public: /// Checks if column has dynamic subcolumns. virtual bool hasDynamicStructure() const { return false; } + + /// For columns with dynamic subcolumns checks if columns have equal dynamic structure. + [[nodiscard]] virtual bool dynamicStructureEquals(const IColumn & rhs) const { return structureEquals(rhs); } /// For columns with dynamic subcolumns this method takes dynamic structure from source columns /// and creates proper resulting dynamic structure in advance for merge of these source columns. 
virtual void takeDynamicStructureFromSourceColumns(const std::vector & /*source_columns*/) {} diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index d68537513da..644c9a19738 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -251,7 +251,7 @@ void Exception::setThreadFramePointers(ThreadFramePointersBase frame_pointers) thread_frame_pointers.frame_pointers = std::move(frame_pointers); } -static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) +static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message, LogsLevel level) { if (!isLoggingEnabled()) return; @@ -262,14 +262,25 @@ static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string if (!start_of_message.empty()) message.text = fmt::format("{}: {}", start_of_message, message.text); - LOG_ERROR(logger, message); + switch (level) + { + case LogsLevel::none: break; + case LogsLevel::test: LOG_TEST(logger, message); break; + case LogsLevel::trace: LOG_TRACE(logger, message); break; + case LogsLevel::debug: LOG_DEBUG(logger, message); break; + case LogsLevel::information: LOG_INFO(logger, message); break; + case LogsLevel::warning: LOG_WARNING(logger, message); break; + case LogsLevel::error: LOG_ERROR(logger, message); break; + case LogsLevel::fatal: LOG_FATAL(logger, message); break; + } + } catch (...) // NOLINT(bugprone-empty-catch) { } } -void tryLogCurrentException(const char * log_name, const std::string & start_of_message) +void tryLogCurrentException(const char * log_name, const std::string & start_of_message, LogsLevel level) { if (!isLoggingEnabled()) return; @@ -283,10 +294,10 @@ void tryLogCurrentException(const char * log_name, const std::string & start_of_ /// getLogger can allocate memory too auto logger = getLogger(log_name); - tryLogCurrentExceptionImpl(logger.get(), start_of_message); + tryLogCurrentExceptionImpl(logger.get(), start_of_message, level); } -void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message) +void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message, LogsLevel level) { /// Under high memory pressure, new allocations throw a /// MEMORY_LIMIT_EXCEEDED exception. @@ -295,17 +306,17 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_ /// MemoryTracker until the exception will be logged. 
LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global); - tryLogCurrentExceptionImpl(logger, start_of_message); + tryLogCurrentExceptionImpl(logger, start_of_message, level); } -void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message) +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message, LogsLevel level) { - tryLogCurrentException(logger.get(), start_of_message); + tryLogCurrentException(logger.get(), start_of_message, level); } -void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message) +void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message, LogsLevel level) { - tryLogCurrentException(logger.load(), start_of_message); + tryLogCurrentException(logger.load(), start_of_message, level); } static void getNoSpaceLeftInfoMessage(std::filesystem::path path, String & msg) @@ -627,7 +638,7 @@ PreformattedMessage getExceptionMessageAndPattern(const Exception & e, bool with return PreformattedMessage{stream.str(), e.tryGetMessageFormatString(), e.getMessageFormatStringArgs()}; } -std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace) +std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace, bool check_embedded_stacktrace) { try { @@ -635,7 +646,7 @@ std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace) } catch (...) { - return getCurrentExceptionMessage(with_stacktrace); + return getCurrentExceptionMessage(with_stacktrace, check_embedded_stacktrace); } } diff --git a/src/Common/Exception.h b/src/Common/Exception.h index a4f55f41caa..edc1b95bca4 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -276,10 +277,10 @@ using Exceptions = std::vector; * Can be used in destructors in the catch-all block. */ /// TODO: Logger leak constexpr overload -void tryLogCurrentException(const char * log_name, const std::string & start_of_message = ""); -void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message = ""); -void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message = ""); -void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message = ""); +void tryLogCurrentException(const char * log_name, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); +void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); +void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); /** Prints current exception in canonical format. 
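
Editorial aside: the overloads above now take a LogsLevel argument (defaulting to error) so that callers can log expected or benign failures at a lower severity instead of always emitting ERROR. A self-contained sketch of the same contract outside ClickHouse, with a hypothetical Level enum standing in for DB::LogsLevel; like tryLogCurrentException, the helper must be called from inside a catch block.

```cpp
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

/// Hypothetical severity enum standing in for DB::LogsLevel.
enum class Level { None, Trace, Debug, Information, Warning, Error, Fatal };

/// Log whatever exception is currently being handled at the requested severity,
/// swallowing any secondary failure so the helper is safe to call from destructors.
void tryLogCurrent(const std::string & prefix, Level level = Level::Error) noexcept
{
    try
    {
        std::string text = prefix;
        try { throw; }   /// rethrow the currently handled exception to inspect it
        catch (const std::exception & e) { text += ": "; text += e.what(); }
        catch (...) { text += ": unknown exception"; }

        switch (level)
        {
            case Level::None:        break;
            case Level::Trace:       std::clog << "[trace] "   << text << '\n'; break;
            case Level::Debug:       std::clog << "[debug] "   << text << '\n'; break;
            case Level::Information: std::clog << "[info] "    << text << '\n'; break;
            case Level::Warning:     std::clog << "[warning] " << text << '\n'; break;
            case Level::Error:       std::clog << "[error] "   << text << '\n'; break;
            case Level::Fatal:       std::clog << "[fatal] "   << text << '\n'; break;
        }
    }
    catch (...) { /* never propagate from a logging helper */ }
}

int main()
{
    try { throw std::runtime_error("disk is full"); }
    catch (...) { tryLogCurrent("while writing part", Level::Warning); }
}
```
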
@@ -329,7 +330,7 @@ void tryLogException(std::exception_ptr e, const AtomicLogger & logger, const st std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace = false); PreformattedMessage getExceptionMessageAndPattern(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace = false); -std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace); +std::string getExceptionMessage(std::exception_ptr e, bool with_stacktrace, bool check_embedded_stacktrace = false); template diff --git a/src/Common/FieldVisitorScale.cpp b/src/Common/FieldVisitorScale.cpp new file mode 100644 index 00000000000..a6c0f6d0c5b --- /dev/null +++ b/src/Common/FieldVisitorScale.cpp @@ -0,0 +1,30 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +FieldVisitorScale::FieldVisitorScale(Int32 rhs_) : rhs(rhs_) {} + +void FieldVisitorScale::operator() (Int64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (UInt64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (Float64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (Null &) const { /*Do not scale anything*/ } + +void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Strings"); } +void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Arrays"); } +void FieldVisitorScale::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Tuples"); } +void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Maps"); } +void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Objects"); } +void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale UUIDs"); } +void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv4s"); } +void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv6s"); } +void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale custom type {}", x.getTypeName()); } +void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale AggregateFunctionStates"); } +void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Bools"); } + +} diff --git a/src/Common/FieldVisitorScale.h b/src/Common/FieldVisitorScale.h new file mode 100644 index 00000000000..90d86cc53bd --- /dev/null +++ b/src/Common/FieldVisitorScale.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/** Implements `*=` operation by number + */ +class FieldVisitorScale : public StaticVisitor +{ +private: + Int32 rhs; + +public: + explicit FieldVisitorScale(Int32 rhs_); + + void operator() (Int64 & x) const; + void operator() (UInt64 & x) const; + void operator() (Float64 & x) const; + void operator() (Null &) const; + [[noreturn]] void operator() (String &) const; + [[noreturn]] void operator() (Array &) const; + [[noreturn]] void operator() (Tuple &) const; + [[noreturn]] void operator() (Map &) const; + [[noreturn]] void operator() (Object &) const; + [[noreturn]] void operator() (UUID &) const; + [[noreturn]] void operator() (IPv4 &) const; + 
[[noreturn]] void operator() (IPv6 &) const; + [[noreturn]] void operator() (AggregateFunctionStateData &) const; + [[noreturn]] void operator() (CustomType &) const; + [[noreturn]] void operator() (bool &) const; + + template + void operator() (DecimalField & x) const { x = DecimalField(x.getValue() * T(rhs), x.getScale()); } + + template + requires is_big_int_v + void operator() (T & x) const { x *= rhs; } +}; + +} diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h index f4374a0f2ca..d379c3f6a87 100644 --- a/src/Common/HashTable/HashTable.h +++ b/src/Common/HashTable/HashTable.h @@ -658,16 +658,11 @@ protected: { if (!std::is_trivially_destructible_v) { - for (iterator it = begin(), it_end = end(); it != it_end; ++it) + for (iterator it = begin(), it_end = end(); it != it_end;) { - it.ptr->~Cell(); - /// In case of poison_in_dtor=1 it will be poisoned, - /// but it maybe used later, during iteration. - /// - /// NOTE, that technically this is UB [1], but OK for now. - /// - /// [1]: https://github.com/google/sanitizers/issues/854#issuecomment-329661378 - __msan_unpoison(it.ptr, sizeof(*it.ptr)); + auto ptr = it.ptr; + ++it; + ptr->~Cell(); } /// Everything had been destroyed in the loop above, reset the flag diff --git a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp index b8413bfadd7..8bb411f1437 100644 --- a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp +++ b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp @@ -568,7 +568,7 @@ std::vector NamedCollectionsMetadataStorage::listCollections() cons std::vector collections; collections.reserve(paths.size()); for (const auto & path : paths) - collections.push_back(std::filesystem::path(path).stem()); + collections.push_back(unescapeForFileName(std::filesystem::path(path).stem())); return collections; } diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 3a102238fbe..7b9f670d340 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -547,6 +547,7 @@ The server successfully detected this situation and will download merged part fr M(FilesystemCacheLoadMetadataMicroseconds, "Time spent loading filesystem cache metadata", ValueType::Microseconds) \ M(FilesystemCacheEvictedBytes, "Number of bytes evicted from filesystem cache", ValueType::Bytes) \ M(FilesystemCacheEvictedFileSegments, "Number of file segments evicted from filesystem cache", ValueType::Number) \ + M(FilesystemCacheBackgroundDownloadQueuePush, "Number of file segments sent for background download in filesystem cache", ValueType::Number) \ M(FilesystemCacheEvictionSkippedFileSegments, "Number of file segments skipped for eviction because of being in unreleasable state", ValueType::Number) \ M(FilesystemCacheEvictionSkippedEvictingFileSegments, "Number of file segments skipped for eviction because of being in evicting state", ValueType::Number) \ M(FilesystemCacheEvictionTries, "Number of filesystem cache eviction attempts", ValueType::Number) \ @@ -745,6 +746,12 @@ The server successfully detected this situation and will download merged part fr M(ReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). 
Measured on the remote server side.", ValueType::Microseconds) \ M(MergeTreeReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.", ValueType::Microseconds) \ M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.", ValueType::Microseconds) \ + M(MergerMutatorsGetPartsForMergeElapsedMicroseconds, "Time spent to take data parts snapshot to build ranges from them.", ValueType::Microseconds) \ + M(MergerMutatorPrepareRangesForMergeElapsedMicroseconds, "Time spent to prepare parts ranges which can be merged according to merge predicate.", ValueType::Microseconds) \ + M(MergerMutatorSelectPartsForMergeElapsedMicroseconds, "Time spent to select parts from ranges which can be merged.", ValueType::Microseconds) \ + M(MergerMutatorRangesForMergeCount, "Amount of candidate ranges for merge", ValueType::Number) \ + M(MergerMutatorPartsInRangesForMergeCount, "Amount of candidate parts for merge", ValueType::Number) \ + M(MergerMutatorSelectRangePartsCount, "Amount of parts in selected range for merge", ValueType::Number) \ \ M(ConnectionPoolIsFullMicroseconds, "Total time spent waiting for a slot in connection pool.", ValueType::Microseconds) \ M(AsyncLoaderWaitMicroseconds, "Total time a query was waiting for async loader jobs.", ValueType::Microseconds) \ diff --git a/src/Common/Scheduler/Nodes/UnifiedSchedulerNode.h b/src/Common/Scheduler/Nodes/UnifiedSchedulerNode.h index 84923c49c62..2c4b7c4f3bc 100644 --- a/src/Common/Scheduler/Nodes/UnifiedSchedulerNode.h +++ b/src/Common/Scheduler/Nodes/UnifiedSchedulerNode.h @@ -492,9 +492,9 @@ public: nodes.push_back(impl.semaphore); if (impl.branch.queue) nodes.push_back(impl.branch.queue); - for (auto & [_, branch] : impl.branch.branch.branches) + for (auto & [_0, branch] : impl.branch.branch.branches) { - for (auto & [_, child] : branch.children) + for (auto & [_1, child] : branch.children) child->addRawPointerNodes(nodes); } } diff --git a/src/Common/Scheduler/Workload/WorkloadEntityStorageBase.cpp b/src/Common/Scheduler/Workload/WorkloadEntityStorageBase.cpp index 1b7a559698c..c758111a53e 100644 --- a/src/Common/Scheduler/Workload/WorkloadEntityStorageBase.cpp +++ b/src/Common/Scheduler/Workload/WorkloadEntityStorageBase.cpp @@ -48,9 +48,9 @@ ASTPtr normalizeCreateWorkloadEntityQuery(const IAST & create_query) /// Returns a type of a workload entity `ptr` WorkloadEntityType getEntityType(const ASTPtr & ptr) { - if (auto * res = typeid_cast(ptr.get())) + if (auto * res = typeid_cast(ptr.get()); res) return WorkloadEntityType::Workload; - if (auto * res = typeid_cast(ptr.get())) + if (auto * res = typeid_cast(ptr.get()); res) return WorkloadEntityType::Resource; chassert(false); return WorkloadEntityType::MAX; @@ -106,7 +106,7 @@ void forEachReference( for (const String & resource : resources) func(resource, res->getWorkloadName(), ReferenceType::ForResource); } - if (auto * res = typeid_cast(source_entity.get())) + if (auto * res = typeid_cast(source_entity.get()); res) { // RESOURCE has no references to be validated, we allow mentioned disks to be created later } @@ -578,15 +578,15 @@ void WorkloadEntityStorageBase::setAllEntities(const std::vectorsecond)) { changes.emplace_back(entity_name, entity, it->second); // Update 
entities that are present in both `new_entities` and `entities` - LOG_TRACE(log, "Entity {} was updated", entity_name); + LOG_TRACE(log, "Workload entity {} was updated", entity_name); } else - LOG_TRACE(log, "Entity {} is the same", entity_name); + LOG_TRACE(log, "Workload entity {} is the same", entity_name); } else { changes.emplace_back(entity_name, entity, ASTPtr{}); // Remove entities that are not present in `new_entities` - LOG_TRACE(log, "Entity {} was dropped", entity_name); + LOG_TRACE(log, "Workload entity {} was dropped", entity_name); } } for (const auto & [entity_name, entity] : new_entities) @@ -594,7 +594,7 @@ void WorkloadEntityStorageBase::setAllEntities(const std::vectorcheckTimeLimit(); @@ -228,6 +230,10 @@ private: sleepForMilliseconds(current_backoff_ms); current_backoff_ms = std::min(current_backoff_ms * 2, retries_info.max_backoff_ms); + /// Check if the query was cancelled again after sleeping. + if (process_list_element) + process_list_element->checkTimeLimit(); + return true; } diff --git a/src/Core/ServerSettings.cpp b/src/Core/ServerSettings.cpp index a48d90730a8..92353d16b0c 100644 --- a/src/Core/ServerSettings.cpp +++ b/src/Core/ServerSettings.cpp @@ -193,6 +193,13 @@ namespace DB DECLARE(UInt64, keeper_multiread_batch_size, 10'000, "Maximum size of batch for MultiRead request to [Zoo]Keeper that support batching. If set to 0, batching is disabled. Available only in ClickHouse Cloud.", 0) \ DECLARE(Bool, use_legacy_mongodb_integration, true, "Use the legacy MongoDB integration implementation. Note: it's highly recommended to set this option to false, since legacy implementation will be removed in the future. Please submit any issues you encounter with the new implementation.", 0) \ DECLARE(Bool, validate_access_consistency_between_instances, true, "Validate that the instance has the same user with exactly the same access before executing a DDL query. Note: turning this setting off may expose your cluster to potential permission escalation. Change this setting only if you know what you are doing.", 0) \ + \ + DECLARE(UInt64, prefetch_threadpool_pool_size, 100, "Size of background pool for prefetches for remote object storages", 0) \ + DECLARE(UInt64, prefetch_threadpool_queue_size, 1000000, "Number of tasks which is possible to push into prefetches pool", 0) \ + DECLARE(UInt64, load_marks_threadpool_pool_size, 50, "Size of background pool for marks loading", 0) \ + DECLARE(UInt64, load_marks_threadpool_queue_size, 1000000, "Number of tasks which is possible to push into prefetches pool", 0) \ + DECLARE(UInt64, threadpool_writer_pool_size, 100, "Size of background pool for write requests to object storages", 0) \ + DECLARE(UInt64, threadpool_writer_queue_size, 1000000, "Number of tasks which is possible to push into background pool for write requests to object storages", 0) /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in dumpToSystemServerSettingsColumns below diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index a7b101b6e25..9ec3c25314a 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -873,6 +873,12 @@ In CREATE TABLE statement allows specifying Variant type with similar variant ty )", 0) \ DECLARE(Bool, allow_suspicious_primary_key, false, R"( Allow suspicious `PRIMARY KEY`/`ORDER BY` for MergeTree (i.e. SimpleAggregateFunction). 
+)", 0) \ + DECLARE(Bool, allow_suspicious_types_in_group_by, false, R"( +Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys. +)", 0) \ + DECLARE(Bool, allow_suspicious_types_in_order_by, false, R"( +Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys. )", 0) \ DECLARE(Bool, compile_expressions, false, R"( Compile some scalar functions and operators to native code. Due to a bug in the LLVM compiler infrastructure, on AArch64 machines, it is known to lead to a nullptr dereference and, consequently, server crash. Do not enable this setting. @@ -2665,29 +2671,44 @@ The maximum amount of data consumed by temporary files on disk in bytes for all The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited. )", 0)\ \ - DECLARE(UInt64, backup_restore_keeper_max_retries, 20, R"( -Max retries for keeper operations during backup or restore + DECLARE(UInt64, backup_restore_keeper_max_retries, 1000, R"( +Max retries for [Zoo]Keeper operations in the middle of a BACKUP or RESTORE operation. +Should be big enough so the whole operation won't fail because of a temporary [Zoo]Keeper failure. )", 0) \ DECLARE(UInt64, backup_restore_keeper_retry_initial_backoff_ms, 100, R"( Initial backoff timeout for [Zoo]Keeper operations during backup or restore )", 0) \ DECLARE(UInt64, backup_restore_keeper_retry_max_backoff_ms, 5000, R"( Max backoff timeout for [Zoo]Keeper operations during backup or restore +)", 0) \ + DECLARE(UInt64, backup_restore_failure_after_host_disconnected_for_seconds, 3600, R"( +If a host during a BACKUP ON CLUSTER or RESTORE ON CLUSTER operation doesn't recreate its ephemeral 'alive' node in ZooKeeper for this amount of time then the whole backup or restore is considered as failed. +This value should be bigger than any reasonable time for a host to reconnect to ZooKeeper after a failure. +Zero means unlimited. +)", 0) \ + DECLARE(UInt64, backup_restore_keeper_max_retries_while_initializing, 20, R"( +Max retries for [Zoo]Keeper operations during the initialization of a BACKUP ON CLUSTER or RESTORE ON CLUSTER operation. +)", 0) \ + DECLARE(UInt64, backup_restore_keeper_max_retries_while_handling_error, 20, R"( +Max retries for [Zoo]Keeper operations while handling an error of a BACKUP ON CLUSTER or RESTORE ON CLUSTER operation. +)", 0) \ + DECLARE(UInt64, backup_restore_finish_timeout_after_error_sec, 180, R"( +How long the initiator should wait for other host to react to the 'error' node and stop their work on the current BACKUP ON CLUSTER or RESTORE ON CLUSTER operation. +)", 0) \ + DECLARE(UInt64, backup_restore_keeper_value_max_size, 1048576, R"( +Maximum size of data of a [Zoo]Keeper's node during backup +)", 0) \ + DECLARE(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, R"( +Maximum size of batch for multi request to [Zoo]Keeper during backup or restore +)", 0) \ + DECLARE(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, R"( +Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore )", 0) \ DECLARE(Float, backup_restore_keeper_fault_injection_probability, 0.0f, R"( Approximate probability of failure for a keeper request during backup or restore. 
Valid value is in interval [0.0f, 1.0f] )", 0) \ DECLARE(UInt64, backup_restore_keeper_fault_injection_seed, 0, R"( 0 - random seed, otherwise the setting value -)", 0) \ - DECLARE(UInt64, backup_restore_keeper_value_max_size, 1048576, R"( -Maximum size of data of a [Zoo]Keeper's node during backup -)", 0) \ - DECLARE(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, R"( -Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore -)", 0) \ - DECLARE(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, R"( -Maximum size of batch for multi request to [Zoo]Keeper during backup or restore )", 0) \ DECLARE(UInt64, backup_restore_s3_retry_attempts, 1000, R"( Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore. @@ -4221,7 +4242,7 @@ Rewrite aggregate functions with if expression as argument when logically equiva For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance. :::note -Supported only with experimental analyzer (`enable_analyzer = 1`). +Supported only with the analyzer (`enable_analyzer = 1`). ::: )", 0) \ DECLARE(Bool, optimize_rewrite_array_exists_to_has, false, R"( @@ -5111,6 +5132,15 @@ Only in ClickHouse Cloud. A maximum number of unacknowledged in-flight packets i )", 0) \ DECLARE(UInt64, distributed_cache_data_packet_ack_window, DistributedCache::ACK_DATA_PACKET_WINDOW, R"( Only in ClickHouse Cloud. A window for sending ACK for DataPacket sequence in a single distributed cache read request +)", 0) \ + DECLARE(Bool, distributed_cache_discard_connection_if_unread_data, true, R"( +Only in ClickHouse Cloud. Discard connection if some data is unread. +)", 0) \ + DECLARE(Bool, filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage, true, R"( +Only in ClickHouse Cloud. Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + DECLARE(Bool, filesystem_cache_enable_background_download_during_fetch, true, R"( +Only in ClickHouse Cloud. Wait time to lock cache for space reservation in filesystem cache )", 0) \ \ DECLARE(Bool, parallelize_output_from_storages, true, R"( @@ -5121,6 +5151,7 @@ The setting allows a user to provide own deduplication semantic in MergeTree/Rep For example, by providing a unique value for the setting in each INSERT statement, user can avoid the same inserted data being deduplicated. + Possible values: - Any string @@ -5595,7 +5626,7 @@ If true, and JOIN can be executed with parallel replicas algorithm, and all stor DECLARE(UInt64, parallel_replicas_mark_segment_size, 0, R"( Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing. Value should be in range [128; 16384] )", BETA) \ - DECLARE(Bool, parallel_replicas_local_plan, false, R"( + DECLARE(Bool, parallel_replicas_local_plan, true, R"( Build local plan for local replica )", BETA) \ \ @@ -5834,7 +5865,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs // Please add settings related to formats in Core/FormatFactorySettings.h, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS. #define OBSOLETE_SETTINGS(M, ALIAS) \ - /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. 
*/ \ + /** Obsolete settings which are kept around for compatibility reasons. They have no effect anymore. */ \ MAKE_OBSOLETE(M, Bool, update_insert_deduplication_token_in_dependent_materialized_views, 0) \ MAKE_OBSOLETE(M, UInt64, max_memory_usage_for_all_queries, 0) \ MAKE_OBSOLETE(M, UInt64, multiple_joins_rewriter_version, 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 3fe3e960dc6..c6223bef2b2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,18 @@ static std::initializer_listsize(), perm); + } /// Set the cursor to the beginning of the new block. - void reset(const Columns & columns, const Block & block, IColumn::Permutation * perm = nullptr) + void reset(const Columns & columns, const Block & block, UInt64 num_rows, IColumn::Permutation * perm = nullptr) { all_columns.clear(); sort_columns.clear(); @@ -125,7 +136,7 @@ struct SortCursorImpl } pos = 0; - rows = all_columns[0]->size(); + rows = num_rows; permutation = perm; } @@ -195,6 +206,15 @@ struct SortCursorHelper /// The last row of this cursor is no larger than the first row of the another cursor. return !derived().greaterAt(rhs.derived(), impl->rows - 1, 0); } + + bool ALWAYS_INLINE totallyLess(const SortCursorHelper & rhs) const + { + if (impl->rows == 0 || rhs.impl->rows == 0) + return false; + + /// The last row of this cursor is less than the first row of the another cursor. + return rhs.derived().template greaterAt(derived(), 0, impl->rows - 1); + } }; @@ -203,6 +223,7 @@ struct SortCursor : SortCursorHelper using SortCursorHelper::SortCursorHelper; /// The specified row of this cursor is greater than the specified row of another cursor. + template bool ALWAYS_INLINE greaterAt(const SortCursor & rhs, size_t lhs_pos, size_t rhs_pos) const { #if USE_EMBEDDED_COMPILER @@ -218,7 +239,10 @@ struct SortCursor : SortCursorHelper if (res < 0) return false; - return impl->order > rhs.impl->order; + if constexpr (consider_order) + return impl->order > rhs.impl->order; + else + return false; } #endif @@ -235,7 +259,10 @@ struct SortCursor : SortCursorHelper return false; } - return impl->order > rhs.impl->order; + if constexpr (consider_order) + return impl->order > rhs.impl->order; + else + return false; } }; @@ -245,6 +272,7 @@ struct SimpleSortCursor : SortCursorHelper { using SortCursorHelper::SortCursorHelper; + template bool ALWAYS_INLINE greaterAt(const SimpleSortCursor & rhs, size_t lhs_pos, size_t rhs_pos) const { int res = 0; @@ -271,7 +299,10 @@ struct SimpleSortCursor : SortCursorHelper if (res < 0) return false; - return impl->order > rhs.impl->order; + if constexpr (consider_order) + return impl->order > rhs.impl->order; + else + return false; } }; @@ -280,6 +311,7 @@ struct SpecializedSingleColumnSortCursor : SortCursorHelper::SortCursorHelper; + template bool ALWAYS_INLINE greaterAt(const SortCursorHelper & rhs, size_t lhs_pos, size_t rhs_pos) const { auto & this_impl = this->impl; @@ -302,7 +334,10 @@ struct SpecializedSingleColumnSortCursor : SortCursorHelperorder > rhs.impl->order; + if constexpr (consider_order) + return this_impl->order > rhs.impl->order; + else + return false; } }; @@ -311,6 +346,7 @@ struct SortCursorWithCollation : SortCursorHelper { using SortCursorHelper::SortCursorHelper; + template bool ALWAYS_INLINE greaterAt(const SortCursorWithCollation & rhs, size_t lhs_pos, size_t rhs_pos) const { for (size_t i = 0; i < impl->sort_columns_size; ++i) @@ -330,7 +366,10 @@ struct 
SortCursorWithCollation : SortCursorHelper if (res < 0) return false; } - return impl->order > rhs.impl->order; + if constexpr (consider_order) + return impl->order > rhs.impl->order; + else + return false; } }; diff --git a/src/Core/SortDescription.h b/src/Core/SortDescription.h index 5c6f3e3150a..7a7c92f3b53 100644 --- a/src/Core/SortDescription.h +++ b/src/Core/SortDescription.h @@ -33,9 +33,12 @@ struct FillColumnDescription DataTypePtr fill_to_type; Field fill_step; /// Default = +1 or -1 according to direction std::optional step_kind; + Field fill_staleness; /// Default = Null - should not be considered + std::optional staleness_kind; - using StepFunction = std::function; + using StepFunction = std::function; StepFunction step_func; + StepFunction staleness_step_func; }; /// Description of the sorting rule by one column. diff --git a/src/Core/fuzzers/CMakeLists.txt b/src/Core/fuzzers/CMakeLists.txt index c60ce0e097f..51db6fa0b53 100644 --- a/src/Core/fuzzers/CMakeLists.txt +++ b/src/Core/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp) -target_link_libraries (names_and_types_fuzzer PRIVATE) +target_link_libraries (names_and_types_fuzzer PRIVATE dbms) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 18bfed9c5c3..30bf470083d 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -1,6 +1,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -230,6 +233,15 @@ MutableColumnPtr DataTypeObject::createColumn() const return ColumnObject::create(std::move(typed_path_columns), max_dynamic_paths, max_dynamic_types); } +void DataTypeObject::forEachChild(const ChildCallback & callback) const +{ + for (const auto & [path, type] : typed_paths) + { + callback(*type); + type->forEachChild(callback); + } +} + namespace { @@ -522,6 +534,13 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: return std::make_shared(schema_format, std::move(typed_paths), std::move(paths_to_skip), std::move(path_regexps_to_skip), max_dynamic_paths, max_dynamic_types); } +const DataTypePtr & DataTypeObject::getTypeOfSharedData() +{ + /// Array(Tuple(String, String)) + static const DataTypePtr type = std::make_shared(std::make_shared(DataTypes{std::make_shared(), std::make_shared()}, Names{"paths", "values"})); + return type; +} + static DataTypePtr createJSON(const ASTPtr & arguments) { auto context = CurrentThread::getQueryContext(); diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 7eb2e7729de..70e2d4d177d 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -50,6 +50,8 @@ public: bool equals(const IDataType & rhs) const override; + void forEachChild(const ChildCallback &) const override; + bool hasDynamicSubcolumnsData() const override { return true; } std::unique_ptr getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const override; @@ -63,6 +65,9 @@ public: size_t getMaxDynamicTypes() const { return max_dynamic_types; } size_t getMaxDynamicPaths() const { return max_dynamic_paths; } + /// Shared data has type Array(Tuple(String, String)). + static const DataTypePtr & getTypeOfSharedData(); + private: SchemaFormat schema_format; /// Set of paths with types that were specified in type declaration. 
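The `Array(Tuple(String, String))` type returned by `getTypeOfSharedData()` above holds, for every row, the JSON paths that did not get a dedicated dynamic subcolumn together with their values serialized as strings (this is how the surrounding serialization code uses it). A minimal standalone C++ sketch of that layout, using plain standard-library containers rather than ClickHouse's column classes (the struct and field names are hypothetical, for illustration only):

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Conceptual model of the JSON shared-data subcolumn: per row, a list of
// (path, value) string pairs -- i.e. Array(Tuple(String, String)) with the
// tuple element names ("paths", "values").
struct SharedDataRow
{
    std::vector<std::pair<std::string, std::string>> paths_and_values;
};

int main()
{
    std::vector<SharedDataRow> shared_data;

    // Row 0: two rarely used paths ended up in shared data for this row.
    shared_data.push_back({{{"a.b", "42"}, {"a.c", "\"hello\""}}});
    // Row 1: nothing overflowed for this row, the array is empty.
    shared_data.push_back({});

    for (size_t row = 0; row < shared_data.size(); ++row)
        for (const auto & [path, value] : shared_data[row].paths_and_values)
            std::cout << "row " << row << ": " << path << " = " << value << '\n';
    return 0;
}
```

The point of such a layout is that the set of real subcolumns stays bounded while arbitrary paths can still be stored per row; the sketch above only illustrates the shape of the data, not ClickHouse's actual column implementation.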
diff --git a/src/DataTypes/Serializations/ISerialization.cpp b/src/DataTypes/Serializations/ISerialization.cpp index fdcdf9e0cda..5a60dc30b02 100644 --- a/src/DataTypes/Serializations/ISerialization.cpp +++ b/src/DataTypes/Serializations/ISerialization.cpp @@ -161,7 +161,7 @@ String getNameForSubstreamPath( String stream_name, SubstreamIterator begin, SubstreamIterator end, - bool escape_tuple_delimiter) + bool escape_for_file_name) { using Substream = ISerialization::Substream; @@ -186,7 +186,7 @@ String getNameForSubstreamPath( /// Because nested data may be represented not by Array of Tuple, /// but by separate Array columns with names in a form of a.b, /// and name is encoded as a whole. - if (it->type == Substream::TupleElement && escape_tuple_delimiter) + if (it->type == Substream::TupleElement && escape_for_file_name) stream_name += escapeForFileName(substream_name); else stream_name += substream_name; @@ -206,7 +206,7 @@ String getNameForSubstreamPath( else if (it->type == SubstreamType::ObjectSharedData) stream_name += ".object_shared_data"; else if (it->type == SubstreamType::ObjectTypedPath || it->type == SubstreamType::ObjectDynamicPath) - stream_name += "." + it->object_path_name; + stream_name += "." + (escape_for_file_name ? escapeForFileName(it->object_path_name) : it->object_path_name); } return stream_name; @@ -434,6 +434,14 @@ bool ISerialization::isDynamicSubcolumn(const DB::ISerialization::SubstreamPath return false; } +bool ISerialization::isLowCardinalityDictionarySubcolumn(const DB::ISerialization::SubstreamPath & path) +{ + if (path.empty()) + return false; + + return path[path.size() - 1].type == SubstreamType::DictionaryKeys; +} + ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len) { assert(prefix_len <= path.size()); diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 7bd58a8a981..400bdbf32d3 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -463,6 +463,8 @@ public: /// Returns true if stream with specified path corresponds to dynamic subcolumn. 
static bool isDynamicSubcolumn(const SubstreamPath & path, size_t prefix_len); + static bool isLowCardinalityDictionarySubcolumn(const SubstreamPath & path); + protected: template State * checkAndGetState(const StatePtr & state) const; diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 109f14d49f0..91c8797d43f 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -26,8 +26,8 @@ namespace ErrorCodes struct SerializeBinaryBulkStateDynamic : public ISerialization::SerializeBinaryBulkState { - SerializationDynamic::DynamicStructureSerializationVersion structure_version; - size_t max_dynamic_types; + SerializationDynamic::DynamicSerializationVersion structure_version; + size_t num_dynamic_types; DataTypePtr variant_type; Names variant_names; SerializationPtr variant_serialization; @@ -81,15 +81,15 @@ void SerializationDynamic::enumerateStreams( settings.path.pop_back(); } -SerializationDynamic::DynamicStructureSerializationVersion::DynamicStructureSerializationVersion(UInt64 version) : value(static_cast(version)) +SerializationDynamic::DynamicSerializationVersion::DynamicSerializationVersion(UInt64 version) : value(static_cast(version)) { checkVersion(version); } -void SerializationDynamic::DynamicStructureSerializationVersion::checkVersion(UInt64 version) +void SerializationDynamic::DynamicSerializationVersion::checkVersion(UInt64 version) { - if (version != VariantTypeName) - throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization."); + if (version != V1 && version != V2) + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization: {}", version); } void SerializationDynamic::serializeBinaryBulkStatePrefix( @@ -108,22 +108,17 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state prefix"); /// Write structure serialization version. - UInt64 structure_version = DynamicStructureSerializationVersion::Value::VariantTypeName; + UInt64 structure_version = DynamicSerializationVersion::Value::V2; writeBinaryLittleEndian(structure_version, *stream); auto dynamic_state = std::make_shared(structure_version); - dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); - /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types - /// that is specified in the Dynamic type (we could decrease it before merge). - writeVarUInt(dynamic_state->max_dynamic_types, *stream); - dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; const auto & variant_column = column_dynamic.getVariantColumn(); - /// Write information about variants. - size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. - writeVarUInt(num_variants, *stream); + /// Write information about dynamic types. 
+ dynamic_state->num_dynamic_types = dynamic_state->variant_names.size() - 1; /// -1 for SharedVariant + writeVarUInt(dynamic_state->num_dynamic_types, *stream); if (settings.data_types_binary_encoding) { const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); @@ -251,22 +246,25 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD UInt64 structure_version; readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); - /// Read max_dynamic_types parameter. - readVarUInt(structure_state->max_dynamic_types, *structure_stream); + if (structure_state->structure_version.value == DynamicSerializationVersion::Value::V1) + { + /// Skip max_dynamic_types parameter in V1 serialization version. + size_t max_dynamic_types; + readVarUInt(max_dynamic_types, *structure_stream); + } /// Read information about variants. DataTypes variants; - size_t num_variants; - readVarUInt(num_variants, *structure_stream); - variants.reserve(num_variants + 1); /// +1 for shared variant. + readVarUInt(structure_state->num_dynamic_types, *structure_stream); + variants.reserve(structure_state->num_dynamic_types + 1); /// +1 for shared variant. if (settings.data_types_binary_encoding) { - for (size_t i = 0; i != num_variants; ++i) + for (size_t i = 0; i != structure_state->num_dynamic_types; ++i) variants.push_back(decodeDataType(*structure_stream)); } else { String data_type_name; - for (size_t i = 0; i != num_variants; ++i) + for (size_t i = 0; i != structure_state->num_dynamic_types; ++i) { readStringBinary(data_type_name, *structure_stream); variants.push_back(DataTypeFactory::instance().get(data_type_name)); @@ -364,9 +362,6 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreamsAndCountTotalSi if (!variant_info.variant_type->equals(*dynamic_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", dynamic_state->variant_type->getName(), variant_info.variant_type->getName()); - if (column_dynamic.getMaxDynamicTypes() != dynamic_state->max_dynamic_types) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. 
Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes()); - settings.path.push_back(Substream::DynamicData); assert_cast(*dynamic_state->variant_serialization) .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics( @@ -424,7 +419,7 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams( if (mutable_column->empty()) { - column_dynamic.setMaxDynamicPaths(structure_state->max_dynamic_types); + column_dynamic.setMaxDynamicPaths(structure_state->num_dynamic_types); column_dynamic.setVariantType(structure_state->variant_type); column_dynamic.setStatistics(structure_state->statistics); } diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index f34b5d0e770..ac98bbbc8b5 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -16,18 +16,28 @@ public: { } - struct DynamicStructureSerializationVersion + struct DynamicSerializationVersion { enum Value { - VariantTypeName = 1, + /// V1 serialization: + /// - DynamicStructure stream: + /// + /// + /// + /// (only in MergeTree serialization) + /// (only in MergeTree serialization) + /// - DynamicData stream: contains the data of nested Variant column. + V1 = 1, + /// V2 serialization: the same as V1 but without max_dynamic_types parameter in DynamicStructure stream. + V2 = 2, }; Value value; static void checkVersion(UInt64 version); - explicit DynamicStructureSerializationVersion(UInt64 version); + explicit DynamicSerializationVersion(UInt64 version); }; void enumerateStreams( @@ -113,9 +123,9 @@ private: struct DeserializeBinaryBulkStateDynamicStructure : public ISerialization::DeserializeBinaryBulkState { - DynamicStructureSerializationVersion structure_version; + DynamicSerializationVersion structure_version; DataTypePtr variant_type; - size_t max_dynamic_types; + size_t num_dynamic_types; ColumnDynamic::StatisticsPtr statistics; explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.cpp b/src/DataTypes/Serializations/SerializationLowCardinality.cpp index baaab6ba3c3..248fe2681b0 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.cpp +++ b/src/DataTypes/Serializations/SerializationLowCardinality.cpp @@ -54,7 +54,7 @@ void SerializationLowCardinality::enumerateStreams( .withSerializationInfo(data.serialization_info); settings.path.back().data = dict_data; - dict_inner_serialization->enumerateStreams(settings, callback, dict_data); + callback(settings.path); settings.path.back() = Substream::DictionaryIndexes; settings.path.back().data = data; diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 0fbf8c54a22..1b95fddee9f 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -25,7 +25,7 @@ SerializationObject::SerializationObject( : typed_path_serializations(std::move(typed_path_serializations_)) , paths_to_skip(paths_to_skip_) , dynamic_serialization(std::make_shared()) - , shared_data_serialization(getTypeOfSharedData()->getDefaultSerialization()) + , shared_data_serialization(DataTypeObject::getTypeOfSharedData()->getDefaultSerialization()) { /// We will need sorted order of typed paths to serialize them in order for consistency. 
sorted_typed_paths.reserve(typed_path_serializations.size()); @@ -38,13 +38,6 @@ SerializationObject::SerializationObject( path_regexps_to_skip.emplace_back(regexp_str); } -const DataTypePtr & SerializationObject::getTypeOfSharedData() -{ - /// Array(Tuple(String, String)) - static const DataTypePtr type = std::make_shared(std::make_shared(DataTypes{std::make_shared(), std::make_shared()}, Names{"paths", "values"})); - return type; -} - bool SerializationObject::shouldSkipPath(const String & path) const { if (paths_to_skip.contains(path)) @@ -70,14 +63,13 @@ SerializationObject::ObjectSerializationVersion::ObjectSerializationVersion(UInt void SerializationObject::ObjectSerializationVersion::checkVersion(UInt64 version) { - if (version != V1 && version != STRING) + if (version != V1 && version != V2 && version != STRING) throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Object structure serialization."); } struct SerializeBinaryBulkStateObject: public ISerialization::SerializeBinaryBulkState { SerializationObject::ObjectSerializationVersion serialization_version; - size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_map typed_path_states; std::unordered_map dynamic_path_states; @@ -168,7 +160,7 @@ void SerializationObject::enumerateStreams(EnumerateStreamsSettings & settings, settings.path.push_back(Substream::ObjectSharedData); auto shared_data_substream_data = SubstreamData(shared_data_serialization) - .withType(getTypeOfSharedData()) + .withType(DataTypeObject::getTypeOfSharedData()) .withColumn(column_object ? column_object->getSharedDataPtr() : nullptr) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); @@ -195,7 +187,7 @@ void SerializationObject::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state prefix"); /// Write serialization version. - UInt64 serialization_version = settings.write_json_as_string ? ObjectSerializationVersion::Value::STRING : ObjectSerializationVersion::Value::V1; + UInt64 serialization_version = settings.write_json_as_string ? ObjectSerializationVersion::Value::STRING : ObjectSerializationVersion::Value::V2; writeBinaryLittleEndian(serialization_version, *stream); auto object_state = std::make_shared(serialization_version); @@ -205,9 +197,6 @@ void SerializationObject::serializeBinaryBulkStatePrefix( return; } - object_state->max_dynamic_paths = column_object.getMaxDynamicPaths(); - /// Write max_dynamic_paths parameter. - writeVarUInt(object_state->max_dynamic_paths, *stream); /// Write all dynamic paths in sorted order. object_state->sorted_dynamic_paths.reserve(dynamic_paths.size()); for (const auto & [path, _] : dynamic_paths) @@ -367,10 +356,15 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb UInt64 serialization_version; readBinaryLittleEndian(serialization_version, *structure_stream); auto structure_state = std::make_shared(serialization_version); - if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1) + if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1 || structure_state->serialization_version.value == ObjectSerializationVersion::Value::V2) { - /// Read max_dynamic_paths parameter. 
- readVarUInt(structure_state->max_dynamic_paths, *structure_stream); + if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1) + { + /// Skip max_dynamic_paths parameter in V1 serialization version. + size_t max_dynamic_paths; + readVarUInt(max_dynamic_paths, *structure_stream); + } + /// Read the sorted list of dynamic paths. size_t dynamic_paths_size; readVarUInt(dynamic_paths_size, *structure_stream); @@ -453,9 +447,6 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( const auto & dynamic_paths = column_object.getDynamicPaths(); const auto & shared_data = column_object.getSharedDataPtr(); - if (column_object.getMaxDynamicPaths() != object_state->max_dynamic_paths) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_paths parameter of Object. Expected: {}, Got: {}", object_state->max_dynamic_paths, column_object.getMaxDynamicPaths()); - if (column_object.getDynamicPaths().size() != object_state->sorted_dynamic_paths.size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of number of dynamic paths in Object. Expected: {}, Got: {}", object_state->sorted_dynamic_paths.size(), column_object.getDynamicPaths().size()); @@ -604,7 +595,7 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( /// If it's a new object column, set dynamic paths and statistics. if (column_object.empty()) { - column_object.setMaxDynamicPaths(structure_state->max_dynamic_paths); + column_object.setMaxDynamicPaths(structure_state->sorted_dynamic_paths.size()); column_object.setDynamicPaths(structure_state->sorted_dynamic_paths); column_object.setStatistics(structure_state->statistics); } diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 420293ba428..db772756a20 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -31,6 +31,8 @@ public: /// - ObjectDynamicPath stream for each column in dynamic paths /// - ObjectSharedData stream shared data column. V1 = 0, + /// V2 serialization: the same as V1 but without max_dynamic_paths parameter in ObjectStructure stream. + V2 = 2, /// String serialization: /// - ObjectData stream with single String column containing serialized JSON. STRING = 1, @@ -98,7 +100,6 @@ private: struct DeserializeBinaryBulkStateObjectStructure : public ISerialization::DeserializeBinaryBulkState { ObjectSerializationVersion serialization_version; - size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_set dynamic_paths; /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). @@ -111,9 +112,6 @@ private: DeserializeBinaryBulkSettings & settings, SubstreamsDeserializeStatesCache * cache); - /// Shared data has type Array(Tuple(String, String)). 
- static const DataTypePtr & getTypeOfSharedData(); - struct TypedPathSubcolumnCreator : public ISubcolumnCreator { String path; diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp index 5323079c54b..c1f26eca792 100644 --- a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp +++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp @@ -18,7 +18,7 @@ SerializationObjectDynamicPath::SerializationObjectDynamicPath( , path(path_) , path_subcolumn(path_subcolumn_) , dynamic_serialization(std::make_shared()) - , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) + , shared_data_serialization(DataTypeObject::getTypeOfSharedData()->getDefaultSerialization()) , max_dynamic_types(max_dynamic_types_) { } @@ -67,8 +67,8 @@ void SerializationObjectDynamicPath::enumerateStreams( { settings.path.push_back(Substream::ObjectSharedData); auto shared_data_substream_data = SubstreamData(shared_data_serialization) - .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) - .withColumn(data.column ? SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withType(data.type ? DataTypeObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? DataTypeObject::getTypeOfSharedData()->createColumn() : nullptr) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state->nested_state); settings.path.back().data = shared_data_substream_data; @@ -164,7 +164,7 @@ void SerializationObjectDynamicPath::deserializeBinaryBulkWithMultipleStreams( settings.path.push_back(Substream::ObjectSharedData); /// Initialize shared_data column if needed. if (result_column->empty()) - dynamic_path_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + dynamic_path_state->shared_data = DataTypeObject::getTypeOfSharedData()->createColumn(); size_t prev_size = result_column->size(); shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_path_state->shared_data, limit, settings, dynamic_path_state->nested_state, cache); /// If we need to read a subcolumn from Dynamic column, create an empty Dynamic column, fill it and extract subcolumn. diff --git a/src/DataTypes/Serializations/SerializationSubObject.cpp b/src/DataTypes/Serializations/SerializationSubObject.cpp index 9084d46f9b2..ff61cb55572 100644 --- a/src/DataTypes/Serializations/SerializationSubObject.cpp +++ b/src/DataTypes/Serializations/SerializationSubObject.cpp @@ -17,7 +17,7 @@ SerializationSubObject::SerializationSubObject( : path_prefix(path_prefix_) , typed_paths_serializations(typed_paths_serializations_) , dynamic_serialization(std::make_shared()) - , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) + , shared_data_serialization(DataTypeObject::getTypeOfSharedData()->getDefaultSerialization()) { } @@ -64,8 +64,8 @@ void SerializationSubObject::enumerateStreams( /// We will need to read shared data to find all paths with requested prefix. settings.path.push_back(Substream::ObjectSharedData); auto shared_data_substream_data = SubstreamData(shared_data_serialization) - .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) - .withColumn(data.column ? SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withType(data.type ? DataTypeObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? 
DataTypeObject::getTypeOfSharedData()->createColumn() : nullptr) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); settings.path.back().data = shared_data_substream_data; @@ -208,7 +208,7 @@ void SerializationSubObject::deserializeBinaryBulkWithMultipleStreams( settings.path.push_back(Substream::ObjectSharedData); /// If it's a new object column, reinitialize column for shared data. if (result_column->empty()) - sub_object_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + sub_object_state->shared_data = DataTypeObject::getTypeOfSharedData()->createColumn(); size_t prev_size = column_object.size(); shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(sub_object_state->shared_data, limit, settings, sub_object_state->shared_data_state, cache); settings.path.pop_back(); diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index 9e5b1b3f673..8940586fc70 100644 --- a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,2 +1,3 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions) + +target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) diff --git a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp index f9a733647e1..216b252ad0f 100644 --- a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp +++ b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 5d75dff391a..6a711c92332 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -199,13 +199,12 @@ void DatabaseReplicatedDDLWorker::initializeReplication() active_node_holder = zkutil::EphemeralNodeHolder::existing(active_path, *active_node_holder_zookeeper); } -String DatabaseReplicatedDDLWorker::enqueueQuery(DDLLogEntry & entry) +String DatabaseReplicatedDDLWorker::enqueueQuery(DDLLogEntry & entry, const ZooKeeperRetriesInfo &, QueryStatusPtr) { auto zookeeper = getAndSetZooKeeper(); return enqueueQueryImpl(zookeeper, entry, database); } - bool DatabaseReplicatedDDLWorker::waitForReplicaToProcessAllEntries(UInt64 timeout_ms) { auto zookeeper = getAndSetZooKeeper(); diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index b690854e249..d2385cbdba3 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -24,7 +24,7 @@ class DatabaseReplicatedDDLWorker : public DDLWorker public: DatabaseReplicatedDDLWorker(DatabaseReplicated * db, ContextPtr context_); - String enqueueQuery(DDLLogEntry & entry) override; + String enqueueQuery(DDLLogEntry & entry, const ZooKeeperRetriesInfo &, QueryStatusPtr) override; String tryEnqueueAndExecuteEntry(DDLLogEntry & entry, ContextPtr query_context); diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 45fd52f27ab..5268dbcb59f 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ 
b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp
@@ -307,6 +307,13 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
     if (!columns.empty())
         columns_part = fmt::format(" AND attname IN ('{}')", boost::algorithm::join(columns, "','"));

+    /// Work around the missing `attgenerated` column in the `pg_attribute` system table for PostgreSQL versions below 12.
+    /// We first run a small query against the server to decide what to select for the "generated" field of the main query:
+    /// it evaluates to the column name `attgenerated` for PostgreSQL 12+ and to an empty string literal for PostgreSQL 11 and below.
+    /// This does not degrade performance, restores support for older versions and fixes the error: column "attgenerated" does not exist.
+    pqxx::result gen_result{tx.exec("select case when current_setting('server_version_num')::int < 120000 then '''''' else 'attgenerated' end as generated")};
+    std::string generated = gen_result[0][0].as<std::string>();
+
     std::string query = fmt::format(
            "SELECT attname AS name, " /// column name
            "format_type(atttypid, atttypmod) AS type, " /// data type
@@ -315,11 +322,11 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
            "atttypid as type_id, "
            "atttypmod as type_modifier, "
            "attnum as att_num, "
-           "attgenerated as generated " /// if column has GENERATED
+           "{} as generated " /// if column has GENERATED
            "FROM pg_attribute "
            "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) {}"
            "AND NOT attisdropped AND attnum > 0 "
-           "ORDER BY attnum ASC", where, columns_part);
+           "ORDER BY attnum ASC", generated, where, columns_part); /// The `generated` value obtained above is substituted into the query string here.
     auto postgres_table_with_schema = postgres_schema.empty() ? postgres_table : doubleQuoteString(postgres_schema) + '.' + doubleQuoteString(postgres_table);
     table.physical_columns = readNamesAndTypesList(tx, postgres_table_with_schema, query, use_nulls, false);
diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp
index 6efbc429fd8..d51d2671992 100644
--- a/src/Databases/enableAllExperimentalSettings.cpp
+++ b/src/Databases/enableAllExperimentalSettings.cpp
@@ -32,6 +32,8 @@ void enableAllExperimentalSettings(ContextMutablePtr context)
     context->setSetting("allow_suspicious_low_cardinality_types", 1);
     context->setSetting("allow_suspicious_fixed_string_types", 1);
+    context->setSetting("allow_suspicious_types_in_group_by", 1);
+    context->setSetting("allow_suspicious_types_in_order_by", 1);
     context->setSetting("allow_suspicious_indices", 1);
     context->setSetting("allow_suspicious_codecs", 1);
     context->setSetting("allow_hyperscan", 1);
diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp
index c405d296e60..01271d5342b 100644
--- a/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp
+++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.cpp
@@ -46,11 +46,13 @@ AsynchronousBoundedReadBuffer::AsynchronousBoundedReadBuffer(
     ImplPtr impl_,
     IAsynchronousReader & reader_,
     const ReadSettings & settings_,
+    size_t buffer_size_,
     AsyncReadCountersPtr async_read_counters_,
     FilesystemReadPrefetchesLogPtr prefetches_log_)
     : ReadBufferFromFileBase(0, nullptr, 0)
     , impl(std::move(impl_))
     , read_settings(settings_)
+    , buffer_size(buffer_size_)
     , reader(reader_)
     , query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ?
CurrentThread::getQueryId() : "") , current_reader_id(getRandomASCIIString(8)) @@ -112,7 +114,7 @@ void AsynchronousBoundedReadBuffer::prefetch(Priority priority) last_prefetch_info.submit_time = std::chrono::system_clock::now(); last_prefetch_info.priority = priority; - prefetch_buffer.resize(chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize())); + prefetch_buffer.resize(buffer_size); prefetch_future = readAsync(prefetch_buffer.data(), prefetch_buffer.size(), priority); ProfileEvents::increment(ProfileEvents::RemoteFSPrefetches); } @@ -211,7 +213,7 @@ bool AsynchronousBoundedReadBuffer::nextImpl() } else { - memory.resize(chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize())); + memory.resize(buffer_size); { ProfileEventTimeIncrement watch(ProfileEvents::SynchronousRemoteReadWaitMicroseconds); diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.h b/src/Disks/IO/AsynchronousBoundedReadBuffer.h index 3dc8fcc39cb..7664cc4d386 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.h +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.h @@ -27,6 +27,7 @@ public: ImplPtr impl_, IAsynchronousReader & reader_, const ReadSettings & settings_, + size_t buffer_size_, AsyncReadCountersPtr async_read_counters_ = nullptr, FilesystemReadPrefetchesLogPtr prefetches_log_ = nullptr); @@ -53,6 +54,7 @@ public: private: const ImplPtr impl; const ReadSettings read_settings; + const size_t buffer_size; IAsynchronousReader & reader; size_t file_offset_of_buffer_end = 0; diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 51c6045cb68..1f806e9c1e5 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -535,7 +535,7 @@ bool CachedOnDiskReadBufferFromFile::completeFileSegmentAndGetNext() chassert(file_offset_of_buffer_end > completed_range.right); cache_file_reader.reset(); - file_segments->popFront(); + file_segments->completeAndPopFront(settings.filesystem_cache_allow_background_download); if (file_segments->empty() && !nextFileSegmentsBatch()) return false; @@ -556,6 +556,12 @@ CachedOnDiskReadBufferFromFile::~CachedOnDiskReadBufferFromFile() { appendFilesystemCacheLog(file_segments->front(), read_type); } + + if (file_segments && !file_segments->empty() && !file_segments->front().isCompleted()) + { + file_segments->completeAndPopFront(settings.filesystem_cache_allow_background_download); + file_segments = {}; + } } void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment) @@ -784,6 +790,7 @@ bool CachedOnDiskReadBufferFromFile::writeCache(char * data, size_t size, size_t LOG_INFO(log, "Insert into cache is skipped due to insufficient disk space. 
({})", e.displayText()); return false; } + chassert(file_segment.state() == FileSegment::State::PARTIALLY_DOWNLOADED_NO_CONTINUATION); throw; } diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h index 119fa166214..4881b6a309d 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h @@ -41,6 +41,8 @@ public: ~CachedOnDiskReadBufferFromFile() override; + bool isCached() const override { return true; } + bool nextImpl() override; off_t seek(off_t off, int whence) override; diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp index 6aedc1f5d04..df6fb871772 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp @@ -196,7 +196,7 @@ void FileSegmentRangeWriter::completeFileSegment() if (file_segment.isDetached() || file_segment.isCompleted()) return; - file_segment.complete(); + file_segment.complete(false); appendFilesystemCacheLog(file_segment); } @@ -210,7 +210,7 @@ void FileSegmentRangeWriter::jumpToPosition(size_t position) if (position < current_write_offset) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot jump backwards: {} < {}", position, current_write_offset); - file_segment.complete(); + file_segment.complete(false); file_segments.reset(); } expected_write_offset = position; diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 8e4ec6f3dfb..8d3b9366261 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -18,24 +18,14 @@ namespace ErrorCodes extern const int CANNOT_SEEK_THROUGH_FILE; } -size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size) -{ - /// Only when cache is used we could download bigger portions of FileSegments than what we actually gonna read within particular task. - if (!settings.enable_filesystem_cache && !settings.read_through_distributed_cache) - return settings.remote_fs_buffer_size; - - /// Buffers used for prefetch and pre-download better to have enough size, but not bigger than the whole file. - return std::min(std::max(settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE), file_size); -} - ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather( ReadBufferCreator && read_buffer_creator_, const StoredObjects & blobs_to_read_, const ReadSettings & settings_, std::shared_ptr cache_log_, - bool use_external_buffer_) - : ReadBufferFromFileBase(use_external_buffer_ ? 0 : chooseBufferSizeForRemoteReading( - settings_, getTotalSize(blobs_to_read_)), nullptr, 0) + bool use_external_buffer_, + size_t buffer_size) + : ReadBufferFromFileBase(use_external_buffer_ ? 
0 : buffer_size, nullptr, 0) , settings(settings_) , blobs_to_read(blobs_to_read_) , read_buffer_creator(std::move(read_buffer_creator_)) diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index 27f94a3e552..c5f1966dc38 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -28,7 +28,8 @@ public: const StoredObjects & blobs_to_read_, const ReadSettings & settings_, std::shared_ptr cache_log_, - bool use_external_buffer_); + bool use_external_buffer_, + size_t buffer_size); ~ReadBufferFromRemoteFSGather() override; @@ -84,6 +85,4 @@ private: LoggerPtr log; }; - -size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size); } diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index cc8a873c544..3720c04a471 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -641,19 +641,33 @@ std::unique_ptr DiskObjectStorage::readFile( return impl; }; + /// Avoid cache fragmentation by choosing bigger buffer size. + bool prefer_bigger_buffer_size = object_storage->supportsCache() && read_settings.enable_filesystem_cache; + size_t buffer_size = prefer_bigger_buffer_size + ? std::max(settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE) + : settings.remote_fs_buffer_size; + + size_t total_objects_size = file_size ? *file_size : getTotalSize(storage_objects); + if (total_objects_size) + buffer_size = std::min(buffer_size, total_objects_size); + const bool use_async_buffer = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool; auto impl = std::make_unique( std::move(read_buffer_creator), storage_objects, read_settings, global_context->getFilesystemCacheLog(), - /* use_external_buffer */use_async_buffer); + /* use_external_buffer */use_async_buffer, + /* buffer_size */use_async_buffer ? 
0 : buffer_size); if (use_async_buffer) { auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); return std::make_unique( - std::move(impl), reader, read_settings, + std::move(impl), + reader, + read_settings, + buffer_size, global_context->getAsyncReadCounters(), global_context->getFilesystemReadPrefetchesLog()); diff --git a/src/Disks/tests/gtest_asynchronous_bounded_read_buffer.cpp b/src/Disks/tests/gtest_asynchronous_bounded_read_buffer.cpp index 63a39fe39c7..11b4fc3118d 100644 --- a/src/Disks/tests/gtest_asynchronous_bounded_read_buffer.cpp +++ b/src/Disks/tests/gtest_asynchronous_bounded_read_buffer.cpp @@ -51,7 +51,7 @@ TEST_F(AsynchronousBoundedReadBufferTest, setReadUntilPosition) for (bool with_prefetch : {false, true}) { - AsynchronousBoundedReadBuffer read_buffer(createReadBufferFromFileBase(file_path, {}), remote_fs_reader, {}); + AsynchronousBoundedReadBuffer read_buffer(createReadBufferFromFileBase(file_path, {}), remote_fs_reader, {}, DBMS_DEFAULT_BUFFER_SIZE); read_buffer.setReadUntilPosition(20); auto try_read = [&](size_t count) diff --git a/src/Formats/fuzzers/CMakeLists.txt b/src/Formats/fuzzers/CMakeLists.txt index ee1a4fd4358..83aa5eb781a 100644 --- a/src/Formats/fuzzers/CMakeLists.txt +++ b/src/Formats/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS}) -target_link_libraries(format_fuzzer PRIVATE clickhouse_aggregate_functions) +target_link_libraries(format_fuzzer PRIVATE clickhouse_aggregate_functions dbms) diff --git a/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h index bd6f0361307..be0875581a5 100644 --- a/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -1171,7 +1171,7 @@ public: if (left_tuple && right_tuple) { - auto func = FunctionToOverloadResolverAdaptor(std::make_shared>(check_decimal_overflow)); + auto func = std::make_shared(std::make_shared>(check_decimal_overflow)); bool has_nullable = false; bool has_null = false; @@ -1181,7 +1181,7 @@ public: { ColumnsWithTypeAndName args = {{nullptr, left_tuple->getElements()[i], ""}, {nullptr, right_tuple->getElements()[i], ""}}; - auto element_type = func.build(args)->getResultType(); + auto element_type = func->build(args)->getResultType(); has_nullable = has_nullable || element_type->isNullable(); has_null = has_null || element_type->onlyNull(); } diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0f6311c9716..ee04916e7b4 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -3921,7 +3921,7 @@ private: } } - WrapperType createTupleToObjectWrapper(const DataTypeTuple & from_tuple, bool has_nullable_subcolumns) const + WrapperType createTupleToObjectDeprecatedWrapper(const DataTypeTuple & from_tuple, bool has_nullable_subcolumns) const { if (!from_tuple.haveExplicitNames()) throw Exception(ErrorCodes::TYPE_MISMATCH, @@ -3968,7 +3968,7 @@ private: }; } - WrapperType createMapToObjectWrapper(const DataTypeMap & from_map, bool has_nullable_subcolumns) const + WrapperType createMapToObjectDeprecatedWrapper(const DataTypeMap & from_map, bool has_nullable_subcolumns) const { auto key_value_types = from_map.getKeyValueTypes(); @@ -4048,11 +4048,11 @@ private: { if (const auto * from_tuple = checkAndGetDataType(from_type.get())) { - return createTupleToObjectWrapper(*from_tuple, to_type->hasNullableSubcolumns()); + return createTupleToObjectDeprecatedWrapper(*from_tuple, 
to_type->hasNullableSubcolumns()); } else if (const auto * from_map = checkAndGetDataType(from_type.get())) { - return createMapToObjectWrapper(*from_map, to_type->hasNullableSubcolumns()); + return createMapToObjectDeprecatedWrapper(*from_map, to_type->hasNullableSubcolumns()); } else if (checkAndGetDataType(from_type.get())) { @@ -4081,23 +4081,43 @@ private: "Cast to Object can be performed only from flatten named Tuple, Map or String. Got: {}", from_type->getName()); } + WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_object) const { if (checkAndGetDataType(from_type.get())) { return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) { - auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); - res->finalize(); - return res; + return ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context); + }; + } + + /// Cast Tuple/Object/Map to JSON type through serializing into JSON string and parsing back into JSON column. + /// Potentially we can do smarter conversion Tuple -> JSON with type preservation, but it's questionable how exactly Tuple should be + /// converted to JSON (for example, should we recursively convert nested Array(Tuple) to Array(JSON) or not, should we infer types from String fields, etc). + if (checkAndGetDataType(from_type.get()) || checkAndGetDataType(from_type.get()) || checkAndGetDataType(from_type.get())) + { + return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) + { + auto json_string = ColumnString::create(); + ColumnStringHelpers::WriteHelper write_helper(assert_cast(*json_string), input_rows_count); + auto & write_buffer = write_helper.getWriteBuffer(); + FormatSettings format_settings = context ? getFormatSettings(context) : FormatSettings{}; + auto serialization = arguments[0].type->getDefaultSerialization(); + for (size_t i = 0; i < input_rows_count; ++i) + { + serialization->serializeTextJSON(*arguments[0].column, i, write_buffer, format_settings); + write_helper.rowWritten(); + } + write_helper.finalize(); + + ColumnsWithTypeAndName args_with_json_string = {ColumnWithTypeAndName(json_string->getPtr(), std::make_shared(), "")}; + return ConvertImplGenericFromString::execute(args_with_json_string, result_type, nullable_source, input_rows_count, context); }; } /// TODO: support CAST between JSON types with different parameters - /// support CAST from Map to JSON - /// support CAST from Tuple to JSON - /// support CAST from Object('json') to JSON - throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String. Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); + throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String/Map/Object/Tuple. 
Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); } WrapperType createVariantToVariantWrapper(const DataTypeVariant & from_variant, const DataTypeVariant & to_variant) const diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp index ebd65471449..a04b8d7b998 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp @@ -24,92 +24,7 @@ namespace ErrorCodes void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast) { - if (!ast) - { - chassert(false); - return; - } - - /// FIXME: this helper should use updatePointerToChild(), but - /// forEachPointerToChild() is not implemented for ASTColumnDeclaration - /// (and also some members should be adjusted for this). - const auto visit_child_with_shared_ptr = [&](ASTPtr & child) - { - if (!child) - return; - - auto * old_value = child.get(); - visit(child); - - // child did not change - if (old_value == child.get()) - return; - - // child changed, we need to modify it in the list of children of the parent also - for (auto & current_child : ast->children) - { - if (current_child.get() == old_value) - current_child = child; - } - }; - - if (auto * col_decl = ast->as()) - { - visit_child_with_shared_ptr(col_decl->default_expression); - visit_child_with_shared_ptr(col_decl->ttl); - return; - } - - if (auto * storage = ast->as()) - { - const auto visit_child = [&](IAST * & child) - { - if (!child) - return; - - if (const auto * function = child->template as()) - { - std::unordered_set udf_in_replace_process; - auto replace_result = tryToReplaceFunction(*function, udf_in_replace_process); - if (replace_result) - ast->setOrReplace(child, replace_result); - } - - visit(child); - }; - - visit_child(storage->partition_by); - visit_child(storage->primary_key); - visit_child(storage->order_by); - visit_child(storage->sample_by); - visit_child(storage->ttl_table); - - return; - } - - if (auto * alter = ast->as()) - { - /// It is OK to use updatePointerToChild() because ASTAlterCommand implements forEachPointerToChild() - const auto visit_child_update_parent = [&](ASTPtr & child) - { - if (!child) - return; - - auto * old_ptr = child.get(); - visit(child); - auto * new_ptr = child.get(); - - /// Some AST classes have naked pointers to children elements as members. - /// We have to replace them if the child was replaced. - if (new_ptr != old_ptr) - ast->updatePointerToChild(old_ptr, new_ptr); - }; - - for (auto & children : alter->children) - visit_child_update_parent(children); - - return; - } + chassert(ast); if (const auto * function = ast->template as()) { @@ -120,7 +35,19 @@ void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast) } for (auto & child : ast->children) + { + if (!child) + return; + + auto * old_ptr = child.get(); visit(child); + auto * new_ptr = child.get(); + + /// Some AST classes have naked pointers to children elements as members. + /// We have to replace them if the child was replaced. 
+ if (new_ptr != old_ptr) + ast->updatePointerToChild(old_ptr, new_ptr); + } } void UserDefinedSQLFunctionVisitor::visit(IAST * ast) diff --git a/src/Functions/transform.cpp b/src/Functions/transform.cpp index 45f0a7f5c17..e5445b36809 100644 --- a/src/Functions/transform.cpp +++ b/src/Functions/transform.cpp @@ -211,7 +211,7 @@ namespace ColumnsWithTypeAndName args = arguments; args[0].column = args[0].column->cloneResized(input_rows_count)->convertToFullColumnIfConst(); - auto impl = FunctionToOverloadResolverAdaptor(std::make_shared()).build(args); + auto impl = std::make_shared(std::make_shared())->build(args); return impl->execute(args, result_type, input_rows_count); } diff --git a/src/IO/ReadBufferFromFileBase.h b/src/IO/ReadBufferFromFileBase.h index c98dcd5a93e..c59a5c152b6 100644 --- a/src/IO/ReadBufferFromFileBase.h +++ b/src/IO/ReadBufferFromFileBase.h @@ -60,6 +60,8 @@ public: /// file offset and what getPosition() returns. virtual bool isRegularLocalFile(size_t * /*out_view_offsee*/) { return false; } + virtual bool isCached() const { return false; } + protected: std::optional file_size; ProfileCallback profile_callback; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index aa52e00e6d7..6ed02212095 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -58,6 +58,9 @@ struct ReadSettings bool enable_filesystem_cache_log = false; size_t filesystem_cache_segments_batch_size = 20; size_t filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = 1000; + bool filesystem_cache_allow_background_download = true; + bool filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage = true; + bool filesystem_cache_allow_background_download_during_fetch = true; bool use_page_cache_for_disks_without_file_cache = false; bool read_from_page_cache_if_exists_otherwise_bypass_cache = false; diff --git a/src/Interpreters/Cache/EvictionCandidates.cpp b/src/Interpreters/Cache/EvictionCandidates.cpp index 08776ad5aee..f5d5fdec6ba 100644 --- a/src/Interpreters/Cache/EvictionCandidates.cpp +++ b/src/Interpreters/Cache/EvictionCandidates.cpp @@ -83,7 +83,8 @@ void EvictionCandidates::removeQueueEntries(const CachePriorityGuard::Lock & loc queue_iterator->invalidate(); chassert(candidate->releasable()); - candidate->file_segment->resetQueueIterator(); + candidate->file_segment->markDelayedRemovalAndResetQueueIterator(); + /// We need to set removed flag in file segment metadata, /// because in dynamic cache resize we first remove queue entries, /// then evict which also removes file segment metadata, diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index f7b7ffc5aea..7de3f7af78d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -37,6 +37,11 @@ namespace ProfileEvents extern const Event FilesystemCacheFailToReserveSpaceBecauseOfCacheResize; } +namespace CurrentMetrics +{ + extern const Metric FilesystemCacheDownloadQueueElements; +} + namespace DB { @@ -918,7 +923,13 @@ bool FileCache::tryReserve( if (!query_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, {}, user.user_id, cache_lock)) { - failure_reason = "cannot evict enough space for query limit"; + const auto & stat = reserve_stat.total_stat; + failure_reason = fmt::format( + "cannot evict enough space for query limit " + "(non-releasable count: {}, non-releasable size: {}, " + "releasable count: {}, releasable size: {}, background download elements: {})", + 
stat.non_releasable_count, stat.non_releasable_size, stat.releasable_count, stat.releasable_size, + CurrentMetrics::get(CurrentMetrics::FilesystemCacheDownloadQueueElements)); return false; } @@ -933,7 +944,13 @@ bool FileCache::tryReserve( if (!main_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, queue_iterator, user.user_id, cache_lock)) { - failure_reason = "cannot evict enough space"; + const auto & stat = reserve_stat.total_stat; + failure_reason = fmt::format( + "cannot evict enough space " + "(non-releasable count: {}, non-releasable size: {}, " + "releasable count: {}, releasable size: {}, background download elements: {})", + stat.non_releasable_count, stat.non_releasable_size, stat.releasable_count, stat.releasable_size, + CurrentMetrics::get(CurrentMetrics::FilesystemCacheDownloadQueueElements)); return false; } diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index c356800fa57..541f0f5607a 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -28,6 +28,7 @@ namespace ProfileEvents extern const Event FileSegmentFailToIncreasePriority; extern const Event FilesystemCacheHoldFileSegments; extern const Event FilesystemCacheUnusedHoldFileSegments; + extern const Event FilesystemCacheBackgroundDownloadQueuePush; } namespace CurrentMetrics @@ -171,10 +172,11 @@ void FileSegment::setQueueIterator(Priority::IteratorPtr iterator) queue_iterator = iterator; } -void FileSegment::resetQueueIterator() +void FileSegment::markDelayedRemovalAndResetQueueIterator() { auto lk = lock(); - queue_iterator.reset(); + on_delayed_removal = true; + queue_iterator = {}; } size_t FileSegment::getCurrentWriteOffset() const @@ -627,7 +629,7 @@ void FileSegment::completePartAndResetDownloader() LOG_TEST(log, "Complete batch. ({})", getInfoForLogUnlocked(lk)); } -void FileSegment::complete() +void FileSegment::complete(bool allow_background_download) { ProfileEventTimeIncrement watch(ProfileEvents::FileSegmentCompleteMicroseconds); @@ -700,12 +702,15 @@ void FileSegment::complete() case State::PARTIALLY_DOWNLOADED: { chassert(current_downloaded_size > 0); + chassert(fs::exists(getPath())); + chassert(fs::file_size(getPath()) > 0); if (is_last_holder) { bool added_to_download_queue = false; - if (background_download_enabled && remote_file_reader) + if (allow_background_download && background_download_enabled && remote_file_reader) { + ProfileEvents::increment(ProfileEvents::FilesystemCacheBackgroundDownloadQueuePush); added_to_download_queue = locked_key->addToDownloadQueue(offset(), segment_lock); /// Finish download in background. 
} @@ -841,29 +846,60 @@ bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock & lock) } } - if (download_state == State::DOWNLOADED) + switch (download_state.load()) { - chassert(downloader_id.empty()); - chassert(downloaded_size == reserved_size); - chassert(downloaded_size == range().size()); - chassert(downloaded_size > 0); - chassert(std::filesystem::file_size(getPath()) > 0); - check_iterator(queue_iterator); - } - else - { - if (download_state == State::DOWNLOADING) - { - chassert(!downloader_id.empty()); - } - else if (download_state == State::PARTIALLY_DOWNLOADED - || download_state == State::EMPTY) + case State::EMPTY: { chassert(downloader_id.empty()); + chassert(!fs::exists(getPath())); + chassert(!queue_iterator); + break; } + case State::DOWNLOADED: + { + chassert(downloader_id.empty()); - chassert(reserved_size >= downloaded_size); - check_iterator(queue_iterator); + chassert(downloaded_size == reserved_size); + chassert(downloaded_size == range().size()); + chassert(downloaded_size > 0); + chassert(fs::file_size(getPath()) > 0); + + chassert(queue_iterator || on_delayed_removal); + check_iterator(queue_iterator); + break; + } + case State::DOWNLOADING: + { + chassert(!downloader_id.empty()); + if (downloaded_size) + { + chassert(queue_iterator); + chassert(fs::file_size(getPath()) > 0); + } + break; + } + case State::PARTIALLY_DOWNLOADED: + { + chassert(downloader_id.empty()); + + chassert(reserved_size >= downloaded_size); + chassert(downloaded_size > 0); + chassert(fs::file_size(getPath()) > 0); + + chassert(queue_iterator); + check_iterator(queue_iterator); + break; + } + case State::PARTIALLY_DOWNLOADED_NO_CONTINUATION: + { + chassert(reserved_size >= downloaded_size); + check_iterator(queue_iterator); + break; + } + case State::DETACHED: + { + break; + } } return true; @@ -991,7 +1027,12 @@ FileSegmentsHolder::FileSegmentsHolder(FileSegments && file_segments_) FileSegmentPtr FileSegmentsHolder::getSingleFileSegment() const { if (file_segments.size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single file segment, got: {} in holder {}", file_segments.size(), toString()); + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Expected single file segment, got: {} in holder {}", + file_segments.size(), toString()); + } return file_segments.front(); } @@ -1001,7 +1042,23 @@ void FileSegmentsHolder::reset() ProfileEvents::increment(ProfileEvents::FilesystemCacheUnusedHoldFileSegments, file_segments.size()); for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();) - file_segment_it = completeAndPopFrontImpl(); + { + try + { + /// One might think it would have been more correct to do `false` here, + /// not to allow background download for file segments that we actually did not start reading. + /// But actually we would only do that, if those file segments were already read partially by some other thread/query + /// but they were not put to the download queue, because current thread was holding them in Holder. + /// So as a culprit, we need to allow to happen what would have happened if we did not exist. + file_segment_it = completeAndPopFrontImpl(true); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + chassert(false); + continue; + } + } file_segments.clear(); } @@ -1010,9 +1067,9 @@ FileSegmentsHolder::~FileSegmentsHolder() reset(); } -FileSegments::iterator FileSegmentsHolder::completeAndPopFrontImpl() +FileSegments::iterator FileSegmentsHolder::completeAndPopFrontImpl(bool allow_background_download) { - front().complete(); + front().complete(allow_background_download); CurrentMetrics::sub(CurrentMetrics::FilesystemCacheHoldFileSegments); return file_segments.erase(file_segments.begin()); } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index ee9aee1e354..21d5f9dab5f 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -177,7 +177,7 @@ public: void setQueueIterator(Priority::IteratorPtr iterator); - void resetQueueIterator(); + void markDelayedRemovalAndResetQueueIterator(); KeyMetadataPtr tryGetKeyMetadata() const; @@ -189,7 +189,7 @@ public: * ========== Methods that must do cv.notify() ================== */ - void complete(); + void complete(bool allow_background_download); void completePartAndResetDownloader(); @@ -249,12 +249,13 @@ private: String tryGetPath() const; - Key file_key; + const Key file_key; Range segment_range; const FileSegmentKind segment_kind; /// Size of the segment is not known until it is downloaded and /// can be bigger than max_file_segment_size. - const bool is_unbound = false; + /// is_unbound == true for temporary data in cache. + const bool is_unbound; const bool background_download_enabled; std::atomic download_state; @@ -279,6 +280,8 @@ private: std::atomic hits_count = 0; /// cache hits. std::atomic ref_count = 0; /// Used for getting snapshot state + bool on_delayed_removal = false; + CurrentMetrics::Increment metric_increment{CurrentMetrics::CacheFileSegments}; }; @@ -297,7 +300,7 @@ struct FileSegmentsHolder final : private boost::noncopyable String toString(bool with_state = false) const; - void popFront() { completeAndPopFrontImpl(); } + void completeAndPopFront(bool allow_background_download) { completeAndPopFrontImpl(allow_background_download); } FileSegment & front() { return *file_segments.front(); } const FileSegment & front() const { return *file_segments.front(); } @@ -319,7 +322,7 @@ struct FileSegmentsHolder final : private boost::noncopyable private: FileSegments file_segments{}; - FileSegments::iterator completeAndPopFrontImpl(); + FileSegments::iterator completeAndPopFrontImpl(bool allow_background_download); }; using FileSegmentsHolderPtr = std::unique_ptr; diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 99ea01aa4f1..231545212cd 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -940,7 +940,16 @@ KeyMetadata::iterator LockedKey::removeFileSegmentImpl( if (file_segment->queue_iterator && invalidate_queue_entry) file_segment->queue_iterator->invalidate(); - file_segment->detach(segment_lock, *this); + try + { + file_segment->detach(segment_lock, *this); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + chassert(false); + /// Do not rethrow, we must delete the file below. + } try { @@ -990,8 +999,8 @@ void LockedKey::shrinkFileSegmentToDownloadedSize( * because of no space left in cache, we need to be able to cut file segment's size to downloaded_size. 
*/ - auto metadata = getByOffset(offset); - const auto & file_segment = metadata->file_segment; + auto file_segment_metadata = getByOffset(offset); + const auto & file_segment = file_segment_metadata->file_segment; chassert(file_segment->assertCorrectnessUnlocked(segment_lock)); const size_t downloaded_size = file_segment->getDownloadedSize(); @@ -1006,15 +1015,15 @@ void LockedKey::shrinkFileSegmentToDownloadedSize( chassert(file_segment->reserved_size >= downloaded_size); int64_t diff = file_segment->reserved_size - downloaded_size; - metadata->file_segment = std::make_shared( + file_segment_metadata->file_segment = std::make_shared( getKey(), offset, downloaded_size, FileSegment::State::DOWNLOADED, CreateFileSegmentSettings(file_segment->getKind()), false, file_segment->cache, key_metadata, file_segment->queue_iterator); if (diff) - metadata->getQueueIterator()->decrementSize(diff); + file_segment_metadata->getQueueIterator()->decrementSize(diff); - chassert(file_segment->assertCorrectnessUnlocked(segment_lock)); + chassert(file_segment_metadata->file_segment->assertCorrectnessUnlocked(segment_lock)); } bool LockedKey::addToDownloadQueue(size_t offset, const FileSegmentGuard::Lock &) diff --git a/src/Interpreters/Cache/QueryCache.cpp b/src/Interpreters/Cache/QueryCache.cpp index c766c5209fc..7dbee567c5b 100644 --- a/src/Interpreters/Cache/QueryCache.cpp +++ b/src/Interpreters/Cache/QueryCache.cpp @@ -89,11 +89,40 @@ struct HasSystemTablesMatcher { database_table = identifier->name(); } - /// Handle SELECT [...] FROM clusterAllReplicas(, '') - else if (const auto * literal = node->as()) + /// SELECT [...] FROM clusterAllReplicas(, '
') + /// This SQL syntax is quite common but we need to be careful. A naive attempt to cast 'node' to an ASTLiteral will be too general + /// and introduce false positives in queries like + /// 'SELECT * FROM users WHERE name = 'system.metrics' SETTINGS use_query_cache = true;' + /// Therefore, make sure we are really in `clusterAllReplicas`. EXPLAIN AST for + /// 'SELECT * FROM clusterAllReplicas('default', system.one) SETTINGS use_query_cache = 1' + /// returns: + /// [...] + /// Function clusterAllReplicas (children 1) + /// ExpressionList (children 2) + /// Literal 'test_shard_localhost' + /// Literal 'system.one' + /// [...] + else if (const auto * function = node->as()) { - const auto & value = literal->value; - database_table = toString(value); + if (function->name == "clusterAllReplicas") + { + const ASTs & function_children = function->children; + if (!function_children.empty()) + { + if (const auto * expression_list = function_children[0]->as()) + { + const ASTs & expression_list_children = expression_list->children; + if (expression_list_children.size() >= 2) + { + if (const auto * literal = expression_list_children[1]->as()) + { + const auto & value = literal->value; + database_table = toString(value); + } + } + } + } + } } Tokens tokens(database_table.c_str(), database_table.c_str() + database_table.size(), /*max_query_size*/ 2048, /*skip_insignificant*/ true); diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index fbf0cbd0eb7..c1fa2c8549a 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -194,6 +194,8 @@ namespace Setting extern const SettingsUInt64 filesystem_cache_max_download_size; extern const SettingsUInt64 filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; extern const SettingsUInt64 filesystem_cache_segments_batch_size; + extern const SettingsBool filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage; + extern const SettingsBool filesystem_cache_enable_background_download_during_fetch; extern const SettingsBool http_make_head_request; extern const SettingsUInt64 http_max_fields; extern const SettingsUInt64 http_max_field_name_size; @@ -273,6 +275,13 @@ namespace ServerSetting extern const ServerSettingsUInt64 max_replicated_sends_network_bandwidth_for_server; extern const ServerSettingsUInt64 tables_loader_background_pool_size; extern const ServerSettingsUInt64 tables_loader_foreground_pool_size; + extern const ServerSettingsUInt64 prefetch_threadpool_pool_size; + extern const ServerSettingsUInt64 prefetch_threadpool_queue_size; + extern const ServerSettingsUInt64 load_marks_threadpool_pool_size; + extern const ServerSettingsUInt64 load_marks_threadpool_queue_size; + extern const ServerSettingsUInt64 threadpool_writer_pool_size; + extern const ServerSettingsUInt64 threadpool_writer_queue_size; + } namespace ErrorCodes @@ -3215,9 +3224,8 @@ void Context::clearMarkCache() const ThreadPool & Context::getLoadMarksThreadpool() const { callOnce(shared->load_marks_threadpool_initialized, [&] { - const auto & config = getConfigRef(); - auto pool_size = config.getUInt(".load_marks_threadpool_pool_size", 50); - auto queue_size = config.getUInt(".load_marks_threadpool_queue_size", 1000000); + auto pool_size = shared->server_settings[ServerSetting::load_marks_threadpool_pool_size]; + auto queue_size = shared->server_settings[ServerSetting::load_marks_threadpool_queue_size]; shared->load_marks_threadpool = std::make_unique( CurrentMetrics::MarksLoaderThreads, 
CurrentMetrics::MarksLoaderThreadsActive, CurrentMetrics::MarksLoaderThreadsScheduled, pool_size, pool_size, queue_size); }); @@ -3410,9 +3418,9 @@ AsynchronousMetrics * Context::getAsynchronousMetrics() const ThreadPool & Context::getPrefetchThreadpool() const { callOnce(shared->prefetch_threadpool_initialized, [&] { - const auto & config = getConfigRef(); - auto pool_size = config.getUInt(".prefetch_threadpool_pool_size", 100); - auto queue_size = config.getUInt(".prefetch_threadpool_queue_size", 1000000); + auto pool_size = shared->server_settings[ServerSetting::prefetch_threadpool_pool_size]; + auto queue_size = shared->server_settings[ServerSetting::prefetch_threadpool_queue_size]; + shared->prefetch_threadpool = std::make_unique( CurrentMetrics::IOPrefetchThreads, CurrentMetrics::IOPrefetchThreadsActive, CurrentMetrics::IOPrefetchThreadsScheduled, pool_size, pool_size, queue_size); }); @@ -3422,8 +3430,7 @@ ThreadPool & Context::getPrefetchThreadpool() const size_t Context::getPrefetchThreadpoolSize() const { - const auto & config = getConfigRef(); - return config.getUInt(".prefetch_threadpool_pool_size", 100); + return shared->server_settings[ServerSetting::prefetch_threadpool_pool_size]; } ThreadPool & Context::getBuildVectorSimilarityIndexThreadPool() const @@ -5696,9 +5703,8 @@ IOUringReader & Context::getIOUringReader() const ThreadPool & Context::getThreadPoolWriter() const { callOnce(shared->threadpool_writer_initialized, [&] { - const auto & config = getConfigRef(); - auto pool_size = config.getUInt(".threadpool_writer_pool_size", 100); - auto queue_size = config.getUInt(".threadpool_writer_queue_size", 1000000); + auto pool_size = shared->server_settings[ServerSetting::threadpool_writer_pool_size]; + auto queue_size = shared->server_settings[ServerSetting::threadpool_writer_queue_size]; shared->threadpool_writer = std::make_unique( CurrentMetrics::IOWriterThreads, CurrentMetrics::IOWriterThreadsActive, CurrentMetrics::IOWriterThreadsScheduled, pool_size, pool_size, queue_size); @@ -5742,6 +5748,9 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_segments_batch_size = settings_ref[Setting::filesystem_cache_segments_batch_size]; res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings_ref[Setting::filesystem_cache_reserve_space_wait_lock_timeout_milliseconds]; + res.filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage + = settings_ref[Setting::filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage]; + res.filesystem_cache_allow_background_download_during_fetch = settings_ref[Setting::filesystem_cache_enable_background_download_during_fetch]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 1be1a0c9bb9..eaba46f5d48 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1053,7 +1054,25 @@ void DDLWorker::createStatusDirs(const std::string & node_path, const ZooKeeperP } -String DDLWorker::enqueueQuery(DDLLogEntry & entry) +String DDLWorker::enqueueQuery(DDLLogEntry & entry, const ZooKeeperRetriesInfo & retries_info, QueryStatusPtr process_list_element) +{ + String node_path; + if (retries_info.max_retries > 0) + { + ZooKeeperRetriesControl 
retries_ctl{"DDLWorker::enqueueQuery", log, retries_info, process_list_element}; + retries_ctl.retryLoop([&]{ + node_path = enqueueQueryAttempt(entry); + }); + } + else + { + node_path = enqueueQueryAttempt(entry); + } + return node_path; +} + + +String DDLWorker::enqueueQueryAttempt(DDLLogEntry & entry) { if (entry.hosts.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty host list in a distributed DDL task"); diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index ee17714add9..a5f47a51bb3 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -48,6 +48,9 @@ struct DDLTaskBase; using DDLTaskPtr = std::unique_ptr; using ZooKeeperPtr = std::shared_ptr; class AccessRightsElements; +struct ZooKeeperRetriesInfo; +class QueryStatus; +using QueryStatusPtr = std::shared_ptr; class DDLWorker { @@ -65,7 +68,7 @@ public: virtual ~DDLWorker(); /// Pushes query into DDL queue, returns path to created node - virtual String enqueueQuery(DDLLogEntry & entry); + virtual String enqueueQuery(DDLLogEntry & entry, const ZooKeeperRetriesInfo & retries_info, QueryStatusPtr process_list_element); /// Host ID (name:port) for logging purposes /// Note that in each task hosts are identified individually by name:port from initiator server cluster config @@ -120,6 +123,9 @@ protected: mutable std::shared_mutex mtx; }; + /// Pushes query into DDL queue, returns path to created node + String enqueueQueryAttempt(DDLLogEntry & entry); + /// Iterates through queue tasks in ZooKeeper, runs execution of new tasks void scheduleTasks(bool reinitialized); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 4e5cf7d2549..58224239723 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -105,6 +105,8 @@ namespace Setting extern const SettingsBool query_plan_aggregation_in_order; extern const SettingsBool query_plan_read_in_order; extern const SettingsUInt64 use_index_for_in_with_subqueries_max_values; + extern const SettingsBool allow_suspicious_types_in_group_by; + extern const SettingsBool allow_suspicious_types_in_order_by; } @@ -118,6 +120,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int UNKNOWN_IDENTIFIER; extern const int UNKNOWN_TYPE_OF_AST_NODE; + extern const int ILLEGAL_COLUMN; } namespace @@ -1368,6 +1371,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain ExpressionActionsChain::Step & step = chain.lastStep(columns_after_join); ASTs asts = select_query->groupBy()->children; + NameSet group_by_keys; if (select_query->group_by_with_grouping_sets) { for (const auto & ast : asts) @@ -1375,6 +1379,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain for (const auto & ast_element : ast->children) { step.addRequiredOutput(ast_element->getColumnName()); + group_by_keys.insert(ast_element->getColumnName()); getRootActions(ast_element, only_types, step.actions()->dag); } } @@ -1384,10 +1389,17 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain for (const auto & ast : asts) { step.addRequiredOutput(ast->getColumnName()); + group_by_keys.insert(ast->getColumnName()); getRootActions(ast, only_types, step.actions()->dag); } } + for (const auto & result_column : step.getResultColumns()) + { + if (group_by_keys.contains(result_column.name)) + validateGroupByKeyType(result_column.type); + } + if (optimize_aggregation_in_order) { for (auto & child : asts) @@ 
-1402,6 +1414,26 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain
     return true;
 }
 
+void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr & key_type) const
+{
+    if (getContext()->getSettingsRef()[Setting::allow_suspicious_types_in_group_by])
+        return;
+
+    auto check = [](const IDataType & type)
+    {
+        if (isDynamic(type) || isVariant(type))
+            throw Exception(
+                ErrorCodes::ILLEGAL_COLUMN,
+                "Data types Variant/Dynamic are not allowed in GROUP BY keys, because they can lead to unexpected results. "
+                "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if "
+                "it's a JSON path subcolumn) or casting this column to a specific data type. "
+                "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it");
+    };
+
+    check(*key_type);
+    key_type->forEachChild(check);
+}
+
 void SelectQueryExpressionAnalyzer::appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types)
 {
     const auto * select_query = getAggregatingQuery();
@@ -1599,6 +1631,12 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(
         with_fill = true;
     }
 
+    for (const auto & result_column : step.getResultColumns())
+    {
+        if (order_by_keys.contains(result_column.name))
+            validateOrderByKeyType(result_column.type);
+    }
+
     if (auto interpolate_list = select_query->interpolate())
     {
@@ -1664,6 +1702,26 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(
     return actions;
 }
 
+void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & key_type) const
+{
+    if (getContext()->getSettingsRef()[Setting::allow_suspicious_types_in_order_by])
+        return;
+
+    auto check = [](const IDataType & type)
+    {
+        if (isDynamic(type) || isVariant(type))
+            throw Exception(
+                ErrorCodes::ILLEGAL_COLUMN,
+                "Data types Variant/Dynamic are not allowed in ORDER BY keys, because they can lead to unexpected results. "
+                "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if "
+                "it's a JSON path subcolumn) or casting this column to a specific data type. "
+                "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it");
+    };
+
+    check(*key_type);
+    key_type->forEachChild(check);
+}
+
 bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain, bool only_types)
 {
     const auto * select_query = getSelectQuery();
@@ -1981,7 +2039,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
         Block before_prewhere_sample = source_header;
         if (sanitizeBlock(before_prewhere_sample))
         {
-            before_prewhere_sample = prewhere_dag_and_flags->dag.updateHeader(before_prewhere_sample);
+            ExpressionActions(
+                prewhere_dag_and_flags->dag.clone(),
+                ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample);
             auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName());
             /// If the filter column is a constant, record it.
if (column_elem.column) @@ -2013,7 +2073,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( before_where_sample = source_header; if (sanitizeBlock(before_where_sample)) { - before_where_sample = before_where->dag.updateHeader(before_where_sample); + ExpressionActions( + before_where->dag.clone(), + ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index d4ee8832c1c..5e3e1f81ca1 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -396,6 +396,7 @@ private: ActionsAndProjectInputsFlagPtr appendPrewhere(ExpressionActionsChain & chain, bool only_types); bool appendWhere(ExpressionActionsChain & chain, bool only_types); bool appendGroupBy(ExpressionActionsChain & chain, bool only_types, bool optimize_aggregation_in_order, ManyExpressionActions &); + void validateGroupByKeyType(const DataTypePtr & key_type) const; void appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types); void appendWindowFunctionsArguments(ExpressionActionsChain & chain, bool only_types); @@ -408,6 +409,7 @@ private: bool appendHaving(ExpressionActionsChain & chain, bool only_types); /// appendSelect ActionsAndProjectInputsFlagPtr appendOrderBy(ExpressionActionsChain & chain, bool only_types, bool optimize_read_in_order, ManyExpressionActions &); + void validateOrderByKeyType(const DataTypePtr & key_type) const; bool appendLimitBy(ExpressionActionsChain & chain, bool only_types); /// appendProjectResult }; diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 21b5b04bca3..384ad669206 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -1,11 +1,24 @@ -#include -#include +#include + #include +#include +#include +#include +#include namespace DB { +constexpr static bool debug_logging_enabled = false; + +template +inline static void logDebug(const char * fmt_str, Args&&... 
args) +{ + if constexpr (debug_logging_enabled) + LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); +} + bool less(const Field & lhs, const Field & rhs, int direction) { if (direction == -1) @@ -28,6 +41,10 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); + + constraints.reserve(sort_description.size()); + for (size_t i = 0; i < size(); ++i) + constraints.push_back(getFillDescription(i).fill_to); } bool FillingRow::operator<(const FillingRow & other) const @@ -63,71 +80,254 @@ bool FillingRow::isNull() const return true; } -std::pair FillingRow::next(const FillingRow & to_row) +std::optional FillingRow::doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to) { + Field shifted_value = row[column_ind]; + + if (less(to, shifted_value, getDirection(column_ind))) + return std::nullopt; + + for (int32_t step_len = 1, step_no = 0; step_no < 100 && step_len > 0; ++step_no) + { + Field next_value = shifted_value; + descr.step_func(next_value, step_len); + + if (less(to, next_value, getDirection(0))) + { + step_len /= 2; + } + else + { + shifted_value = std::move(next_value); + step_len *= 2; + } + } + + return shifted_value; +} + +bool FillingRow::hasSomeConstraints(size_t pos) const +{ + return !constraints[pos].isNull(); +} + +bool FillingRow::isConstraintsSatisfied(size_t pos) const +{ + chassert(!row[pos].isNull()); + chassert(hasSomeConstraints(pos)); + + int direction = getDirection(pos); + logDebug("constraint: {}, row: {}, direction: {}", constraints[pos], row[pos], direction); + + return less(row[pos], constraints[pos], direction); +} + +static const Field & findBorder(const Field & constraint, const Field & next_original, int direction) +{ + if (constraint.isNull()) + return next_original; + + if (next_original.isNull()) + return constraint; + + if (less(constraint, next_original, direction)) + return constraint; + + return next_original; +} + +bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) +{ + const size_t row_size = size(); size_t pos = 0; /// Find position we need to increment for generating next row. for (; pos < row_size; ++pos) - if (!row[pos].isNull() && !to_row.row[pos].isNull() && !equals(row[pos], to_row.row[pos])) + { + if (row[pos].isNull()) + continue; + + const Field & border = findBorder(constraints[pos], next_original_row[pos], getDirection(pos)); + logDebug("border: {}", border); + + if (!border.isNull() && !equals(row[pos], border)) break; + } - if (pos == row_size || less(to_row.row[pos], row[pos], getDirection(pos))) - return {false, false}; + logDebug("pos: {}", pos); - /// If we have any 'fill_to' value at position greater than 'pos', - /// we need to generate rows up to 'fill_to' value. + if (pos == row_size) + return false; + + if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) + return false; + + if (!constraints[pos].isNull() && !less(row[pos], constraints[pos], getDirection(pos))) + return false; + + /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, + /// we need to generate rows up to one of this borders. 
for (size_t i = row_size - 1; i > pos; --i) { auto & fill_column_desc = getFillDescription(i); - if (fill_column_desc.fill_to.isNull() || row[i].isNull()) + if (row[i].isNull()) + continue; + + if (constraints[i].isNull()) continue; Field next_value = row[i]; - fill_column_desc.step_func(next_value); - if (less(next_value, fill_column_desc.fill_to, getDirection(i))) - { - row[i] = next_value; - initFromDefaults(i + 1); - return {true, true}; - } + fill_column_desc.step_func(next_value, 1); + + if (!less(next_value, constraints[i], getDirection(i))) + continue; + + row[i] = next_value; + initUsingFrom(i + 1); + + value_changed = true; + return true; } auto next_value = row[pos]; - getFillDescription(pos).step_func(next_value); + getFillDescription(pos).step_func(next_value, 1); - if (less(to_row.row[pos], next_value, getDirection(pos)) || equals(next_value, getFillDescription(pos).fill_to)) - return {false, false}; + if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) + return false; + + if (!constraints[pos].isNull() && !less(next_value, constraints[pos], getDirection(pos))) + return false; row[pos] = next_value; - if (equals(row[pos], to_row.row[pos])) + if (equals(row[pos], next_original_row[pos])) { bool is_less = false; for (size_t i = pos + 1; i < row_size; ++i) { - const auto & fill_from = getFillDescription(i).fill_from; - if (!fill_from.isNull()) - row[i] = fill_from; + const auto & descr = getFillDescription(i); + if (!descr.fill_from.isNull()) + row[i] = descr.fill_from; else - row[i] = to_row.row[i]; - is_less |= less(row[i], to_row.row[i], getDirection(i)); + row[i] = next_original_row[i]; + + is_less |= ( + (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && + (constraints[i].isNull() || less(row[i], constraints[i], getDirection(i))) + ); } - return {is_less, true}; + value_changed = true; + return is_less; } - initFromDefaults(pos + 1); - return {true, true}; + initUsingFrom(pos + 1); + + value_changed = true; + return true; } -void FillingRow::initFromDefaults(size_t from_pos) +bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) +{ + logDebug("next_original_row: {}, current: {}", next_original_row, *this); + + for (size_t pos = 0; pos < size(); ++pos) + { + if (row[pos].isNull() || next_original_row[pos].isNull() || equals(row[pos], next_original_row[pos])) + continue; + + if (less(next_original_row[pos], row[pos], getDirection(pos))) + return false; + + std::optional next_value = doLongJump(getFillDescription(pos), pos, next_original_row[pos]); + logDebug("jumped to next value: {}", next_value.value_or("Did not complete")); + + row[pos] = std::move(next_value.value()); + + if (equals(row[pos], next_original_row[pos])) + { + bool is_less = false; + for (size_t i = pos + 1; i < size(); ++i) + { + const auto & descr = getFillDescription(i); + if (!descr.fill_from.isNull()) + row[i] = descr.fill_from; + else + row[i] = next_original_row[i]; + + is_less |= ( + (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && + (constraints[i].isNull() || less(row[i], constraints[i], getDirection(i))) + ); + } + + logDebug("is less: {}", is_less); + + value_changed = true; + return is_less; + } + else + { + initUsingTo(/*from_pos=*/pos + 1); + + value_changed = false; + return false; + } + } + + return false; +} + +bool FillingRow::hasSomeConstraints() const +{ + for (size_t pos = 0; pos < size(); ++pos) + if (hasSomeConstraints(pos)) + 
return true; + + return false; +} + +bool FillingRow::isConstraintsSatisfied() const +{ + for (size_t pos = 0; pos < size(); ++pos) + { + if (row[pos].isNull() || !hasSomeConstraints(pos)) + continue; + + return isConstraintsSatisfied(pos); + } + + return true; +} + +void FillingRow::initUsingFrom(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_from; } +void FillingRow::initUsingTo(size_t from_pos) +{ + for (size_t i = from_pos; i < sort_description.size(); ++i) + row[i] = getFillDescription(i).fill_to; +} + +void FillingRow::updateConstraintsWithStalenessRow(const Columns& base_row, size_t row_ind) +{ + for (size_t i = 0; i < size(); ++i) + { + const auto& descr = getFillDescription(i); + + if (!descr.fill_staleness.isNull()) + { + Field staleness_border = (*base_row[i])[row_ind]; + descr.staleness_step_func(staleness_border, 1); + constraints[i] = findBorder(descr.fill_to, staleness_border, getDirection(i)); + } + } +} + String FillingRow::dump() const { WriteBufferFromOwnString out; @@ -147,3 +347,12 @@ WriteBuffer & operator<<(WriteBuffer & out, const FillingRow & row) } } + +template <> +struct fmt::formatter : fmt::formatter +{ + constexpr auto format(const DB::FillingRow & row, format_context & ctx) const + { + return fmt::format_to(ctx.out(), "{}", row.dump()); + } +}; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index 004b417542c..08d624a2405 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace DB { @@ -15,16 +15,28 @@ bool equals(const Field & lhs, const Field & rhs); */ class FillingRow { + /// finds last value <= to + std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); + + bool hasSomeConstraints(size_t pos) const; + bool isConstraintsSatisfied(size_t pos) const; + public: explicit FillingRow(const SortDescription & sort_description); /// Generates next row according to fill 'from', 'to' and 'step' values. 
- /// Return pair of boolean - /// apply - true if filling values should be inserted into result set - /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & to_row); + /// Returns true if filling values should be inserted into result set + bool next(const FillingRow & next_original_row, bool& value_changed); - void initFromDefaults(size_t from_pos = 0); + /// Returns true if need to generate some prefix for to_row + bool shift(const FillingRow & next_original_row, bool& value_changed); + + bool hasSomeConstraints() const; + bool isConstraintsSatisfied() const; + + void initUsingFrom(size_t from_pos = 0); + void initUsingTo(size_t from_pos = 0); + void updateConstraintsWithStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } const Field & operator[](size_t index) const { return row[index]; } @@ -42,6 +54,7 @@ public: private: Row row; + Row constraints; SortDescription sort_description; }; diff --git a/src/Interpreters/InterpreterBackupQuery.cpp b/src/Interpreters/InterpreterBackupQuery.cpp index 6f76b21a7b8..baaa6d40f0d 100644 --- a/src/Interpreters/InterpreterBackupQuery.cpp +++ b/src/Interpreters/InterpreterBackupQuery.cpp @@ -2,6 +2,8 @@ #include #include +#include +#include #include #include #include @@ -18,13 +20,13 @@ namespace DB namespace { - Block getResultRow(const BackupOperationInfo & info) + Block getResultRow(const String & id, BackupStatus status) { auto column_id = ColumnString::create(); auto column_status = ColumnInt8::create(); - column_id->insert(info.id); - column_status->insert(static_cast(info.status)); + column_id->insert(id); + column_status->insert(static_cast(status)); Block res_columns; res_columns.insert(0, {std::move(column_id), std::make_shared(), "id"}); @@ -36,15 +38,18 @@ namespace BlockIO InterpreterBackupQuery::execute() { + const ASTBackupQuery & backup_query = query_ptr->as(); auto & backups_worker = context->getBackupsWorker(); - auto id = backups_worker.start(query_ptr, context); - auto info = backups_worker.getInfo(id); - if (info.exception) - std::rethrow_exception(info.exception); + auto [id, status] = backups_worker.start(query_ptr, context); + + /// Wait if it's a synchronous operation. 
+ bool async = BackupSettings::isAsync(backup_query); + if (!async) + status = backups_worker.wait(id); BlockIO res_io; - res_io.pipeline = QueryPipeline(std::make_shared(getResultRow(info))); + res_io.pipeline = QueryPipeline(std::make_shared(getResultRow(id, status))); return res_io; } diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 45636ab40b9..4c875026ace 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -1310,7 +1310,7 @@ RefreshTaskList InterpreterSystemQuery::getRefreshTasks() void InterpreterSystemQuery::prewarmMarkCache() { if (table_id.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table is not specified for prewarming marks cache"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table is not specified for PREWARM MARK CACHE command"); getContext()->checkAccess(AccessType::SYSTEM_PREWARM_MARK_CACHE, table_id); diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 21c30a60617..435fda64bc2 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -447,12 +447,16 @@ void QueryStatus::ExecutorHolder::remove() executor = nullptr; } -CancellationCode QueryStatus::cancelQuery(bool) +CancellationCode QueryStatus::cancelQuery(bool /* kill */, std::exception_ptr exception) { - if (is_killed.load()) + if (is_killed.exchange(true)) return CancellationCode::CancelSent; - is_killed.store(true); + { + std::lock_guard lock{cancellation_exception_mutex}; + if (!cancellation_exception) + cancellation_exception = exception; + } std::vector executors_snapshot; @@ -486,7 +490,7 @@ void QueryStatus::addPipelineExecutor(PipelineExecutor * e) /// addPipelineExecutor() from the cancelQuery() context, and this will /// lead to deadlock. if (is_killed.load()) - throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); + throwQueryWasCancelled(); std::lock_guard lock(executors_mutex); assert(!executors.contains(e)); @@ -512,11 +516,20 @@ void QueryStatus::removePipelineExecutor(PipelineExecutor * e) bool QueryStatus::checkTimeLimit() { if (is_killed.load()) - throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); + throwQueryWasCancelled(); return limits.checkTimeLimit(watch, overflow_mode); } +void QueryStatus::throwQueryWasCancelled() const +{ + std::lock_guard lock{cancellation_exception_mutex}; + if (cancellation_exception) + std::rethrow_exception(cancellation_exception); + else + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); +} + bool QueryStatus::checkTimeLimitSoft() { if (is_killed.load()) diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index b2583e74d9b..f171fe8f4d4 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -109,6 +109,9 @@ protected: /// KILL was send to the query std::atomic is_killed { false }; + std::exception_ptr cancellation_exception TSA_GUARDED_BY(cancellation_exception_mutex); + mutable std::mutex cancellation_exception_mutex; + /// All data to the client already had been sent. /// Including EndOfStream or Exception. std::atomic is_all_data_sent { false }; @@ -127,6 +130,8 @@ protected: /// A weak pointer is used here because it's a ProcessListEntry which owns this QueryStatus, and not vice versa. 
void setProcessListEntry(std::weak_ptr process_list_entry_); + [[noreturn]] void throwQueryWasCancelled() const; + mutable std::mutex executors_mutex; struct ExecutorHolder @@ -225,7 +230,9 @@ public: QueryStatusInfo getInfo(bool get_thread_list = false, bool get_profile_events = false, bool get_settings = false) const; - CancellationCode cancelQuery(bool kill); + /// Cancels the current query. + /// Optional argument `exception` allows to set an exception which checkTimeLimit() will throw instead of "QUERY_WAS_CANCELLED". + CancellationCode cancelQuery(bool kill, std::exception_ptr exception = nullptr); bool isKilled() const { return is_killed; } diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index 6c48c9d58f4..02d6b4a7e05 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -191,7 +191,7 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context, entry.initial_query_id = context->getClientInfo().initial_query_id; entry.initiator_user = context->getUserName(); entry.access_hash = sipHash64(context->getAccess()->getAccessRights()->toString()); - String node_path = ddl_worker.enqueueQuery(entry); + String node_path = ddl_worker.enqueueQuery(entry, params.retries_info, context->getProcessListElement()); return getDDLOnClusterStatus(node_path, ddl_worker.getReplicasDir(), entry, context); } diff --git a/src/Interpreters/executeDDLQueryOnCluster.h b/src/Interpreters/executeDDLQueryOnCluster.h index d015e8d8694..69e0c38834e 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.h +++ b/src/Interpreters/executeDDLQueryOnCluster.h @@ -37,6 +37,9 @@ struct DDLQueryOnClusterParams /// Privileges which the current user should have to execute a query. AccessRightsElements access_to_check; + + /// Use retries when creating nodes "query-0000000000", "query-0000000001", "query-0000000002" in ZooKeeper. + ZooKeeperRetriesInfo retries_info; }; /// Pushes distributed DDL query to the queue. 
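The ProcessList change above is a record-and-rethrow cancellation pattern: cancelQuery() stores an optional exception exactly once, and the periodic checks such as checkTimeLimit() rethrow that stored exception instead of the generic "Query was cancelled" error. A minimal standalone sketch of the same pattern follows; the CancellableTask class and its member names are illustrative, not the real QueryStatus API, and only standard C++ facilities are assumed.

#include <atomic>
#include <exception>
#include <iostream>
#include <mutex>
#include <stdexcept>

class CancellableTask
{
public:
    // Records the cancellation reason once; subsequent calls keep the first reason.
    void cancel(std::exception_ptr exception = nullptr)
    {
        if (is_cancelled.exchange(true))
            return;
        std::lock_guard lock(mutex);
        if (!cancellation_exception)
            cancellation_exception = exception;
    }

    // Called periodically from the worker: rethrows the stored reason if any,
    // otherwise falls back to a generic cancellation error.
    void checkCancelled() const
    {
        if (!is_cancelled.load())
            return;
        std::lock_guard lock(mutex);
        if (cancellation_exception)
            std::rethrow_exception(cancellation_exception);
        throw std::runtime_error("Query was cancelled");
    }

private:
    std::atomic<bool> is_cancelled{false};
    mutable std::mutex mutex;
    std::exception_ptr cancellation_exception;
};

int main()
{
    CancellableTask task;
    task.cancel(std::make_exception_ptr(std::runtime_error("cancelled because the table was dropped")));
    try { task.checkCancelled(); }
    catch (const std::exception & e) { std::cout << e.what() << '\n'; }
}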
diff --git a/src/Interpreters/fuzzers/CMakeLists.txt b/src/Interpreters/fuzzers/CMakeLists.txt index 3317bba7e30..174fae299b7 100644 --- a/src/Interpreters/fuzzers/CMakeLists.txt +++ b/src/Interpreters/fuzzers/CMakeLists.txt @@ -3,5 +3,6 @@ target_link_libraries(execute_query_fuzzer PRIVATE dbms clickhouse_table_functions clickhouse_aggregate_functions + clickhouse_functions clickhouse_dictionaries clickhouse_dictionaries_embedded) diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 007b31d9fdc..de767947428 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -253,7 +253,7 @@ void download(FileSegment & file_segment) download(cache_base_path, file_segment); ASSERT_EQ(file_segment.state(), State::DOWNLOADING); - file_segment.complete(); + file_segment.complete(false); ASSERT_EQ(file_segment.state(), State::DOWNLOADED); } @@ -263,7 +263,7 @@ void assertDownloadFails(FileSegment & file_segment) ASSERT_EQ(file_segment.getDownloadedSize(), 0); std::string failure_reason; ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason)); - file_segment.complete(); + file_segment.complete(false); } void download(const HolderPtr & holder) @@ -971,7 +971,7 @@ TEST_F(FileCacheTest, temporaryData) ASSERT_TRUE(segment->getOrSetDownloader() == DB::FileSegment::getCallerId()); ASSERT_TRUE(segment->reserve(segment->range().size(), 1000, failure_reason)); download(*segment); - segment->complete(); + segment->complete(false); } } diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index e7c3fdbb548..1c7d72bafcc 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -128,4 +128,14 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & format_settings, Fo } } +void ASTColumnDeclaration::forEachPointerToChild(std::function f) +{ + f(reinterpret_cast(&default_expression)); + f(reinterpret_cast(&comment)); + f(reinterpret_cast(&codec)); + f(reinterpret_cast(&statistics_desc)); + f(reinterpret_cast(&ttl)); + f(reinterpret_cast(&collation)); + f(reinterpret_cast(&settings)); +} } diff --git a/src/Parsers/ASTColumnDeclaration.h b/src/Parsers/ASTColumnDeclaration.h index 914916d5074..0c5076f0201 100644 --- a/src/Parsers/ASTColumnDeclaration.h +++ b/src/Parsers/ASTColumnDeclaration.h @@ -29,6 +29,9 @@ public: ASTPtr clone() const override; void formatImpl(const FormatSettings & format_settings, FormatState & state, FormatStateStacked frame) const override; + +protected: + void forEachPointerToChild(std::function f) override; }; } diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index 53d44e2f325..11cfe2e584e 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -724,7 +724,10 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format { if (secret_arguments.are_named) { - assert_cast(argument.get())->arguments->children[0]->formatImpl(settings, state, nested_dont_need_parens); + if (const auto * func_ast = typeid_cast(argument.get())) + func_ast->arguments->children[0]->formatImpl(settings, state, nested_dont_need_parens); + else + argument->formatImpl(settings, state, nested_dont_need_parens); settings.ostr << (settings.hilite ? hilite_operator : "") << " = " << (settings.hilite ? 
hilite_none : ""); } if (!secret_arguments.replacement.empty()) diff --git a/src/Parsers/ASTOrderByElement.cpp b/src/Parsers/ASTOrderByElement.cpp index 09193a8b5e1..d87c296d398 100644 --- a/src/Parsers/ASTOrderByElement.cpp +++ b/src/Parsers/ASTOrderByElement.cpp @@ -54,6 +54,11 @@ void ASTOrderByElement::formatImpl(const FormatSettings & settings, FormatState settings.ostr << (settings.hilite ? hilite_keyword : "") << " STEP " << (settings.hilite ? hilite_none : ""); fill_step->formatImpl(settings, state, frame); } + if (auto fill_staleness = getFillStaleness()) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << " STALENESS " << (settings.hilite ? hilite_none : ""); + fill_staleness->formatImpl(settings, state, frame); + } } } diff --git a/src/Parsers/ASTOrderByElement.h b/src/Parsers/ASTOrderByElement.h index 6edf84d7bde..4dc35dac217 100644 --- a/src/Parsers/ASTOrderByElement.h +++ b/src/Parsers/ASTOrderByElement.h @@ -18,6 +18,7 @@ private: FILL_FROM, FILL_TO, FILL_STEP, + FILL_STALENESS, }; public: @@ -32,12 +33,14 @@ public: void setFillFrom(ASTPtr node) { setChild(Child::FILL_FROM, node); } void setFillTo(ASTPtr node) { setChild(Child::FILL_TO, node); } void setFillStep(ASTPtr node) { setChild(Child::FILL_STEP, node); } + void setFillStaleness(ASTPtr node) { setChild(Child::FILL_STALENESS, node); } /** Collation for locale-specific string comparison. If empty, then sorting done by bytes. */ ASTPtr getCollation() const { return getChild(Child::COLLATION); } ASTPtr getFillFrom() const { return getChild(Child::FILL_FROM); } ASTPtr getFillTo() const { return getChild(Child::FILL_TO); } ASTPtr getFillStep() const { return getChild(Child::FILL_STEP); } + ASTPtr getFillStaleness() const { return getChild(Child::FILL_STALENESS); } String getID(char) const override { return "OrderByElement"; } diff --git a/src/Parsers/Access/ParserGrantQuery.cpp b/src/Parsers/Access/ParserGrantQuery.cpp index e29cf11273b..4a0d24559a3 100644 --- a/src/Parsers/Access/ParserGrantQuery.cpp +++ b/src/Parsers/Access/ParserGrantQuery.cpp @@ -155,6 +155,9 @@ namespace for (auto & [access_flags, columns] : access_and_columns) { + if (wildcard && !columns.empty()) + return false; + AccessRightsElement element; element.access_flags = access_flags; element.columns = std::move(columns); diff --git a/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h index dd0ba91d428..c02f8d06323 100644 --- a/src/Parsers/CommonParsers.h +++ b/src/Parsers/CommonParsers.h @@ -546,6 +546,7 @@ namespace DB MR_MACROS(YY, "YY") \ MR_MACROS(YYYY, "YYYY") \ MR_MACROS(ZKPATH, "ZKPATH") \ + MR_MACROS(STALENESS, "STALENESS") \ /// The list of keywords where underscore is intentional #define APPLY_FOR_PARSER_KEYWORDS_WITH_UNDERSCORES(MR_MACROS) \ diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 31efcb16f02..ad062d27a37 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -2178,6 +2178,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect ParserKeyword from(Keyword::FROM); ParserKeyword to(Keyword::TO); ParserKeyword step(Keyword::STEP); + ParserKeyword staleness(Keyword::STALENESS); ParserStringLiteral collate_locale_parser; ParserExpressionWithOptionalAlias exp_parser(false); @@ -2219,6 +2220,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect ASTPtr fill_from; ASTPtr fill_to; ASTPtr fill_step; + ASTPtr fill_staleness; if (with_fill.ignore(pos, expected)) { 
has_with_fill = true; @@ -2230,6 +2232,9 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect if (step.ignore(pos, expected) && !exp_parser.parse(pos, fill_step, expected)) return false; + + if (staleness.ignore(pos, expected) && !exp_parser.parse(pos, fill_staleness, expected)) + return false; } auto elem = std::make_shared(); @@ -2244,6 +2249,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect elem->setFillFrom(fill_from); elem->setFillTo(fill_to); elem->setFillStep(fill_step); + elem->setFillStaleness(fill_staleness); node = elem; diff --git a/src/Parsers/fuzzers/CMakeLists.txt b/src/Parsers/fuzzers/CMakeLists.txt index 903319d733c..c829c26a805 100644 --- a/src/Parsers/fuzzers/CMakeLists.txt +++ b/src/Parsers/fuzzers/CMakeLists.txt @@ -2,10 +2,10 @@ clickhouse_add_executable(lexer_fuzzer lexer_fuzzer.cpp ${SRCS}) target_link_libraries(lexer_fuzzer PRIVATE clickhouse_parsers) clickhouse_add_executable(select_parser_fuzzer select_parser_fuzzer.cpp ${SRCS}) -target_link_libraries(select_parser_fuzzer PRIVATE clickhouse_parsers dbms) +target_link_libraries(select_parser_fuzzer PRIVATE clickhouse_parsers clickhouse_functions dbms) clickhouse_add_executable(create_parser_fuzzer create_parser_fuzzer.cpp ${SRCS}) -target_link_libraries(create_parser_fuzzer PRIVATE clickhouse_parsers dbms) +target_link_libraries(create_parser_fuzzer PRIVATE clickhouse_parsers clickhouse_functions dbms) add_subdirectory(codegen_fuzzer) diff --git a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt index 74fdcff79f7..ee17e03fce2 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt +++ b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt @@ -47,4 +47,4 @@ target_compile_options (codegen_select_fuzzer PRIVATE -Wno-newline-eof) target_link_libraries(protoc ch_contrib::fuzzer) target_include_directories(codegen_select_fuzzer SYSTEM BEFORE PRIVATE "${CMAKE_CURRENT_BINARY_DIR}") -target_link_libraries(codegen_select_fuzzer PRIVATE ch_contrib::protobuf_mutator ch_contrib::protoc dbms) +target_link_libraries(codegen_select_fuzzer PRIVATE ch_contrib::protobuf_mutator ch_contrib::protoc clickhouse_functions dbms) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index aea304e0ecc..aa233109fa9 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -391,6 +391,9 @@ public: if (sort_node.hasFillStep()) buffer << " STEP " << calculateActionNodeName(sort_node.getFillStep()); + + if (sort_node.hasFillStaleness()) + buffer << " STALENESS " << calculateActionNodeName(sort_node.getFillStaleness()); } if (i + 1 != order_by_nodes_size) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 39c1352c9cf..5c153f6db39 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -1555,10 +1555,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ SortingStep::Settings sort_settings(*query_context); auto sorting_step = std::make_unique( - plan.getCurrentHeader(), - std::move(sort_description), - 0 /*limit*/, - sort_settings); + plan.getCurrentHeader(), std::move(sort_description), 0 /*limit*/, sort_settings, true /*is_sorting_for_merge_join*/); sorting_step->setStepDescription(fmt::format("Sort {} before JOIN", join_table_side)); plan.addStep(std::move(sorting_step)); }; diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp 
index af51afdef13..9476ae348c5 100644 --- a/src/Planner/PlannerSorting.cpp +++ b/src/Planner/PlannerSorting.cpp @@ -43,7 +43,7 @@ std::pair extractWithFillValue(const QueryTreeNodePtr & node return result; } -std::pair> extractWithFillStepValue(const QueryTreeNodePtr & node) +std::pair> extractWithFillValueWithIntervalKind(const QueryTreeNodePtr & node) { const auto & constant_node = node->as(); @@ -77,7 +77,7 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) if (sort_node.hasFillStep()) { - auto extract_result = extractWithFillStepValue(sort_node.getFillStep()); + auto extract_result = extractWithFillValueWithIntervalKind(sort_node.getFillStep()); fill_column_description.fill_step = std::move(extract_result.first); fill_column_description.step_kind = std::move(extract_result.second); } @@ -87,16 +87,36 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) fill_column_description.fill_step = Field(direction_value); } + if (sort_node.getFillStaleness()) + { + auto extract_result = extractWithFillValueWithIntervalKind(sort_node.getFillStaleness()); + fill_column_description.fill_staleness = std::move(extract_result.first); + fill_column_description.staleness_kind = std::move(extract_result.second); + } + + /////////////////////////////////// + if (applyVisitor(FieldVisitorAccurateEquals(), fill_column_description.fill_step, Field{0})) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be zero"); + if (sort_node.hasFillStaleness()) + { + if (sort_node.hasFillFrom()) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS cannot be used together with WITH FILL FROM"); + } + if (sort_node.getSortDirection() == SortDirection::ASCENDING) { if (applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_step, Field{0})) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be negative for sorting in ascending direction"); + if (applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_staleness, Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be negative for sorting in ascending direction"); + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_to, fill_column_description.fill_from)) { @@ -110,6 +130,10 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be positive for sorting in descending direction"); + if (applyVisitor(FieldVisitorAccurateLess(), Field{0}, fill_column_description.fill_staleness)) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be positive for sorting in descending direction"); + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_from, fill_column_description.fill_to)) { diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index b97a9a36381..fce86a6cda0 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -170,12 +171,25 @@ const QueryNode * findQueryForParallelReplicas( const 
std::unordered_map & mapping, const Settings & settings) { - const QueryPlan::Node * prev_checked_node = nullptr; + struct Frame + { + const QueryPlan::Node * node = nullptr; + /// Below we will check subqueries from `stack` to find outermost subquery that could be executed remotely. + /// Currently traversal algorithm considers only steps with 0 or 1 children and JOIN specifically. + /// When we found some step that requires finalization on the initiator (e.g. GROUP BY) there are two options: + /// 1. If plan looks like a single path (e.g. AggregatingStep -> ExpressionStep -> Reading) we can execute + /// current subquery as a whole with replicas. + /// 2. If we were inside JOIN we cannot offload the whole subquery to replicas because at least one side + /// of the JOIN needs to be finalized on the initiator. + /// So this flag is used to track what subquery to return once we hit a step that needs finalization. + bool inside_join = false; + }; + const QueryNode * res = nullptr; while (!stack.empty()) { - const QueryNode * subquery_node = stack.top(); + const QueryNode * const subquery_node = stack.top(); stack.pop(); auto it = mapping.find(subquery_node); @@ -183,22 +197,21 @@ const QueryNode * findQueryForParallelReplicas( if (it == mapping.end()) break; - const QueryPlan::Node * curr_node = it->second; - const QueryPlan::Node * next_node_to_check = curr_node; + std::stack nodes_to_check; + nodes_to_check.push({.node = it->second, .inside_join = false}); bool can_distribute_full_node = true; + bool currently_inside_join = false; - while (next_node_to_check && next_node_to_check != prev_checked_node) + while (!nodes_to_check.empty()) { + const auto & [next_node_to_check, inside_join] = nodes_to_check.top(); + nodes_to_check.pop(); const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); if (children.empty()) { - /// Found a source step. This should be possible only in the first iteration. - if (prev_checked_node) - return nullptr; - - next_node_to_check = nullptr; + /// Found a source step. } else if (children.size() == 1) { @@ -206,12 +219,19 @@ const QueryNode * findQueryForParallelReplicas( const auto * filter = typeid_cast(step); const auto * creating_sets = typeid_cast(step); - bool allowed_creating_sets = settings[Setting::parallel_replicas_allow_in_with_subquery] && creating_sets; + const bool allowed_creating_sets = settings[Setting::parallel_replicas_allow_in_with_subquery] && creating_sets; - if (!expression && !filter && !allowed_creating_sets) + const auto * sorting = typeid_cast(step); + /// Sorting for merge join is supposed to be done locally before join itself, so it doesn't need finalization. + const bool allowed_sorting = sorting && sorting->isSortingForMergeJoin(); + + if (!expression && !filter && !allowed_creating_sets && !allowed_sorting) + { can_distribute_full_node = false; + currently_inside_join = inside_join; + } - next_node_to_check = children.front(); + nodes_to_check.push({.node = children.front(), .inside_join = inside_join}); } else { @@ -221,12 +241,11 @@ const QueryNode * findQueryForParallelReplicas( if (!join) return res; - next_node_to_check = children.front(); + for (const auto & child : children) + nodes_to_check.push({.node = child, .inside_join = true}); } } - /// Current node contains steps like GROUP BY / DISTINCT - /// Will try to execute query up to WithMergableStage if (!can_distribute_full_node) { /// Current query node does not contain subqueries. 
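The Frame comment above describes the new traversal: instead of walking a single parent chain, the search keeps an explicit stack of plan nodes, and every child pushed from a JOIN step is tagged with inside_join, so that when a step requiring finalization on the initiator is met the code knows whether to fall back to the previously found subquery or keep the current one. A small self-contained sketch of that idea, using a toy Node type and step names rather than ClickHouse's QueryPlan API, might look like this:

#include <iostream>
#include <stack>
#include <string>
#include <vector>

struct Node
{
    std::string kind;                     // e.g. "Expression", "Filter", "Reading", "Join", "Aggregating"
    std::vector<const Node *> children;
};

// Returns true if the whole subtree could run remotely; when it cannot,
// hit_inside_join tells whether the blocking step was found under a JOIN.
bool canDistributeFullNode(const Node & root, bool & hit_inside_join)
{
    struct Frame { const Node * node; bool inside_join; };
    std::stack<Frame> frames;
    frames.push({&root, false});

    bool can_distribute = true;
    hit_inside_join = false;

    while (!frames.empty())
    {
        auto [node, inside_join] = frames.top();
        frames.pop();

        const bool is_passthrough = node->kind == "Expression" || node->kind == "Filter" || node->kind == "Reading";
        const bool is_join = node->kind == "Join";

        if (!is_passthrough && !is_join)
        {
            // A step like GROUP BY / DISTINCT that needs finalization on the initiator.
            can_distribute = false;
            hit_inside_join = inside_join;
        }

        for (const auto * child : node->children)
            frames.push({child, inside_join || is_join});
    }
    return can_distribute;
}

int main()
{
    Node reading{"Reading", {}};
    Node aggregating{"Aggregating", {&reading}};
    Node join{"Join", {&aggregating, &reading}};

    bool hit_inside_join = false;
    std::cout << canDistributeFullNode(join, hit_inside_join) << ' ' << hit_inside_join << '\n';   // prints: 0 1
}

In the real change the same flag drives the final decision shown below, returning res (the last fully distributable subquery) rather than subquery_node when the blocking step was inside a JOIN.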
@@ -234,12 +253,11 @@ const QueryNode * findQueryForParallelReplicas( if (!res) return nullptr; - return subquery_node; + return currently_inside_join ? res : subquery_node; } /// Query is simple enough to be fully distributed. res = subquery_node; - prev_checked_node = curr_node; } return res; diff --git a/src/Processors/Formats/Impl/NativeFormat.cpp b/src/Processors/Formats/Impl/NativeFormat.cpp index 5411e2e7811..022cb38596b 100644 --- a/src/Processors/Formats/Impl/NativeFormat.cpp +++ b/src/Processors/Formats/Impl/NativeFormat.cpp @@ -15,16 +15,17 @@ namespace DB class NativeInputFormat final : public IInputFormat { public: - NativeInputFormat(ReadBuffer & buf, const Block & header_, const FormatSettings & settings) + NativeInputFormat(ReadBuffer & buf, const Block & header_, const FormatSettings & settings_) : IInputFormat(header_, &buf) , reader(std::make_unique( buf, header_, 0, - settings, - settings.defaults_for_omitted_fields ? &block_missing_values : nullptr)) + settings_, + settings_.defaults_for_omitted_fields ? &block_missing_values : nullptr)) , header(header_) , block_missing_values(header.columns()) + , settings(settings_) { } @@ -55,7 +56,7 @@ public: void setReadBuffer(ReadBuffer & in_) override { - reader = std::make_unique(in_, header, 0); + reader = std::make_unique(in_, header, 0, settings, settings.defaults_for_omitted_fields ? &block_missing_values : nullptr); IInputFormat::setReadBuffer(in_); } @@ -67,6 +68,7 @@ private: std::unique_ptr reader; Block header; BlockMissingValues block_missing_values; + const FormatSettings settings; size_t approx_bytes_read_for_chunk = 0; }; diff --git a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp index 7fd6e93dd80..8264b565e39 100644 --- a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp @@ -92,8 +92,9 @@ static NamesAndTypesList getHeaderForParquetMetadata() std::make_shared(std::make_shared()), std::make_shared(std::make_shared())}, Names{"num_values", "null_count", "distinct_count", "min", "max"}), + DataTypeFactory::instance().get("Bool"), }, - Names{"name", "path", "total_compressed_size", "total_uncompressed_size", "have_statistics", "statistics"}))}, + Names{"name", "path", "total_compressed_size", "total_uncompressed_size", "have_statistics", "statistics", "have_bloom_filter"}))}, Names{"num_columns", "num_rows", "total_uncompressed_size", "total_compressed_size", "columns"}))}, }; return names_and_types; @@ -350,6 +351,8 @@ void ParquetMetadataInputFormat::fillColumnChunksMetadata(const std::unique_ptr< fillColumnStatistics(column_chunk_metadata->statistics(), tuple_column.getColumn(5), row_group_metadata->schema()->Column(column_i)->type_length()); else tuple_column.getColumn(5).insertDefault(); + bool have_bloom_filter = column_chunk_metadata->bloom_filter_offset().has_value(); + assert_cast(tuple_column.getColumn(6)).insertValue(have_bloom_filter); } array_column.getOffsets().push_back(tuple_column.size()); } diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp index cbad6813fbc..5e271e12943 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp @@ -24,7 +24,12 @@ void IMergingAlgorithmWithDelayedChunk::initializeQueue(Inputs inputs) continue; cursors[source_num] = 
SortCursorImpl( - header, current_inputs[source_num].chunk.getColumns(), description, source_num, current_inputs[source_num].permutation); + header, + current_inputs[source_num].chunk.getColumns(), + current_inputs[source_num].chunk.getNumRows(), + description, + source_num, + current_inputs[source_num].permutation); inputs_origin_merge_tree_part_level[source_num] = getPartLevelFromChunk(current_inputs[source_num].chunk); } @@ -41,7 +46,7 @@ void IMergingAlgorithmWithDelayedChunk::updateCursor(Input & input, size_t sourc last_chunk_sort_columns = std::move(cursors[source_num].sort_columns); current_input.swap(input); - cursors[source_num].reset(current_input.chunk.getColumns(), header, current_input.permutation); + cursors[source_num].reset(current_input.chunk.getColumns(), header, current_input.chunk.getNumRows(), current_input.permutation); inputs_origin_merge_tree_part_level[source_num] = getPartLevelFromChunk(current_input.chunk); diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp index 47b7ddf38dc..f99f021286e 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp @@ -31,7 +31,8 @@ void IMergingAlgorithmWithSharedChunks::initialize(Inputs inputs) source.skip_last_row = inputs[source_num].skip_last_row; source.chunk = chunk_allocator.alloc(inputs[source_num].chunk); - cursors[source_num] = SortCursorImpl(header, source.chunk->getColumns(), description, source_num, inputs[source_num].permutation); + cursors[source_num] = SortCursorImpl( + header, source.chunk->getColumns(), source.chunk->getNumRows(), description, source_num, inputs[source_num].permutation); source.chunk->all_columns = cursors[source_num].all_columns; source.chunk->sort_columns = cursors[source_num].sort_columns; @@ -49,7 +50,7 @@ void IMergingAlgorithmWithSharedChunks::consume(Input & input, size_t source_num auto & source = sources[source_num]; source.skip_last_row = input.skip_last_row; source.chunk = chunk_allocator.alloc(input.chunk); - cursors[source_num].reset(source.chunk->getColumns(), header, input.permutation); + cursors[source_num].reset(source.chunk->getColumns(), header, source.chunk->getNumRows(), input.permutation); source.chunk->all_columns = cursors[source_num].all_columns; source.chunk->sort_columns = cursors[source_num].sort_columns; diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 3a9cf7ee141..28c6cb473e5 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -59,7 +59,7 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) if (!chunk) continue; - cursors[source_num] = SortCursorImpl(header, chunk.getColumns(), description, source_num); + cursors[source_num] = SortCursorImpl(header, chunk.getColumns(), chunk.getNumRows(), description, source_num); } if (sorting_queue_strategy == SortingQueueStrategy::Default) @@ -84,7 +84,7 @@ void MergingSortedAlgorithm::consume(Input & input, size_t source_num) { removeConstAndSparse(input); current_inputs[source_num].swap(input); - cursors[source_num].reset(current_inputs[source_num].chunk.getColumns(), header); + cursors[source_num].reset(current_inputs[source_num].chunk.getColumns(), header, current_inputs[source_num].chunk.getNumRows()); if (sorting_queue_strategy == 
SortingQueueStrategy::Default) { diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index cd347d371d9..dbce348d1aa 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -46,11 +46,28 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); + if (!version_column.empty()) version_column_number = header_.getPositionByName(version_column); } void ReplacingSortedAlgorithm::insertRow() +{ + if (is_deleted_column_number != -1) + { + if (!(cleanup && assert_cast(*(*selected_row.all_columns)[is_deleted_column_number]).getData()[selected_row.row_num])) + insertRowImpl(); + } + else + { + insertRowImpl(); + } + + /// insertRowImpl() may has not been called + saveChunkForSkippingFinalFromSelectedRow(); +} + +void ReplacingSortedAlgorithm::insertRowImpl() { if (out_row_sources_buf) { @@ -67,6 +84,7 @@ void ReplacingSortedAlgorithm::insertRow() /// We just record the position to be selected in the chunk if (!selected_row.owned_chunk->replace_final_selection) selected_row.owned_chunk->replace_final_selection = ColumnUInt64::create(); + selected_row.owned_chunk->replace_final_selection->insert(selected_row.row_num); /// This is the last row we can select from `selected_row.owned_chunk`, keep it to emit later @@ -74,7 +92,9 @@ void ReplacingSortedAlgorithm::insertRow() to_be_emitted.push(std::move(selected_row.owned_chunk)); } else + { merged_data->insertRow(*selected_row.all_columns, selected_row.row_num, selected_row.owned_chunk->getNumRows()); + } selected_row.clear(); } @@ -113,30 +133,68 @@ IMergingAlgorithm::Status ReplacingSortedAlgorithm::merge() /// Write the data for the previous primary key. if (!selected_row.empty()) - { - if (is_deleted_column_number!=-1) - { - if (!(cleanup && assert_cast(*(*selected_row.all_columns)[is_deleted_column_number]).getData()[selected_row.row_num])) - insertRow(); - } - else - insertRow(); - /// insertRow() may has not been called - saveChunkForSkippingFinalFromSelectedRow(); - } + insertRow(); selected_row.clear(); } + if (current->isFirst() + && key_differs + && is_deleted_column_number == -1 /// Ignore optimization if we need to filter deleted rows. + && sources_origin_merge_tree_part_level[current->order] > 0 + && !skipLastRowFor(current->order) /// Ignore optimization if last row should be skipped. + && (queue.size() == 1 || (queue.size() >= 2 && current.totallyLess(queue.nextChild())))) + { + /// This is special optimization if current cursor is totally less than next cursor + /// and current chunk has no duplicates (we assume that parts with non-zero level have no duplicates) + /// We want to insert current cursor chunk directly in merged data. + + /// First if merged_data is not empty we need to flush it. + /// We will get into the same condition on next merge call. + if (merged_data->mergedRows() != 0) + return Status(merged_data->pull()); + + size_t source_num = current->order; + auto current_chunk = std::move(*sources[source_num].chunk); + size_t chunk_num_rows = current_chunk.getNumRows(); + + /// We will get the next block from the corresponding source, if there is one. 
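Editor's note: the "totally less" shortcut introduced above can be summarized with a small std-only sketch (hypothetical `Block`/`Key` types, not `SortCursor`): when the last key of the current sorted block is strictly below the first key of every other pending block, the block can be emitted as a whole instead of being merged row by row.

```cpp
#include <cstdio>
#include <vector>

using Key = int;
using Block = std::vector<Key>;   // assumed sorted ascending, no duplicates inside

static bool totallyLess(const Block & current, const std::vector<Block> & others)
{
    if (current.empty())
        return false;
    for (const auto & other : others)
        if (!other.empty() && !(current.back() < other.front()))
            return false;          // key ranges overlap -> must merge row by row
    return true;
}

int main()
{
    Block current = {1, 2, 3};
    std::vector<Block> others = {{10, 11}, {20}};
    if (totallyLess(current, others))
        std::puts("emit the whole chunk as-is");   // analogous to the pass-through branch above
    else
        std::puts("fall back to row-by-row merge");
}
```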
+ queue.removeTop(); + + if (enable_vertical_final) + { + current_chunk.getChunkInfos().add(std::make_shared()); + Status status(std::move(current_chunk)); + status.required_source = source_num; + return status; + } + + merged_data->insertChunk(std::move(current_chunk), chunk_num_rows); + sources[source_num].chunk = {}; + + /// Write order of rows for other columns this data will be used in gather stream + if (out_row_sources_buf) + { + /// All rows are not skipped. + RowSourcePart row_source(source_num); + for (size_t i = 0; i < chunk_num_rows; ++i) + out_row_sources_buf->write(row_source.data); + } + + Status status(merged_data->pull()); + status.required_source = source_num; + return status; + } + /// Initially, skip all rows. Unskip last on insert. size_t current_pos = current_row_sources.size(); if (out_row_sources_buf) current_row_sources.emplace_back(current.impl->order, true); - if ((is_deleted_column_number!=-1)) + if (is_deleted_column_number != -1) { const UInt8 is_deleted = assert_cast(*current->all_columns[is_deleted_column_number]).getData()[current->getRow()]; - if ((is_deleted != 1) && (is_deleted != 0)) + if (is_deleted > 1) throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect data: is_deleted = {} (must be 1 or 0).", toString(is_deleted)); } @@ -172,17 +230,7 @@ IMergingAlgorithm::Status ReplacingSortedAlgorithm::merge() /// We will write the data for the last primary key. if (!selected_row.empty()) - { - if (is_deleted_column_number!=-1) - { - if (!(cleanup && assert_cast(*(*selected_row.all_columns)[is_deleted_column_number]).getData()[selected_row.row_num])) - insertRow(); - } - else - insertRow(); - /// insertRow() may has not been called - saveChunkForSkippingFinalFromSelectedRow(); - } + insertRow(); /// Skipping final: emit the remaining chunks if (!to_be_emitted.empty()) diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index 2f23f2a5c4d..ec366b900f5 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -13,8 +13,7 @@ class Logger; namespace DB { -/** Use in skipping final to keep list of indices of selected row after merging final - */ +//// Used in skipping final to keep the list of indices of selected rows after merging. struct ChunkSelectFinalIndices : public ChunkInfoCloneable { explicit ChunkSelectFinalIndices(MutableColumnPtr select_final_indices_); @@ -24,6 +23,11 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable +{ +}; + /** Merges several sorted inputs into one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps row with max `version` value. 
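Editor's note: the class comment restates the replacing-merge semantics. A self-contained model of those semantics (hypothetical `Row` type, not the real column-based implementation): among consecutive rows with equal keys keep the one with the maximum version, and with `cleanup` enabled drop the winner if it is marked deleted.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

struct Row
{
    int key;
    uint64_t version;
    uint8_t is_deleted;   // expected to be 0 or 1, mirroring the validation in merge()
};

static std::vector<Row> replaceMerge(const std::vector<Row> & sorted_rows, bool cleanup)
{
    std::vector<Row> result;
    size_t i = 0;
    while (i < sorted_rows.size())
    {
        Row best = sorted_rows[i];
        size_t j = i + 1;
        for (; j < sorted_rows.size() && sorted_rows[j].key == best.key; ++j)
            if (sorted_rows[j].version >= best.version)
                best = sorted_rows[j];   // a later row wins on equal versions

        if (!(cleanup && best.is_deleted))
            result.push_back(best);
        i = j;
    }
    return result;
}

int main()
{
    std::vector<Row> rows = {{1, 1, 0}, {1, 3, 1}, {2, 5, 0}};
    for (const auto & row : replaceMerge(rows, /*cleanup=*/true))
        std::printf("key=%d version=%llu\n", row.key, (unsigned long long)row.version);
}
```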
@@ -63,6 +67,7 @@ private: PODArray current_row_sources; void insertRow(); + void insertRowImpl(); /// Method for using in skipping FINAL logic /// Skipping FINAL doesn't merge rows to new chunks but marks selected rows in input chunks and emit them diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..c15c45ee269 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -77,13 +77,11 @@ static ITransformingStep::Traits getTraits(size_t limit) } SortingStep::SortingStep( - const Header & input_header, - SortDescription description_, - UInt64 limit_, - const Settings & settings_) + const Header & input_header, SortDescription description_, UInt64 limit_, const Settings & settings_, bool is_sorting_for_merge_join_) : ITransformingStep(input_header, input_header, getTraits(limit_)) , type(Type::Full) , result_description(std::move(description_)) + , is_sorting_for_merge_join(is_sorting_for_merge_join_) , limit(limit_) , sort_settings(settings_) { diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 6cdf626d4c8..be2e4b0149c 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -39,7 +39,8 @@ public: const Header & input_header, SortDescription description_, UInt64 limit_, - const Settings & settings_); + const Settings & settings_, + bool is_sorting_for_merge_join_ = false); /// Full with partitioning SortingStep( @@ -81,6 +82,8 @@ public: bool hasPartitions() const { return !partition_by_description.empty(); } + bool isSortingForMergeJoin() const { return is_sorting_for_merge_join; } + void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_); Type getType() const { return type; } @@ -125,6 +128,9 @@ private: SortDescription partition_by_description; + /// See `findQueryForParallelReplicas` + bool is_sorting_for_merge_join = false; + UInt64 limit; bool always_read_till_end = false; bool use_buffering = false; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 95f4a674ebb..ab782f3e521 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -7,18 +7,20 @@ #include #include #include +#include #include #include #include +#include namespace DB { -constexpr bool debug_logging_enabled = false; +constexpr static bool debug_logging_enabled = false; template -void logDebug(String key, const T & value, const char * separator = " : ") +inline static void logDebug(const char * key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -60,15 +62,74 @@ static FillColumnDescription::StepFunction getStepFunction( { #define DECLARE_CASE(NAME) \ case IntervalKind::Kind::NAME: \ - return [step, scale, &date_lut](Field & field) { \ + return [step, scale, &date_lut](Field & field, Int32 jumps_count) { \ field = Add##NAME##sImpl::execute(static_cast(\ - field.safeGet()), static_cast(step), date_lut, utc_time_zone, scale); }; + field.safeGet()), static_cast(step) * jumps_count, date_lut, utc_time_zone, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) #undef DECLARE_CASE } } +static FillColumnDescription::StepFunction getStepFunction(const Field & step, const std::optional & step_kind, const DataTypePtr & type) +{ + WhichDataType which(type); + + if (step_kind) + { + if (which.isDate() || which.isDate32()) + { + Int64 avg_seconds = 
step.safeGet() * step_kind->toAvgSeconds(); + if (std::abs(avg_seconds) < 86400) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Value of step is to low ({} seconds). Must be >= 1 day", std::abs(avg_seconds)); + } + + if (which.isDate()) + return getStepFunction(step_kind.value(), step.safeGet(), DateLUT::instance()); + else if (which.isDate32()) + return getStepFunction(step_kind.value(), step.safeGet(), DateLUT::instance()); + else if (const auto * date_time = checkAndGetDataType(type.get())) + return getStepFunction(step_kind.value(), step.safeGet(), date_time->getTimeZone()); + else if (const auto * date_time64 = checkAndGetDataType(type.get())) + { + const auto & step_dec = step.safeGet &>(); + Int64 converted_step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); + static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); + + switch (step_kind.value()) // NOLINT(bugprone-switch-missing-default-case) + { +#define DECLARE_CASE(NAME) \ + case IntervalKind::Kind::NAME: \ + return [converted_step, &time_zone = date_time64->getTimeZone()](Field & field, Int32 jumps_count) \ + { \ + auto field_decimal = field.safeGet>(); \ + auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), converted_step * jumps_count, time_zone, utc_time_zone, field_decimal.getScale()); \ + field = DecimalField(res, field_decimal.getScale()); \ + }; \ + break; + + FOR_EACH_INTERVAL_KIND(DECLARE_CASE) +#undef DECLARE_CASE + } + } + else + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); + } + else + { + return [step](Field & field, Int32 jumps_count) + { + auto shifted_step = step; + if (jumps_count != 1) + applyVisitor(FieldVisitorScale(jumps_count), shifted_step); + + applyVisitor(FieldVisitorSum(shifted_step), field); + }; + } +} + static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & type) { auto max_type = Field::Types::Null; @@ -125,7 +186,8 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & if (descr.fill_from.getType() > max_type || descr.fill_to.getType() > max_type - || descr.fill_step.getType() > max_type) + || descr.fill_step.getType() > max_type + || descr.fill_staleness.getType() > max_type) return false; if (!descr.fill_from.isNull()) @@ -134,56 +196,11 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & descr.fill_to = convertFieldToTypeOrThrow(descr.fill_to, *to_type); if (!descr.fill_step.isNull()) descr.fill_step = convertFieldToTypeOrThrow(descr.fill_step, *to_type); + if (!descr.fill_staleness.isNull()) + descr.fill_staleness = convertFieldToTypeOrThrow(descr.fill_staleness, *to_type); - if (descr.step_kind) - { - if (which.isDate() || which.isDate32()) - { - Int64 avg_seconds = descr.fill_step.safeGet() * descr.step_kind->toAvgSeconds(); - if (std::abs(avg_seconds) < 86400) - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "Value of step is to low ({} seconds). 
Must be >= 1 day", std::abs(avg_seconds)); - } - - if (which.isDate()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); - else if (which.isDate32()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); - else if (const auto * date_time = checkAndGetDataType(type.get())) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), date_time->getTimeZone()); - else if (const auto * date_time64 = checkAndGetDataType(type.get())) - { - const auto & step_dec = descr.fill_step.safeGet &>(); - Int64 step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); - static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); - - switch (*descr.step_kind) // NOLINT(bugprone-switch-missing-default-case) - { -#define DECLARE_CASE(NAME) \ - case IntervalKind::Kind::NAME: \ - descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \ - { \ - auto field_decimal = field.safeGet>(); \ - auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, utc_time_zone, field_decimal.getScale()); \ - field = DecimalField(res, field_decimal.getScale()); \ - }; \ - break; - - FOR_EACH_INTERVAL_KIND(DECLARE_CASE) -#undef DECLARE_CASE - } - } - else - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); - } - else - { - descr.step_func = [step = descr.fill_step](Field & field) - { - applyVisitor(FieldVisitorSum(step), field); - }; - } + descr.step_func = getStepFunction(descr.fill_step, descr.step_kind, type); + descr.staleness_step_func = getStepFunction(descr.fill_staleness, descr.staleness_kind, type); return true; } @@ -218,6 +235,7 @@ FillingTransform::FillingTransform( fill_column_positions.push_back(block_position); auto & descr = filling_row.getFillDescription(i); + running_with_staleness |= !descr.fill_staleness.isNull(); const Block & output_header = getOutputPort().getHeader(); const DataTypePtr & type = removeNullable(output_header.getByPosition(block_position).type); @@ -437,7 +455,7 @@ void FillingTransform::initColumns( non_const_columns.reserve(input_columns.size()); for (const auto & column : input_columns) - non_const_columns.push_back(column->convertToFullColumnIfConst()); + non_const_columns.push_back(column->convertToFullColumnIfConst()->convertToFullColumnIfSparse()); for (const auto & column : non_const_columns) output_columns.push_back(column->cloneEmpty()->assumeMutable()); @@ -482,26 +500,26 @@ bool FillingTransform::generateSuffixIfNeeded( MutableColumnRawPtrs res_sort_prefix_columns, MutableColumnRawPtrs res_other_columns) { - logDebug("generateSuffixIfNeeded() filling_row", filling_row); - logDebug("generateSuffixIfNeeded() next_row", next_row); + logDebug("generateSuffixIfNeeded filling_row", filling_row); + logDebug("generateSuffixIfNeeded next_row", next_row); /// Determines if we should insert filling row before start generating next rows - bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || next_row.isNull(); + bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || (next_row.isNull() && !filling_row.isNull()); logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) - next_row[i] = filling_row.getFillDescription(i).fill_to; + next_row[i] = Field{}; - logDebug("generateSuffixIfNeeded() 
next_row updated", next_row); + logDebug("generateSuffixIfNeeded next_row updated", next_row); - if (filling_row >= next_row) + if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsSatisfied()) { - logDebug("generateSuffixIfNeeded()", "no need to generate suffix"); + logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; } Block interpolate_block; - if (should_insert_first && filling_row < next_row) + if (should_insert_first) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); @@ -516,9 +534,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); - filling_row_changed = changed; - if (!apply) + if (!filling_row.next(next_row, filling_row_changed)) break; interpolate(result_columns, interpolate_block); @@ -595,7 +611,7 @@ void FillingTransform::transformRange( if (!fill_from.isNull() && !equals(current_value, fill_from)) { - filling_row.initFromDefaults(i); + filling_row.initUsingFrom(i); filling_row_inserted = false; if (less(fill_from, current_value, filling_row.getDirection(i))) { @@ -609,6 +625,9 @@ void FillingTransform::transformRange( } } + /// Init staleness first interval + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, range_begin); + for (size_t row_ind = range_begin; row_ind < range_end; ++row_ind) { logDebug("row", row_ind); @@ -619,21 +638,14 @@ void FillingTransform::transformRange( logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) - { - const auto current_value = (*input_fill_columns[i])[row_ind]; - const auto & fill_to = filling_row.getFillDescription(i).fill_to; + next_row[i] = (*input_fill_columns[i])[row_ind]; - if (fill_to.isNull() || less(current_value, fill_to, filling_row.getDirection(i))) - next_row[i] = current_value; - else - next_row[i] = fill_to; - } logDebug("next_row updated", next_row); /// The condition is true when filling row is initialized by value(s) in FILL FROM, /// and there are row(s) in current range with value(s) < then in the filling row. /// It can happen only once for a range. 
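Editor's note: the gap-filling loop driven by `filling_row.next(next_row, ...)` above can be illustrated with a simplified numeric sketch (std-only, hypothetical function name): starting from the last filling value, keep adding the step and emitting rows while the generated value stays strictly before the next real row.

```cpp
#include <cstdio>
#include <vector>

static std::vector<double> generateFillRows(double last_value, double next_real_value, double step)
{
    std::vector<double> generated;
    for (double value = last_value + step; value < next_real_value; value += step)
        generated.push_back(value);   // each of these becomes an inserted filling row
    return generated;
}

int main()
{
    // Real rows 1 and 5 with STEP 1 -> filling rows 2, 3, 4.
    for (double v : generateFillRows(1.0, 5.0, 1.0))
        std::printf("%g\n", v);
}
```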
- if (should_insert_first && filling_row < next_row) + if (should_insert_first && filling_row < next_row && filling_row.isConstraintsSatisfied()) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); @@ -643,15 +655,37 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); - filling_row_changed = changed; - if (!apply) + if (!filling_row.next(next_row, filling_row_changed)) break; interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; } + + if (running_with_staleness) + { + /// Initialize staleness border for current row to generate it's prefix + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); + + while (filling_row.shift(next_row, filling_row_changed)) + { + logDebug("filling_row after shift", filling_row); + + do + { + logDebug("inserting prefix filling_row", filling_row); + + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; + + } while (filling_row.next(next_row, filling_row_changed)); + } + } + /// new valid filling row was generated but not inserted, will use it during suffix generation if (filling_row_changed) filling_row_inserted = false; @@ -707,7 +741,7 @@ void FillingTransform::transform(Chunk & chunk) /// if no data was processed, then need to initialize filling_row if (last_row.empty()) { - filling_row.initFromDefaults(); + filling_row.initUsingFrom(); filling_row_inserted = false; } diff --git a/src/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h index a8866a97103..92ca4fe6c9e 100644 --- a/src/Processors/Transforms/FillingTransform.h +++ b/src/Processors/Transforms/FillingTransform.h @@ -84,6 +84,7 @@ private: SortDescription sort_prefix; const InterpolateDescriptionPtr interpolate_description; /// Contains INTERPOLATE columns + bool running_with_staleness = false; /// True if STALENESS clause was used. FillingRow filling_row; /// Current row, which is used to fill gaps. FillingRow next_row; /// Row to which we need to generate filling rows. 
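Editor's note: a hedged sketch of the STALENESS behaviour added above (assumed semantics, std-only, hypothetical function name): with a staleness constraint the gap is filled only within `staleness` of the last real value, so the fill window is re-anchored at every original row instead of running all the way to the next one.

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

static std::vector<double> generateFillRowsWithStaleness(
    double last_real_value, double next_real_value, double step, double staleness)
{
    // The constraint caps generation at last_real_value + staleness.
    const double upper_bound = std::min(next_real_value, last_real_value + staleness);
    std::vector<double> generated;
    for (double value = last_real_value + step; value < upper_bound; value += step)
        generated.push_back(value);
    return generated;
}

int main()
{
    // Real rows 0 and 10, STEP 1, STALENESS 3 -> only 1 and 2 are generated.
    for (double v : generateFillRowsWithStaleness(0.0, 10.0, 1.0, 3.0))
        std::printf("%g\n", v);
}
```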
bool filling_row_inserted = false; diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 1675e5d0386..77a437d4b97 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -394,7 +394,7 @@ void FullMergeJoinCursor::setChunk(Chunk && chunk) convertToFullIfSparse(chunk); current_chunk = std::move(chunk); - cursor = SortCursorImpl(sample_block, current_chunk.getColumns(), desc); + cursor = SortCursorImpl(sample_block, current_chunk.getColumns(), current_chunk.getNumRows(), desc); } bool FullMergeJoinCursor::fullyCompleted() const diff --git a/src/Processors/Transforms/SelectByIndicesTransform.h b/src/Processors/Transforms/SelectByIndicesTransform.h index b44f5a3203e..e67d3bfde51 100644 --- a/src/Processors/Transforms/SelectByIndicesTransform.h +++ b/src/Processors/Transforms/SelectByIndicesTransform.h @@ -26,8 +26,12 @@ public: void transform(Chunk & chunk) override { size_t num_rows = chunk.getNumRows(); - auto select_final_indices_info = chunk.getChunkInfos().extract(); + auto select_all_rows_info = chunk.getChunkInfos().extract(); + if (select_all_rows_info) + return; + + auto select_final_indices_info = chunk.getChunkInfos().extract(); if (!select_final_indices_info || !select_final_indices_info->select_final_indices) throw Exception(ErrorCodes::LOGICAL_ERROR, "Chunk passed to SelectByIndicesTransform without indices column"); diff --git a/src/Processors/Transforms/SortingTransform.cpp b/src/Processors/Transforms/SortingTransform.cpp index 6e65093e9e2..6a11354e2bf 100644 --- a/src/Processors/Transforms/SortingTransform.cpp +++ b/src/Processors/Transforms/SortingTransform.cpp @@ -42,7 +42,7 @@ MergeSorter::MergeSorter(const Block & header, Chunks chunks_, SortDescription & /// Convert to full column, because some cursors expect non-contant columns convertToFullIfConst(chunk); - cursors.emplace_back(header, chunk.getColumns(), description, chunk_index); + cursors.emplace_back(header, chunk.getColumns(), chunk.getNumRows(), description, chunk_index); has_collation |= cursors.back().has_collation; nonempty_chunks.emplace_back(std::move(chunk)); diff --git a/src/QueryPipeline/ExecutionSpeedLimits.cpp b/src/QueryPipeline/ExecutionSpeedLimits.cpp index 05fd394db77..fc0e86781f0 100644 --- a/src/QueryPipeline/ExecutionSpeedLimits.cpp +++ b/src/QueryPipeline/ExecutionSpeedLimits.cpp @@ -86,10 +86,12 @@ void ExecutionSpeedLimits::throttle( if (timeout_overflow_mode == OverflowMode::THROW && estimated_execution_time_seconds > max_estimated_execution_time.totalSeconds()) throw Exception( ErrorCodes::TOO_SLOW, - "Estimated query execution time ({} seconds) is too long. Maximum: {}. Estimated rows to process: {}", + "Estimated query execution time ({:.5f} seconds) is too long. Maximum: {}. 
Estimated rows to process: {} ({} read in {:.5f} seconds).", estimated_execution_time_seconds, max_estimated_execution_time.totalSeconds(), - total_rows_to_read); + total_rows_to_read, + read_rows, + elapsed_seconds); } if (max_execution_rps && rows_per_second >= max_execution_rps) diff --git a/src/Server/HTTP/authenticateUserByHTTP.cpp b/src/Server/HTTP/authenticateUserByHTTP.cpp index cbad91cc292..61029ed9560 100644 --- a/src/Server/HTTP/authenticateUserByHTTP.cpp +++ b/src/Server/HTTP/authenticateUserByHTTP.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -54,11 +55,13 @@ bool authenticateUserByHTTP( HTTPServerResponse & response, Session & session, std::unique_ptr & request_credentials, + const HTTPHandlerConnectionConfig & connection_config, ContextPtr global_context, LoggerPtr log) { /// Get the credentials created by the previous call of authenticateUserByHTTP() while handling the previous HTTP request. auto current_credentials = std::move(request_credentials); + const auto & config_credentials = connection_config.credentials; /// The user and password can be passed by headers (similar to X-Auth-*), /// which is used by load balancers to pass authentication information. @@ -70,6 +73,7 @@ bool authenticateUserByHTTP( /// The header 'X-ClickHouse-SSL-Certificate-Auth: on' enables checking the common name /// extracted from the SSL certificate used for this connection instead of checking password. bool has_ssl_certificate_auth = (request.get("X-ClickHouse-SSL-Certificate-Auth", "") == "on"); + bool has_config_credentials = config_credentials.has_value(); /// User name and password can be passed using HTTP Basic auth or query parameters /// (both methods are insecure). @@ -79,6 +83,10 @@ bool authenticateUserByHTTP( std::string spnego_challenge; SSLCertificateSubjects certificate_subjects; + if (config_credentials) + { + checkUserNameNotEmpty(config_credentials->getUserName(), "config authentication"); + } if (has_ssl_certificate_auth) { #if USE_SSL @@ -86,6 +94,8 @@ bool authenticateUserByHTTP( checkUserNameNotEmpty(user, "X-ClickHouse HTTP headers"); /// It is prohibited to mix different authorization schemes. + if (has_config_credentials) + throwMultipleAuthenticationMethods("SSL certificate authentication", "authentication set in config"); if (!password.empty()) throwMultipleAuthenticationMethods("SSL certificate authentication", "authentication via password"); if (has_http_credentials) @@ -109,6 +119,8 @@ bool authenticateUserByHTTP( checkUserNameNotEmpty(user, "X-ClickHouse HTTP headers"); /// It is prohibited to mix different authorization schemes. + if (has_config_credentials) + throwMultipleAuthenticationMethods("X-ClickHouse HTTP headers", "authentication set in config"); if (has_http_credentials) throwMultipleAuthenticationMethods("X-ClickHouse HTTP headers", "Authorization HTTP header"); if (has_credentials_in_query_params) @@ -117,6 +129,8 @@ bool authenticateUserByHTTP( else if (has_http_credentials) { /// It is prohibited to mix different authorization schemes. 
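Editor's note: the pairwise checks above enforce that at most one credential source is used per request. An equivalent std-only sketch of that rule (hypothetical `CredentialSources` type and helper, not the real handler code):

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

struct CredentialSources
{
    bool config_credentials = false;
    bool ssl_certificate = false;
    bool x_clickhouse_headers = false;
    bool http_authorization_header = false;
    bool query_parameters = false;
};

static void checkSingleAuthenticationSource(const CredentialSources & sources)
{
    std::vector<std::string> present;
    if (sources.config_credentials) present.push_back("authentication set in config");
    if (sources.ssl_certificate) present.push_back("SSL certificate authentication");
    if (sources.x_clickhouse_headers) present.push_back("X-ClickHouse HTTP headers");
    if (sources.http_authorization_header) present.push_back("Authorization HTTP header");
    if (sources.query_parameters) present.push_back("authentication via parameters");

    // Rejecting every pair is the same as allowing at most one source.
    if (present.size() > 1)
        throw std::invalid_argument(
            "It is not allowed to use " + present[0] + " and " + present[1] + " simultaneously");
}

int main()
{
    CredentialSources sources;
    sources.config_credentials = true;
    sources.http_authorization_header = true;
    try
    {
        checkSingleAuthenticationSource(sources);
    }
    catch (const std::invalid_argument & e)
    {
        std::puts(e.what());   // mirrors throwMultipleAuthenticationMethods()
    }
}
```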
+ if (has_config_credentials) + throwMultipleAuthenticationMethods("Authorization HTTP header", "authentication set in config"); if (has_credentials_in_query_params) throwMultipleAuthenticationMethods("Authorization HTTP header", "authentication via parameters"); @@ -190,6 +204,10 @@ bool authenticateUserByHTTP( return false; } } + else if (has_config_credentials) + { + current_credentials = std::make_unique(*config_credentials); + } else // I.e., now using user name and password strings ("Basic"). { if (!current_credentials) diff --git a/src/Server/HTTP/authenticateUserByHTTP.h b/src/Server/HTTP/authenticateUserByHTTP.h index 3b5a04cae68..02dcf828faa 100644 --- a/src/Server/HTTP/authenticateUserByHTTP.h +++ b/src/Server/HTTP/authenticateUserByHTTP.h @@ -11,13 +11,22 @@ class HTMLForm; class HTTPServerResponse; class Session; class Credentials; +class BasicCredentials; +struct HTTPHandlerConnectionConfig; /// Authenticates a user via HTTP protocol and initializes a session. +/// /// Usually retrieves the name and the password for that user from either the request's headers or from the query parameters. -/// Returns true when the user successfully authenticated, -/// the session instance will be configured accordingly, and the request_credentials instance will be dropped. -/// Returns false when the user is not authenticated yet, and the HTTP_UNAUTHORIZED response is sent with the "WWW-Authenticate" header, -/// in this case the `request_credentials` instance must be preserved until the next request or until any exception. +/// You can also pass user/password explicitly via `config_credentials`. +/// +/// Returns true when the user successfully authenticated: +/// - the session instance will be configured accordingly +/// - and the request_credentials instance will be dropped. +/// +/// Returns false when the user is not authenticated yet: +/// - the HTTP_UNAUTHORIZED response is sent with the "WWW-Authenticate" header +/// - the `request_credentials` instance must be preserved until the next request or until any exception. +/// /// Throws an exception if authentication failed. 
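Editor's note: the documented contract (true = proceed, false = challenge already sent and per-connection state must be kept, exception = definitive failure) can be modelled by the following std-only sketch; `Credentials`, `authenticate` and `final_round` are hypothetical stand-ins for the SPNEGO-style multi-round exchange, not the real API.

```cpp
#include <cstdio>
#include <memory>
#include <stdexcept>

struct Credentials { int negotiation_round = 0; };   // stands in for partially negotiated state

static bool authenticate(std::unique_ptr<Credentials> & request_credentials, bool final_round)
{
    if (!request_credentials)
        request_credentials = std::make_unique<Credentials>();
    ++request_credentials->negotiation_round;

    if (!final_round)
        return false;                  // challenge response sent, keep credentials for the next request
    request_credentials.reset();       // authenticated: the state is dropped
    return true;
}

int main()
{
    std::unique_ptr<Credentials> request_credentials;   // outlives a single request, as in the handler
    for (int request = 0; request < 2; ++request)
    {
        try
        {
            if (authenticate(request_credentials, /*final_round=*/request == 1))
                std::puts("authenticated, run the query");
            else
                std::puts("challenge sent, waiting for the next request");
        }
        catch (const std::runtime_error & e)
        {
            // A definitive authentication failure would surface here as an exception.
            std::printf("authentication failed: %s\n", e.what());
        }
    }
}
```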
bool authenticateUserByHTTP( const HTTPServerRequest & request, @@ -25,6 +34,7 @@ bool authenticateUserByHTTP( HTTPServerResponse & response, Session & session, std::unique_ptr & request_credentials, + const HTTPHandlerConnectionConfig & connection_config, ContextPtr global_context, LoggerPtr log); diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 8a9ae05b355..5fd92d99b3c 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -1,6 +1,5 @@ #include -#include #include #include #include @@ -145,6 +144,15 @@ static std::chrono::steady_clock::duration parseSessionTimeout( return std::chrono::seconds(session_timeout); } +HTTPHandlerConnectionConfig::HTTPHandlerConnectionConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix) +{ + if (config.has(config_prefix + ".handler.user") || config.has(config_prefix + ".handler.password")) + { + credentials.emplace( + config.getString(config_prefix + ".handler.user", "default"), + config.getString(config_prefix + ".handler.password", "")); + } +} void HTTPHandler::pushDelayedResults(Output & used_output) { @@ -182,11 +190,12 @@ void HTTPHandler::pushDelayedResults(Output & used_output) } -HTTPHandler::HTTPHandler(IServer & server_, const std::string & name, const HTTPResponseHeaderSetup & http_response_headers_override_) +HTTPHandler::HTTPHandler(IServer & server_, const HTTPHandlerConnectionConfig & connection_config_, const std::string & name, const HTTPResponseHeaderSetup & http_response_headers_override_) : server(server_) , log(getLogger(name)) , default_settings(server.context()->getSettingsRef()) , http_response_headers_override(http_response_headers_override_) + , connection_config(connection_config_) { server_display_name = server.config().getString("display_name", getFQDNOrHostName()); } @@ -199,7 +208,7 @@ HTTPHandler::~HTTPHandler() = default; bool HTTPHandler::authenticateUser(HTTPServerRequest & request, HTMLForm & params, HTTPServerResponse & response) { - return authenticateUserByHTTP(request, params, response, *session, request_credentials, server.context(), log); + return authenticateUserByHTTP(request, params, response, *session, request_credentials, connection_config, server.context(), log); } @@ -768,8 +777,12 @@ void HTTPHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse } DynamicQueryHandler::DynamicQueryHandler( - IServer & server_, const std::string & param_name_, const HTTPResponseHeaderSetup & http_response_headers_override_) - : HTTPHandler(server_, "DynamicQueryHandler", http_response_headers_override_), param_name(param_name_) + IServer & server_, + const HTTPHandlerConnectionConfig & connection_config, + const std::string & param_name_, + const HTTPResponseHeaderSetup & http_response_headers_override_) + : HTTPHandler(server_, connection_config, "DynamicQueryHandler", http_response_headers_override_) + , param_name(param_name_) { } @@ -826,12 +839,13 @@ std::string DynamicQueryHandler::getQuery(HTTPServerRequest & request, HTMLForm PredefinedQueryHandler::PredefinedQueryHandler( IServer & server_, + const HTTPHandlerConnectionConfig & connection_config, const NameSet & receive_params_, const std::string & predefined_query_, const CompiledRegexPtr & url_regex_, const std::unordered_map & header_name_with_regex_, const HTTPResponseHeaderSetup & http_response_headers_override_) - : HTTPHandler(server_, "PredefinedQueryHandler", http_response_headers_override_) + : HTTPHandler(server_, connection_config, "PredefinedQueryHandler", 
http_response_headers_override_) , receive_params(receive_params_) , predefined_query(predefined_query_) , url_regex(url_regex_) @@ -923,10 +937,11 @@ HTTPRequestHandlerFactoryPtr createDynamicHandlerFactory(IServer & server, { auto query_param_name = config.getString(config_prefix + ".handler.query_param_name", "query"); + HTTPHandlerConnectionConfig connection_config(config, config_prefix); HTTPResponseHeaderSetup http_response_headers_override = parseHTTPResponseHeaders(config, config_prefix); - auto creator = [&server, query_param_name, http_response_headers_override]() -> std::unique_ptr - { return std::make_unique(server, query_param_name, http_response_headers_override); }; + auto creator = [&server, query_param_name, http_response_headers_override, connection_config]() -> std::unique_ptr + { return std::make_unique(server, connection_config, query_param_name, http_response_headers_override); }; auto factory = std::make_shared>(std::move(creator)); factory->addFiltersFromConfig(config, config_prefix); @@ -968,6 +983,8 @@ HTTPRequestHandlerFactoryPtr createPredefinedHandlerFactory(IServer & server, Poco::Util::AbstractConfiguration::Keys headers_name; config.keys(config_prefix + ".headers", headers_name); + HTTPHandlerConnectionConfig connection_config(config, config_prefix); + for (const auto & header_name : headers_name) { auto expression = config.getString(config_prefix + ".headers." + header_name); @@ -1001,12 +1018,18 @@ HTTPRequestHandlerFactoryPtr createPredefinedHandlerFactory(IServer & server, predefined_query, regex, headers_name_with_regex, - http_response_headers_override] + http_response_headers_override, + connection_config] -> std::unique_ptr { return std::make_unique( - server, analyze_receive_params, predefined_query, regex, - headers_name_with_regex, http_response_headers_override); + server, + connection_config, + analyze_receive_params, + predefined_query, + regex, + headers_name_with_regex, + http_response_headers_override); }; factory = std::make_shared>(std::move(creator)); factory->addFiltersFromConfig(config, config_prefix); @@ -1019,18 +1042,21 @@ HTTPRequestHandlerFactoryPtr createPredefinedHandlerFactory(IServer & server, analyze_receive_params, predefined_query, headers_name_with_regex, - http_response_headers_override] + http_response_headers_override, + connection_config] -> std::unique_ptr { return std::make_unique( - server, analyze_receive_params, predefined_query, CompiledRegexPtr{}, - headers_name_with_regex, http_response_headers_override); + server, + connection_config, + analyze_receive_params, + predefined_query, + CompiledRegexPtr{}, + headers_name_with_regex, + http_response_headers_override); }; - factory = std::make_shared>(std::move(creator)); - factory->addFiltersFromConfig(config, config_prefix); - return factory; } diff --git a/src/Server/HTTPHandler.h b/src/Server/HTTPHandler.h index 6580b317f6e..2296fa70aeb 100644 --- a/src/Server/HTTPHandler.h +++ b/src/Server/HTTPHandler.h @@ -12,6 +12,7 @@ #include #include #include +#include #include "HTTPResponseHeaderWriter.h" @@ -26,17 +27,28 @@ namespace DB { class Session; -class Credentials; class IServer; struct Settings; class WriteBufferFromHTTPServerResponse; using CompiledRegexPtr = std::shared_ptr; +struct HTTPHandlerConnectionConfig +{ + std::optional credentials; + + /// TODO: + /// String quota; + /// String default_database; + + HTTPHandlerConnectionConfig() = default; + HTTPHandlerConnectionConfig(const Poco::Util::AbstractConfiguration & config, const std::string & 
config_prefix); +}; + class HTTPHandler : public HTTPRequestHandler { public: - HTTPHandler(IServer & server_, const std::string & name, const HTTPResponseHeaderSetup & http_response_headers_override_); + HTTPHandler(IServer & server_, const HTTPHandlerConnectionConfig & connection_config_, const std::string & name, const HTTPResponseHeaderSetup & http_response_headers_override_); ~HTTPHandler() override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; @@ -146,16 +158,7 @@ private: // The request_credential instance may outlive a single request/response loop. // This happens only when the authentication mechanism requires more than a single request/response exchange (e.g., SPNEGO). std::unique_ptr request_credentials; - - // Returns true when the user successfully authenticated, - // the session instance will be configured accordingly, and the request_credentials instance will be dropped. - // Returns false when the user is not authenticated yet, and the 'Negotiate' response is sent, - // the session and request_credentials instances are preserved. - // Throws an exception if authentication failed. - bool authenticateUser( - HTTPServerRequest & request, - HTMLForm & params, - HTTPServerResponse & response); + HTTPHandlerConnectionConfig connection_config; /// Also initializes 'used_output'. void processQuery( @@ -174,6 +177,13 @@ private: Output & used_output); static void pushDelayedResults(Output & used_output); + +protected: + // @see authenticateUserByHTTP() + virtual bool authenticateUser( + HTTPServerRequest & request, + HTMLForm & params, + HTTPServerResponse & response); }; class DynamicQueryHandler : public HTTPHandler @@ -184,6 +194,7 @@ private: public: explicit DynamicQueryHandler( IServer & server_, + const HTTPHandlerConnectionConfig & connection_config, const std::string & param_name_ = "query", const HTTPResponseHeaderSetup & http_response_headers_override_ = std::nullopt); @@ -203,6 +214,7 @@ private: public: PredefinedQueryHandler( IServer & server_, + const HTTPHandlerConnectionConfig & connection_config, const NameSet & receive_params_, const std::string & predefined_query_, const CompiledRegexPtr & url_regex_, diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 2d5ddd859fe..950cad4038a 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -275,7 +275,7 @@ void addDefaultHandlersFactory( auto dynamic_creator = [&server] () -> std::unique_ptr { - return std::make_unique(server, "query"); + return std::make_unique(server, HTTPHandlerConnectionConfig{}, "query"); }; auto query_handler = std::make_shared>(std::move(dynamic_creator)); query_handler->addFilter([](const auto & request) diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index cd18eac50a7..9c521e06667 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include "config.h" #include @@ -137,7 +138,7 @@ protected: bool authenticateUser(HTTPServerRequest & request, HTTPServerResponse & response) { - return authenticateUserByHTTP(request, *params, response, *session, request_credentials, server().context(), log()); + return authenticateUserByHTTP(request, *params, response, *session, request_credentials, HTTPHandlerConnectionConfig{}, server().context(), log()); } void makeContext(HTTPServerRequest & request) diff --git 
a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index e7e4ae25a68..4f54918445f 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1614,7 +1614,8 @@ void TCPHandler::receiveHello() if (e.code() != DB::ErrorCodes::AUTHENTICATION_FAILED) throw; - tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication"); + tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication", LogsLevel::information); + /// ^^ Log at debug level instead of default error level as authentication failures are not an unusual event. } } } diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index ab4403b3a94..fbca222b1e7 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1457,14 +1457,6 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const ErrorCodes::BAD_ARGUMENTS, "The change of data type {} of column {} to {} is not allowed. It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName()); - - bool has_object_type = isObject(command.data_type); - command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); - if (has_object_type) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "The change of data type {} of column {} to {} is not supported.", - old_data_type->getName(), backQuote(column_name), command.data_type->getName()); } if (command.isRemovingProperty()) @@ -1496,7 +1488,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const if (command.to_remove == AlterCommand::RemoveProperty::CODEC && column_from_table.codec == nullptr) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Column {} doesn't have TTL, cannot remove it", + "Column {} doesn't have CODEC, cannot remove it", backQuote(column_name)); if (command.to_remove == AlterCommand::RemoveProperty::COMMENT && column_from_table.comment.empty()) throw Exception( diff --git a/src/Storages/Kafka/StorageKafkaUtils.cpp b/src/Storages/Kafka/StorageKafkaUtils.cpp index dd954d6a7c2..119aadd11d8 100644 --- a/src/Storages/Kafka/StorageKafkaUtils.cpp +++ b/src/Storages/Kafka/StorageKafkaUtils.cpp @@ -308,6 +308,7 @@ void registerStorageKafka(StorageFactory & factory) creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, + .source_access_type = AccessType::KAFKA, }); } diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index 7e43966556e..5c0449612e7 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -151,6 +151,18 @@ KeyDescription KeyDescription::getSortingKeyFromAST( throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column {} with type {} is not allowed in key expression, it's not comparable", backQuote(result.sample_block.getByPosition(i).name), result.data_types.back()->getName()); + + auto check = [&](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, + "Column with type Variant/Dynamic is not allowed in key expression. 
Consider using a subcolumn with a specific data " + "type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn) or casting this column to a specific data type"); + }; + + check(*result.data_types.back()); + result.data_types.back()->forEachChild(check); } return result; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index e13ec5a7515..1d79ae5aacb 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -908,7 +908,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( { part_storage_for_loading->commitTransaction(); - MergeTreeDataPartBuilder builder(data, part_name, volume, part_relative_path, part_dir); + MergeTreeDataPartBuilder builder(data, part_name, volume, part_relative_path, part_dir, getReadSettings()); new_data_part = builder.withPartFormatFromDisk().build(); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 20d7528d38a..41783ffddb0 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -833,7 +833,7 @@ MergeTreeDataPartBuilder IMergeTreeDataPart::getProjectionPartBuilder(const Stri { const char * projection_extension = is_temp_projection ? ".tmp_proj" : ".proj"; auto projection_storage = getDataPartStorage().getProjection(projection_name + projection_extension, !is_temp_projection); - MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage); + MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage, getReadSettings()); return builder.withPartInfo(MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION).withParentPart(this); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index b8ac14b1750..d1c76505d7c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -46,6 +46,8 @@ public: virtual void finish(bool sync) = 0; + virtual size_t getNumberOfOpenStreams() const = 0; + Columns releaseIndexColumns(); PlainMarksByName releaseCachedMarks(); diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index a901b03c115..7dd6d720170 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -39,6 +39,11 @@ public: return writer->releaseCachedMarks(); } + size_t getNumberOfOpenStreams() const + { + return writer->getNumberOfOpenStreams(); + } + protected: /// Remove all columns marked expired in data_part. 
Also, clears checksums diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 193622d7b87..08066113375 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -349,13 +349,13 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const if (global_ctx->parent_part) { auto data_part_storage = global_ctx->parent_part->getDataPartStorage().getProjection(local_tmp_part_basename, /* use parent transaction */ false); - builder.emplace(*global_ctx->data, global_ctx->future_part->name, data_part_storage); + builder.emplace(*global_ctx->data, global_ctx->future_part->name, data_part_storage, getReadSettings()); builder->withParentPart(global_ctx->parent_part); } else { auto local_single_disk_volume = std::make_shared("volume_" + global_ctx->future_part->name, global_ctx->disk, 0); - builder.emplace(global_ctx->data->getDataPartBuilder(global_ctx->future_part->name, local_single_disk_volume, local_tmp_part_basename)); + builder.emplace(global_ctx->data->getDataPartBuilder(global_ctx->future_part->name, local_single_disk_volume, local_tmp_part_basename, getReadSettings())); builder->withPartStorageType(global_ctx->future_part->part_format.storage_type); } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 4ed8c67469d..b2f35d0a309 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1442,7 +1442,7 @@ void MergeTreeData::loadUnexpectedDataPart(UnexpectedPartLoadState & state) try { - state.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + state.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartInfo(part_info) .withPartFormatFromDisk() .build(); @@ -1457,7 +1457,7 @@ void MergeTreeData::loadUnexpectedDataPart(UnexpectedPartLoadState & state) /// Build a fake part and mark it as broken in case of filesystem error. /// If the error impacts part directory instead of single files, /// an exception will be thrown during detach and silently ignored. - state.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + state.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -1491,7 +1491,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( /// Build a fake part and mark it as broken in case of filesystem error. /// If the error impacts part directory instead of single files, /// an exception will be thrown during detach and silently ignored. 
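Editor's note: the change above threads read settings through every part-builder call site instead of letting them be picked up implicitly. A minimal std-only sketch of that fluent-builder shape (hypothetical `PartBuilder`, `Part` and `ReadSettings`, not `MergeTreeDataPartBuilder`):

```cpp
#include <cstdio>
#include <string>

struct ReadSettings { bool local_fs_prefetch = false; };

enum class PartType { Wide, Compact };

struct Part
{
    std::string name;
    PartType type;
    ReadSettings read_settings;
};

class PartBuilder
{
public:
    // Read settings are a required constructor argument, as in the diff above.
    PartBuilder(std::string name_, ReadSettings read_settings_)
        : name(std::move(name_)), read_settings(read_settings_) {}

    PartBuilder & withPartType(PartType type_) { type = type_; return *this; }

    Part build() const { return Part{name, type, read_settings}; }

private:
    std::string name;
    PartType type = PartType::Compact;
    ReadSettings read_settings;
};

int main()
{
    ReadSettings read_settings;
    read_settings.local_fs_prefetch = true;

    Part part = PartBuilder("all_1_1_0", read_settings)
                    .withPartType(PartType::Wide)
                    .build();
    std::printf("built part %s\n", part.name.c_str());
}
```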
- res.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + res.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -1512,7 +1512,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( try { - res.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + res.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartInfo(part_info) .withPartFormatFromDisk() .build(); @@ -2343,11 +2343,16 @@ void MergeTreeData::stopOutdatedAndUnexpectedDataPartsLoadingTask() } } -void MergeTreeData::prewarmMarkCache(ThreadPool & pool) +void MergeTreeData::prewarmMarkCacheIfNeeded(ThreadPool & pool) { if (!(*getSettings())[MergeTreeSetting::prewarm_mark_cache]) return; + prewarmMarkCache(pool); +} + +void MergeTreeData::prewarmMarkCache(ThreadPool & pool) +{ auto * mark_cache = getContext()->getMarkCache().get(); if (!mark_cache) return; @@ -3830,9 +3835,9 @@ MergeTreeDataPartFormat MergeTreeData::choosePartFormatOnDisk(size_t bytes_uncom } MergeTreeDataPartBuilder MergeTreeData::getDataPartBuilder( - const String & name, const VolumePtr & volume, const String & part_dir) const + const String & name, const VolumePtr & volume, const String & part_dir, const ReadSettings & read_settings_) const { - return MergeTreeDataPartBuilder(*this, name, volume, relative_data_path, part_dir); + return MergeTreeDataPartBuilder(*this, name, volume, relative_data_path, part_dir, read_settings_); } void MergeTreeData::changeSettings( @@ -5914,7 +5919,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons /// Load this part from the directory `temp_part_dir`. auto load_part = [&] { - MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name); + MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name, getReadSettings()); builder.withPartFormatFromDisk(); part = std::move(builder).build(); part->version.setCreationTID(Tx::PrehistoricTID, nullptr); @@ -5929,7 +5934,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons if (!part) { /// Make a fake data part only to copy its files to /detached/. 
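Editor's note: the `prewarmMarkCacheIfNeeded` / `prewarmMarkCache` split shown earlier in this hunk follows a simple guard-wrapper shape: the setting check lives in the wrapper so other call sites can still force the work unconditionally. A std-only sketch under that assumption (hypothetical `Settings` type and free functions):

```cpp
#include <cstdio>

struct Settings { bool prewarm_mark_cache = false; };

static void prewarmMarkCache()
{
    std::puts("prewarming mark cache for the most recent parts");
}

static void prewarmMarkCacheIfNeeded(const Settings & settings)
{
    if (!settings.prewarm_mark_cache)
        return;                 // the default path respects the user setting
    prewarmMarkCache();
}

int main()
{
    Settings settings;                    // prewarm_mark_cache defaults to false
    prewarmMarkCacheIfNeeded(settings);   // no-op: the setting is disabled
    prewarmMarkCache();                   // an explicit call is still possible
}
```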
- part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name} + part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name, getReadSettings()} .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -6581,7 +6586,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const LOG_DEBUG(log, "Checking part {}", new_name); auto single_disk_volume = std::make_shared("volume_" + old_name, disk); - auto part = getDataPartBuilder(old_name, single_disk_volume, source_dir / new_name) + auto part = getDataPartBuilder(old_name, single_disk_volume, source_dir / new_name, getReadSettings()) .withPartFormatFromDisk() .build(); @@ -7636,7 +7641,7 @@ std::pair MergeTreeData::cloneAn std::string(fs::path(dst_part_storage->getFullRootPath()) / tmp_dst_part_name), with_copy); - auto dst_data_part = MergeTreeDataPartBuilder(*this, dst_part_name, dst_part_storage) + auto dst_data_part = MergeTreeDataPartBuilder(*this, dst_part_name, dst_part_storage, getReadSettings()) .withPartFormatFromDisk() .build(); @@ -8905,7 +8910,7 @@ std::pair MergeTreeData::createE VolumePtr data_part_volume = createVolumeFromReservation(reservation, volume); auto tmp_dir_holder = getTemporaryPartDirectoryHolder(EMPTY_PART_TMP_PREFIX + new_part_name); - auto new_data_part = getDataPartBuilder(new_part_name, data_part_volume, EMPTY_PART_TMP_PREFIX + new_part_name) + auto new_data_part = getDataPartBuilder(new_part_name, data_part_volume, EMPTY_PART_TMP_PREFIX + new_part_name, getReadSettings()) .withBytesAndRowsOnDisk(0, 0) .withPartInfo(new_part_info) .build(); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index a32106f76bb..fe360907875 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -241,7 +241,7 @@ public: MergeTreeDataPartFormat choosePartFormat(size_t bytes_uncompressed, size_t rows_count) const; MergeTreeDataPartFormat choosePartFormatOnDisk(size_t bytes_uncompressed, size_t rows_count) const; - MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir) const; + MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir, const ReadSettings & read_settings_) const; /// Auxiliary object to add a set of parts into the working set in two steps: /// * First, as PreActive parts (the parts are ready, but not yet in the active set). @@ -508,6 +508,7 @@ public: /// Prewarm mark cache for the most recent data parts. 
void prewarmMarkCache(ThreadPool & pool); + void prewarmMarkCacheIfNeeded(ThreadPool & pool); String getLogName() const { return log.loadName(); } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 8b3c7bdf3fb..176b5c00b0a 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -48,6 +48,16 @@ namespace CurrentMetrics { extern const Metric BackgroundMergesAndMutationsPoolTask; } +namespace ProfileEvents +{ + + extern const Event MergerMutatorsGetPartsForMergeElapsedMicroseconds; + extern const Event MergerMutatorPrepareRangesForMergeElapsedMicroseconds; + extern const Event MergerMutatorSelectPartsForMergeElapsedMicroseconds; + extern const Event MergerMutatorRangesForMergeCount; + extern const Event MergerMutatorPartsInRangesForMergeCount; + extern const Event MergerMutatorSelectRangePartsCount; +} namespace DB { @@ -70,6 +80,8 @@ namespace MergeTreeSetting extern const MergeTreeSettingsBool ttl_only_drop_parts; extern const MergeTreeSettingsUInt64 parts_to_throw_insert; extern const MergeTreeSettingsMergeSelectorAlgorithm merge_selector_algorithm; + extern const MergeTreeSettingsBool merge_selector_enable_heuristic_to_remove_small_parts_at_right; + extern const MergeTreeSettingsFloat merge_selector_base; } namespace ErrorCodes @@ -213,6 +225,7 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart { PartitionIdsHint res; MergeTreeData::DataPartsVector data_parts = getDataPartsToSelectMergeFrom(txn); + if (data_parts.empty()) return res; @@ -270,6 +283,8 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::getDataPartsToSelectMergeFrom( const MergeTreeTransactionPtr & txn, const PartitionIdsHint * partitions_hint) const { + + Stopwatch get_data_parts_for_merge_timer; auto res = getDataPartsToSelectMergeFrom(txn); if (!partitions_hint) return res; @@ -278,6 +293,8 @@ MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::getDataPartsToSelectM { return !partitions_hint->contains(part->info.partition_id); }); + + ProfileEvents::increment(ProfileEvents::MergerMutatorsGetPartsForMergeElapsedMicroseconds, get_data_parts_for_merge_timer.elapsedMicroseconds()); return res; } @@ -355,6 +372,7 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo const MergeTreeTransactionPtr & txn, PreformattedMessage & out_disable_reason) const { + Stopwatch ranges_for_merge_timer; MergeSelectingInfo res; res.current_time = std::time(nullptr); @@ -455,6 +473,10 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo prev_part = ∂ } + ProfileEvents::increment(ProfileEvents::MergerMutatorPartsInRangesForMergeCount, res.parts_selected_precondition); + ProfileEvents::increment(ProfileEvents::MergerMutatorRangesForMergeCount, res.parts_ranges.size()); + ProfileEvents::increment(ProfileEvents::MergerMutatorPrepareRangesForMergeElapsedMicroseconds, ranges_for_merge_timer.elapsedMicroseconds()); + return res; } @@ -469,6 +491,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( PreformattedMessage & out_disable_reason, bool dry_run) { + Stopwatch select_parts_from_ranges_timer; const auto data_settings = data.getSettings(); IMergeSelector::PartsRange parts_to_merge; @@ -540,6 +563,9 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( 
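The counters added above follow the standard pattern for lightweight instrumentation: a `Stopwatch` started at the beginning of the step and `ProfileEvents::increment` calls at its exits. Condensed to its essence (event names are taken from the diff, the surrounding selection logic is omitted):

    Stopwatch select_parts_from_ranges_timer;

    /// ... merge range / part selection work; `parts` stands for the selected parts ...

    ProfileEvents::increment(ProfileEvents::MergerMutatorSelectRangePartsCount, parts.size());
    ProfileEvents::increment(
        ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds,
        select_parts_from_ranges_timer.elapsedMicroseconds());

Note that the elapsed time is reported on both the CANNOT_SELECT and the SELECTED exit paths, so the event reflects time spent in selection regardless of the outcome.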
/// Override value from table settings simple_merge_settings.window_size = (*data_settings)[MergeTreeSetting::merge_selector_window_size]; simple_merge_settings.max_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::max_parts_to_merge_at_once]; + simple_merge_settings.enable_heuristic_to_remove_small_parts_at_right = (*data_settings)[MergeTreeSetting::merge_selector_enable_heuristic_to_remove_small_parts_at_right]; + simple_merge_settings.base = (*data_settings)[MergeTreeSetting::merge_selector_base]; + if (!(*data_settings)[MergeTreeSetting::min_age_to_force_merge_on_partition_only]) simple_merge_settings.min_age_to_force_merge = (*data_settings)[MergeTreeSetting::min_age_to_force_merge_seconds]; @@ -565,7 +591,8 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( if (parts_to_merge.empty()) { - out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors)"); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); + out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors) in {}ms", select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); return SelectPartsDecision::CANNOT_SELECT; } } @@ -578,8 +605,11 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( parts.push_back(part); } - LOG_DEBUG(log, "Selected {} parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name); + LOG_DEBUG(log, "Selected {} parts from {} to {} in {}ms", parts.size(), parts.front()->name, parts.back()->name, select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectRangePartsCount, parts.size()); + future_part->assign(std::move(parts)); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); return SelectPartsDecision::SELECTED; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp index 37f578b0c25..6ec4bc31d90 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp @@ -14,20 +14,22 @@ namespace ErrorCodes } MergeTreeDataPartBuilder::MergeTreeDataPartBuilder( - const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_) + const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_, const ReadSettings & read_settings_) : data(data_) , name(std::move(name_)) , volume(std::move(volume_)) , root_path(std::move(root_path_)) , part_dir(std::move(part_dir_)) + , read_settings(read_settings_) { } MergeTreeDataPartBuilder::MergeTreeDataPartBuilder( - const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_) + const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_, const ReadSettings & read_settings_) : data(data_) , name(std::move(name_)) , part_storage(std::move(part_storage_)) + , read_settings(read_settings_) { } @@ -73,7 +75,8 @@ MutableDataPartStoragePtr MergeTreeDataPartBuilder::getPartStorageByType( MergeTreeDataPartStorageType storage_type_, const VolumePtr & volume_, const String & root_path_, - const String & part_dir_) + const String & part_dir_, + const ReadSettings &) /// Unused here, but used in private repo. 
{ if (!volume_) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot create part storage, because volume is not specified"); @@ -112,7 +115,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartType(MergeTreeDataP MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartStorageType(MergeTreeDataPartStorageType storage_type_) { - part_storage = getPartStorageByType(storage_type_, volume, root_path, part_dir); + part_storage = getPartStorageByType(storage_type_, volume, root_path, part_dir, read_settings); return *this; } @@ -126,7 +129,8 @@ MergeTreeDataPartBuilder::PartStorageAndMarkType MergeTreeDataPartBuilder::getPartStorageAndMarkType( const VolumePtr & volume_, const String & root_path_, - const String & part_dir_) + const String & part_dir_, + const ReadSettings & read_settings_) { auto disk = volume_->getDisk(); auto part_relative_path = fs::path(root_path_) / part_dir_; @@ -138,7 +142,7 @@ MergeTreeDataPartBuilder::getPartStorageAndMarkType( if (MarkType::isMarkFileExtension(ext)) { - auto storage = getPartStorageByType(MergeTreeDataPartStorageType::Full, volume_, root_path_, part_dir_); + auto storage = getPartStorageByType(MergeTreeDataPartStorageType::Full, volume_, root_path_, part_dir_, read_settings_); return {std::move(storage), MarkType(ext)}; } } @@ -156,7 +160,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromDisk() MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromVolume() { assert(volume); - auto [storage, mark_type] = getPartStorageAndMarkType(volume, root_path, part_dir); + auto [storage, mark_type] = getPartStorageAndMarkType(volume, root_path, part_dir, read_settings); if (!storage || !mark_type) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.h b/src/Storages/MergeTree/MergeTreeDataPartBuilder.h index 0f54ff0a631..bce881a1970 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.h +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.h @@ -21,8 +21,8 @@ using VolumePtr = std::shared_ptr; class MergeTreeDataPartBuilder { public: - MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_); - MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_); + MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_, const ReadSettings & read_settings_); + MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_, const ReadSettings & read_settings_); std::shared_ptr build(); @@ -42,7 +42,8 @@ public: static PartStorageAndMarkType getPartStorageAndMarkType( const VolumePtr & volume_, const String & root_path_, - const String & part_dir_); + const String & part_dir_, + const ReadSettings & read_settings); private: Self & withPartFormatFromVolume(); @@ -52,7 +53,8 @@ private: MergeTreeDataPartStorageType storage_type_, const VolumePtr & volume_, const String & root_path_, - const String & part_dir_); + const String & part_dir_, + const ReadSettings & read_settings); const MergeTreeData & data; const String name; @@ -64,6 +66,8 @@ private: std::optional part_type; MutableDataPartStoragePtr part_storage; const IMergeTreeDataPart * parent_part = nullptr; + + const ReadSettings read_settings; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 22f3c379398..14c2da82de1 100644 --- 
a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -256,7 +256,14 @@ bool MergeTreeDataPartCompact::isStoredOnRemoteDiskWithZeroCopySupport() const MergeTreeDataPartCompact::~MergeTreeDataPartCompact() { - removeIfNeeded(); + try + { + removeIfNeeded(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index d6f213463f2..c515d645253 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -241,7 +241,14 @@ bool MergeTreeDataPartWide::isStoredOnRemoteDiskWithZeroCopySupport() const MergeTreeDataPartWide::~MergeTreeDataPartWide() { - removeIfNeeded(); + try + { + removeIfNeeded(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } void MergeTreeDataPartWide::doCheckConsistency(bool require_part_metadata) const diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 67a2c1ee9f1..c8d11ced683 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -63,23 +63,7 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); - } -} - -void MergeTreeDataPartWriterCompact::initDynamicStreamsIfNeeded(const Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block.getByName(column.name).column, compression); - } + MergeTreeDataPartWriterCompact::addStreams(column, nullptr, compression); } } @@ -181,20 +165,25 @@ void writeColumnSingleGranule( void MergeTreeDataPartWriterCompact::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block result_block = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. 
+ initOrAdjustDynamicStructureIfNeeded(result_block); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(result_block); assert(index_granularity_for_block >= 1); - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, result_block.rows()); } - Block result_block = permuteBlockIfNeeded(block, permutation); + result_block = permuteBlockIfNeeded(result_block, permutation); if (!header) header = result_block.cloneEmpty(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index b440a37222d..b3e2e78491d 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -32,6 +32,8 @@ public: void fillChecksums(MergeTreeDataPartChecksums & checksums, NameSet & checksums_to_remove) override; void finish(bool sync) override; + size_t getNumberOfOpenStreams() const override { return 1; } + private: /// Finish serialization of the data. Flush rows in buffer to disk, compute checksums. void fillDataChecksums(MergeTreeDataPartChecksums & checksums); @@ -48,9 +50,7 @@ private: void addToChecksums(MergeTreeDataPartChecksums & checksums); - void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) override; Block header; @@ -104,8 +104,6 @@ private: /// then finally to 'marks_file'. std::unique_ptr marks_compressor; std::unique_ptr marks_source_hashing; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 388737915ab..c483d47fed7 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -564,6 +564,45 @@ Names MergeTreeDataPartWriterOnDisk::getSkipIndicesColumns() const return Names(skip_indexes_column_names_set.begin(), skip_indexes_column_names_set.end()); } +void MergeTreeDataPartWriterOnDisk::initOrAdjustDynamicStructureIfNeeded(Block & block) +{ + if (!is_dynamic_streams_initialized) + { + for (const auto & column : columns_list) + { + if (column.type->hasDynamicSubcolumns()) + { + /// Create all streams for dynamic subcolumns using dynamic structure from block. + auto compression = getCodecDescOrDefault(column.name, default_codec); + addStreams(column, block.getByName(column.name).column, compression); + } + } + is_dynamic_streams_initialized = true; + block_sample = block.cloneEmpty(); + } + else + { + size_t size = block.columns(); + for (size_t i = 0; i != size; ++i) + { + auto & column = block.getByPosition(i); + const auto & sample_column = block_sample.getByPosition(i); + /// Check if the dynamic structure of this column is different from the sample column. + if (column.type->hasDynamicSubcolumns() && !column.column->dynamicStructureEquals(*sample_column.column)) + { + /// We need to change the dynamic structure of the column so it matches the sample column. 
+ /// To do it, we create empty column of this type, take dynamic structure from sample column + /// and insert data into it. Resulting column will have required dynamic structure and the content + /// of the column in current block. + auto new_column = sample_column.type->createColumn(); + new_column->takeDynamicStructureFromSourceColumns({sample_column.column}); + new_column->insertRangeFrom(*column.column, 0, column.column->size()); + column.column = std::move(new_column); + } + } + } +} + template struct MergeTreeDataPartWriterOnDisk::Stream; template struct MergeTreeDataPartWriterOnDisk::Stream; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 4a760c20b58..49d654c15e1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -154,6 +154,14 @@ protected: /// Get unique non ordered skip indices column. Names getSkipIndicesColumns() const; + virtual void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) = 0; + + /// On first block create all required streams for columns with dynamic subcolumns and remember the block sample. + /// On each next block check if dynamic structure of the columns equals to the dynamic structure of the same + /// columns in the sample block. If for some column dynamic structure is different, adjust it so it matches + /// the structure from the sample. + void initOrAdjustDynamicStructureIfNeeded(Block & block); + const MergeTreeIndices skip_indices; const ColumnsStatistics stats; @@ -188,6 +196,10 @@ protected: size_t current_mark = 0; GinIndexStoreFactory::GinIndexStores gin_index_stores; + + bool is_dynamic_streams_initialized = false; + Block block_sample; + private: void initSkipIndices(); void initPrimaryIndex(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 433c7c21613..7c9724b1b75 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -116,24 +116,7 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); - } -} - -void MergeTreeDataPartWriterWide::initDynamicStreamsIfNeeded(const DB::Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - block_sample = block.cloneEmpty(); - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block_sample.getByName(column.name).column, compression); - } + MergeTreeDataPartWriterWide::addStreams(column, nullptr, compression); } } @@ -277,15 +260,20 @@ void MergeTreeDataPartWriterWide::shiftCurrentMark(const Granules & granules_wri void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block block_to_write = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). 
+ /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. + initOrAdjustDynamicStructureIfNeeded(block_to_write); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical part of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(block_to_write); if (rows_written_in_last_mark > 0) { size_t rows_left_in_last_mark = index_granularity.getMarkRows(getCurrentMark()) - rows_written_in_last_mark; @@ -303,11 +291,9 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm } } - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, block_to_write.rows()); } - Block block_to_write = block; - auto granules_to_write = getGranulesToWrite(index_granularity, block_to_write.rows(), getCurrentMark(), rows_written_in_last_mark); auto offset_columns = written_offset_columns ? *written_offset_columns : WrittenOffsetColumns{}; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index 68f016a7421..19304b28c6c 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -43,6 +43,8 @@ public: void finish(bool sync) final; + size_t getNumberOfOpenStreams() const override { return column_streams.size(); } + private: /// Finish serialization of data: write final mark if required and compute checksums /// Also validate written data in debug mode @@ -91,9 +93,7 @@ private: void addStreams( const NameAndTypePair & name_and_type, const ColumnPtr & column, - const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + const ASTPtr & effective_codec_desc) override; /// Method for self check (used in debug-build only). Checks that written /// data and corresponding marks are consistent. Otherwise throws logical @@ -142,10 +142,6 @@ private: /// How many rows we have already written in the current mark. /// More than zero when incoming blocks are smaller then their granularity. 
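Seen side by side, the Compact and the Wide writer now do the same thing at the top of write(): copy the incoming block, reconcile its dynamic structure against the sample remembered from the first block, and compute index granularity from that adjusted copy. A condensed sketch (the Wide writer additionally rebalances rows left in the last mark, omitted here):

    Block block_to_write = block;                          /// the input block is const, so adjust a copy
    initOrAdjustDynamicStructureIfNeeded(block_to_write);  /// first block: create dynamic streams; later blocks: adapt to the sample

    if (compute_granularity)
        fillIndexGranularity(computeIndexGranularity(block_to_write), block_to_write.rows());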
size_t rows_written_in_last_mark = 0; - - Block block_sample; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index ac29a9244b0..6d19f45e2c4 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -610,7 +610,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( } } - auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir) + auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir, getReadSettings()) .withPartFormat(data.choosePartFormat(expected_size, block.rows())) .withPartInfo(new_part_info) .build(); diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 5a725922e14..f95b840e223 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -345,10 +345,11 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception(ErrorCodes::INCORRECT_DATA, "Index granularity is too big: more than {} rows per index granule.", std::numeric_limits::max()); if (index_sample_block.columns() > 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected that index is build over a single column"); - const String & index_column_name = index_sample_block.getByPosition(0).name; - const ColumnPtr & index_column = block.getByName(index_column_name).column; + const auto & index_column_name = index_sample_block.getByPosition(0).name; + + const auto & index_column = block.getByName(index_column_name).column; ColumnPtr column_cut = index_column->cut(*pos, rows_read); const auto * column_array = typeid_cast(column_cut.get()); @@ -382,8 +383,7 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ if (index->size() + rows > std::numeric_limits::max()) throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - DataTypePtr data_type = block.getDataTypes()[0]; - const auto * data_type_array = typeid_cast(data_type.get()); + const auto * data_type_array = typeid_cast(block.getByName(index_column_name).type.get()); if (!data_type_array) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 48a4a37f444..e9c9f2b4b06 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -280,7 +280,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME, read_settings, write_settings, cancellation_hook); } - MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage); + MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage, getReadSettings()); cloned_part.part = std::move(builder).withPartFormatFromDisk().build(); LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath()); diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp 
b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 898bf5a2933..77231d8d392 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -262,7 +262,7 @@ MergeTreeReaderWide::FileStreams::iterator MergeTreeReaderWide::addStream(const /*num_columns_in_mark=*/ 1); auto stream_settings = settings; - stream_settings.is_low_cardinality_dictionary = substream_path.size() > 1 && substream_path[substream_path.size() - 2].type == ISerialization::Substream::Type::DictionaryKeys; + stream_settings.is_low_cardinality_dictionary = ISerialization::isLowCardinalityDictionarySubcolumn(substream_path); auto create_stream = [&]() { diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 3abba83758b..33910d1048d 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -100,6 +100,8 @@ namespace ErrorCodes DECLARE(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ DECLARE(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \ DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \ + DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \ + DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \ \ /** Inserts settings. */ \ DECLARE(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 604112c26ea..99852309c77 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -94,7 +94,7 @@ void MergeTreeSink::consume(Chunk & chunk) DelayedPartitions partitions; const Settings & settings = context->getSettingsRef(); - size_t streams = 0; + size_t total_streams = 0; bool support_parallel_write = false; auto token_info = chunk.getChunkInfos().get(); @@ -153,16 +153,18 @@ void MergeTreeSink::consume(Chunk & chunk) max_insert_delayed_streams_for_parallel_write = 0; /// In case of too much columns/parts in block, flush explicitly. 
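The hunk that follows changes what this limit counts. Instead of the number of per-part stream objects, the sinks now accumulate the number of file streams each delayed part's writer keeps open, exposed through the new getNumberOfOpenStreams() override (1 for a Compact part, one per column stream for a Wide part). Stripped of the surrounding sink logic, the accounting looks roughly like this (names follow MergeTreeSink::consume; the replicated sink does the same with its ZooKeeper-aware flush):

    size_t current_streams = 0;
    for (const auto & stream : temp_part.streams)
        current_streams += stream.stream->getNumberOfOpenStreams();

    if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write)
    {
        finishDelayedChunk();        /// flush the parts delayed so far and start a new batch
        total_streams = 0;
    }

    /// ... the current part is appended to the delayed partitions ...
    total_streams += current_streams;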
- streams += temp_part.streams.size(); + size_t current_streams = 0; + for (const auto & stream : temp_part.streams) + current_streams += stream.stream->getNumberOfOpenStreams(); - if (streams > max_insert_delayed_streams_for_parallel_write) + if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write) { finishDelayedChunk(); delayed_chunk = std::make_unique(); delayed_chunk->partitions = std::move(partitions); finishDelayedChunk(); - streams = 0; + total_streams = 0; support_parallel_write = false; partitions = DelayedPartitions{}; } @@ -174,6 +176,8 @@ void MergeTreeSink::consume(Chunk & chunk) .block_dedup_token = block_dedup_token, .part_counters = std::move(part_counters), }); + + total_streams += current_streams; } if (need_to_define_dedup_token) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 753b0c5d2fe..936df7b0275 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2289,7 +2289,7 @@ bool MutateTask::prepare() String tmp_part_dir_name = prefix + ctx->future_part->name; ctx->temporary_directory_lock = ctx->data->getTemporaryPartDirectoryHolder(tmp_part_dir_name); - auto builder = ctx->data->getDataPartBuilder(ctx->future_part->name, single_disk_volume, tmp_part_dir_name); + auto builder = ctx->data->getDataPartBuilder(ctx->future_part->name, single_disk_volume, tmp_part_dir_name, getReadSettings()); builder.withPartFormat(ctx->future_part->part_format); builder.withPartInfo(ctx->future_part->part_info); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp index 22b8ccca151..c258048354e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -20,7 +21,6 @@ namespace ErrorCodes { extern const int SUPPORT_IS_DISABLED; extern const int REPLICA_STATUS_CHANGED; - extern const int LOGICAL_ERROR; } ReplicatedMergeTreeAttachThread::ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_) @@ -123,67 +123,6 @@ void ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(const z } } -Int32 ReplicatedMergeTreeAttachThread::fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper) -{ - const String & zookeeper_path = storage.zookeeper_path; - const String & replica_path = storage.replica_path; - const bool replica_readonly = storage.is_readonly; - - for (size_t i = 0; i != 2; ++i) - { - String replica_metadata_version_str; - const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version_str); - if (!replica_metadata_version_exists) - return -1; - - const Int32 metadata_version = parse(replica_metadata_version_str); - - if (metadata_version != 0 || replica_readonly) - { - /// No need to fix anything - return metadata_version; - } - - Coordination::Stat stat; - zookeeper->get(fs::path(zookeeper_path) / "metadata", &stat); - if (stat.version == 0) - { - /// No need to fix anything - return metadata_version; - } - - ReplicatedMergeTreeQueue & queue = storage.queue; - queue.pullLogsToQueue(zookeeper); - if (queue.getStatus().metadata_alters_in_queue != 0) - { - LOG_DEBUG(log, "No need to update metadata_version as there are ALTER_METADATA entries in the queue"); - return metadata_version; - } - - const Coordination::Requests ops = { - 
zkutil::makeSetRequest(fs::path(replica_path) / "metadata_version", std::to_string(stat.version), 0), - zkutil::makeCheckRequest(fs::path(zookeeper_path) / "metadata", stat.version), - }; - Coordination::Responses ops_responses; - const auto code = zookeeper->tryMulti(ops, ops_responses); - if (code == Coordination::Error::ZOK) - { - LOG_DEBUG(log, "Successfully set metadata_version to {}", stat.version); - return stat.version; - } - if (code != Coordination::Error::ZBADVERSION) - { - throw zkutil::KeeperException(code); - } - } - - /// Second attempt is only possible if metadata_version != 0 or metadata.version changed during the first attempt. - /// If metadata_version != 0, on second attempt we will return the new metadata_version. - /// If metadata.version changed, on second attempt we will either get metadata_version != 0 and return the new metadata_version or we will get metadata_alters_in_queue != 0 and return 0. - /// Either way, on second attempt this method should return. - throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to fix replica metadata_version in ZooKeeper after two attempts"); -} - void ReplicatedMergeTreeAttachThread::runImpl() { storage.setZooKeeper(); @@ -227,33 +166,6 @@ void ReplicatedMergeTreeAttachThread::runImpl() /// Just in case it was not removed earlier due to connection loss zookeeper->tryRemove(replica_path + "/flags/force_restore_data"); - const Int32 replica_metadata_version = fixReplicaMetadataVersionIfNeeded(zookeeper); - const bool replica_metadata_version_exists = replica_metadata_version != -1; - if (replica_metadata_version_exists) - { - storage.setInMemoryMetadata(metadata_snapshot->withMetadataVersion(replica_metadata_version)); - } - else - { - /// Table was created before 20.4 and was never altered, - /// let's initialize replica metadata version from global metadata version. 
- Coordination::Stat table_metadata_version_stat; - zookeeper->get(zookeeper_path + "/metadata", &table_metadata_version_stat); - - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/metadata", table_metadata_version_stat.version)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/metadata_version", toString(table_metadata_version_stat.version), zkutil::CreateMode::Persistent)); - - Coordination::Responses res; - auto code = zookeeper->tryMulti(ops, res); - - if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::REPLICA_STATUS_CHANGED, "Failed to initialize metadata_version " - "because table was concurrently altered, will retry"); - - zkutil::KeeperMultiException::check(code, ops, res); - } - storage.checkTableStructure(replica_path, metadata_snapshot); storage.checkParts(skip_sanity_checks); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h index bfc97442598..250a5ed34d1 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h @@ -48,8 +48,6 @@ private: void runImpl(); void finalizeInitialization(); - - Int32 fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper); }; } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 6b1581645f8..b1564b58a6c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -615,7 +615,7 @@ std::pair ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::Zo { std::lock_guard lock(pull_logs_to_queue_mutex); - if (reason != LOAD) + if (reason != LOAD && reason != FIX_METADATA_VERSION) { /// It's totally ok to load queue on readonly replica (that's what RestartingThread does on initialization). /// It's ok if replica became readonly due to connection loss after we got current zookeeper (in this case zookeeper must be expired). diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 9d3349663e2..6ec8818b0c6 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -334,6 +334,7 @@ public: UPDATE, MERGE_PREDICATE, SYNC, + FIX_METADATA_VERSION, OTHER, }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 9d3e26cdc8d..93124e634bd 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -29,6 +29,8 @@ namespace MergeTreeSetting namespace ErrorCodes { extern const int REPLICA_IS_ALREADY_ACTIVE; + extern const int REPLICA_STATUS_CHANGED; + extern const int LOGICAL_ERROR; } namespace FailPoints @@ -207,6 +209,36 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup() throw; } + const Int32 replica_metadata_version = fixReplicaMetadataVersionIfNeeded(zookeeper); + const bool replica_metadata_version_exists = replica_metadata_version != -1; + if (replica_metadata_version_exists) + { + storage.setInMemoryMetadata(storage.getInMemoryMetadataPtr()->withMetadataVersion(replica_metadata_version)); + } + else + { + /// Table was created before 20.4 and was never altered, + /// let's initialize replica metadata version from global metadata version. 
+ + const String & zookeeper_path = storage.zookeeper_path, & replica_path = storage.replica_path; + + Coordination::Stat table_metadata_version_stat; + zookeeper->get(zookeeper_path + "/metadata", &table_metadata_version_stat); + + Coordination::Requests ops; + ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/metadata", table_metadata_version_stat.version)); + ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/metadata_version", toString(table_metadata_version_stat.version), zkutil::CreateMode::Persistent)); + + Coordination::Responses res; + auto code = zookeeper->tryMulti(ops, res); + + if (code == Coordination::Error::ZBADVERSION) + throw Exception(ErrorCodes::REPLICA_STATUS_CHANGED, "Failed to initialize metadata_version " + "because table was concurrently altered, will retry"); + + zkutil::KeeperMultiException::check(code, ops, res); + } + storage.queue.removeCurrentPartsFromMutations(); storage.last_queue_update_finish_time.store(time(nullptr)); @@ -424,4 +456,64 @@ void ReplicatedMergeTreeRestartingThread::setNotReadonly() storage.readonly_start_time.store(0, std::memory_order_relaxed); } + +Int32 ReplicatedMergeTreeRestartingThread::fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper) +{ + const String & zookeeper_path = storage.zookeeper_path; + const String & replica_path = storage.replica_path; + + const size_t num_attempts = 2; + for (size_t attempt = 0; attempt != num_attempts; ++attempt) + { + String replica_metadata_version_str; + Coordination::Stat replica_stat; + const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version_str, &replica_stat); + if (!replica_metadata_version_exists) + return -1; + + const Int32 metadata_version = parse(replica_metadata_version_str); + if (metadata_version != 0) + return metadata_version; + + Coordination::Stat table_stat; + zookeeper->get(fs::path(zookeeper_path) / "metadata", &table_stat); + if (table_stat.version == 0) + return metadata_version; + + ReplicatedMergeTreeQueue & queue = storage.queue; + queue.pullLogsToQueue(zookeeper, {}, ReplicatedMergeTreeQueue::FIX_METADATA_VERSION); + if (queue.getStatus().metadata_alters_in_queue != 0) + { + LOG_INFO(log, "Skipping updating metadata_version as there are ALTER_METADATA entries in the queue"); + return metadata_version; + } + + const Coordination::Requests ops = { + zkutil::makeSetRequest(fs::path(replica_path) / "metadata_version", std::to_string(table_stat.version), replica_stat.version), + zkutil::makeCheckRequest(fs::path(zookeeper_path) / "metadata", table_stat.version), + }; + Coordination::Responses ops_responses; + const Coordination::Error code = zookeeper->tryMulti(ops, ops_responses); + if (code == Coordination::Error::ZOK) + { + LOG_DEBUG(log, "Successfully set metadata_version to {}", table_stat.version); + return table_stat.version; + } + + if (code == Coordination::Error::ZBADVERSION) + { + LOG_WARNING(log, "Cannot fix metadata_version because either metadata.version or metadata_version.version changed, attempts left = {}", num_attempts - attempt - 1); + continue; + } + + throw zkutil::KeeperException(code); + } + + /// Second attempt is only possible if either metadata_version.version or metadata.version changed during the first attempt. + /// If metadata_version changed to non-zero value during the first attempt, on second attempt we will return the new metadata_version. 
+ /// If metadata.version changed during first attempt, on second attempt we will either get metadata_version != 0 and return the new metadata_version or we will get metadata_alters_in_queue != 0 and return 0. + /// So either first or second attempt should return unless metadata_version was rewritten from 0 to 0 during the first attempt which is highly unlikely. + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to fix replica metadata_version in ZooKeeper after two attempts"); +} + } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index d719505ae5e..6f450dc1d40 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB @@ -68,6 +69,9 @@ private: /// Disable readonly mode for table void setNotReadonly(); + + /// Fix replica metadata_version if needed + Int32 fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper); }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index f1b0e5ec385..f3ae6e77ac3 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -341,7 +341,7 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) using DelayedPartitions = std::vector; DelayedPartitions partitions; - size_t streams = 0; + size_t total_streams = 0; bool support_parallel_write = false; for (auto & current_block : part_blocks) @@ -418,15 +418,18 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) max_insert_delayed_streams_for_parallel_write = 0; /// In case of too much columns/parts in block, flush explicitly. - streams += temp_part.streams.size(); - if (streams > max_insert_delayed_streams_for_parallel_write) + size_t current_streams = 0; + for (const auto & stream : temp_part.streams) + current_streams += stream.stream->getNumberOfOpenStreams(); + + if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write) { finishDelayedChunk(zookeeper); delayed_chunk = std::make_unique::DelayedChunk>(replicas_num); delayed_chunk->partitions = std::move(partitions); finishDelayedChunk(zookeeper); - streams = 0; + total_streams = 0; support_parallel_write = false; partitions = DelayedPartitions{}; } @@ -447,6 +450,8 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) std::move(unmerged_block), std::move(part_counters) /// profile_events_scope must be reset here. 
)); + + total_streams += current_streams; } if (need_to_define_dedup_token) diff --git a/src/Storages/NATS/StorageNATS.cpp b/src/Storages/NATS/StorageNATS.cpp index 123f5adc22d..5a51f078e7b 100644 --- a/src/Storages/NATS/StorageNATS.cpp +++ b/src/Storages/NATS/StorageNATS.cpp @@ -786,7 +786,13 @@ void registerStorageNATS(StorageFactory & factory) return std::make_shared(args.table_id, args.getContext(), args.columns, args.comment, std::move(nats_settings), args.mode); }; - factory.registerStorage("NATS", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, }); + factory.registerStorage( + "NATS", + creator_fn, + StorageFactory::StorageFeatures{ + .supports_settings = true, + .source_access_type = AccessType::NATS, + }); } } diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h new file mode 100644 index 00000000000..1a694a25dff --- /dev/null +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -0,0 +1,112 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +namespace DB +{ + +template +concept StorageConfiguration = std::derived_from; + +template +class DataLakeConfiguration : public BaseStorageConfiguration, public std::enable_shared_from_this +{ +public: + using Configuration = StorageObjectStorage::Configuration; + + bool isDataLakeConfiguration() const override { return true; } + + std::string getEngineName() const override { return DataLakeMetadata::name; } + + void update(ObjectStoragePtr object_storage, ContextPtr local_context) override + { + BaseStorageConfiguration::update(object_storage, local_context); + auto new_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); + if (current_metadata && *current_metadata == *new_metadata) + return; + + current_metadata = std::move(new_metadata); + BaseStorageConfiguration::setPaths(current_metadata->getDataFiles()); + BaseStorageConfiguration::setPartitionColumns(current_metadata->getPartitionColumns()); + } + + std::optional tryGetTableStructureFromMetadata() const override + { + if (!current_metadata) + return std::nullopt; + auto schema_from_metadata = current_metadata->getTableSchema(); + if (!schema_from_metadata.empty()) + { + return ColumnsDescription(std::move(schema_from_metadata)); + } + return std::nullopt; + } + +private: + DataLakeMetadataPtr current_metadata; + + ReadFromFormatInfo prepareReadingFromFormat( + ObjectStoragePtr object_storage, + const Strings & requested_columns, + const StorageSnapshotPtr & storage_snapshot, + bool supports_subset_of_columns, + ContextPtr local_context) override + { + auto info = DB::prepareReadingFromFormat(requested_columns, storage_snapshot, local_context, supports_subset_of_columns); + if (!current_metadata) + { + current_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); + } + auto column_mapping = current_metadata->getColumnNameToPhysicalNameMapping(); + if (!column_mapping.empty()) + { + for (const auto & [column_name, physical_name] : column_mapping) + { + auto & column = info.format_header.getByName(column_name); + column.name = physical_name; + } + } + return info; + } +}; + +#if USE_AVRO +#if USE_AWS_S3 +using StorageS3IcebergConfiguration = DataLakeConfiguration; +# endif + +#if USE_AZURE_BLOB_STORAGE +using StorageAzureIcebergConfiguration = DataLakeConfiguration; +# endif + +#if USE_HDFS +using 
StorageHDFSIcebergConfiguration = DataLakeConfiguration; +# endif + +using StorageLocalIcebergConfiguration = DataLakeConfiguration; +#endif + +#if USE_PARQUET +#if USE_AWS_S3 +using StorageS3DeltaLakeConfiguration = DataLakeConfiguration; +# endif +#endif + +#if USE_AWS_S3 +using StorageS3HudiConfiguration = DataLakeConfiguration; +#endif +} diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index 8f9bd5b19b8..ef0adc15186 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -56,22 +56,18 @@ namespace ErrorCodes struct DeltaLakeMetadataImpl { - using ConfigurationPtr = DeltaLakeMetadata::ConfigurationPtr; + using ConfigurationObserverPtr = DeltaLakeMetadata::ConfigurationObserverPtr; ObjectStoragePtr object_storage; - ConfigurationPtr configuration; + ConfigurationObserverPtr configuration; ContextPtr context; /** * Useful links: * - https://github.com/delta-io/delta/blob/master/PROTOCOL.md#data-files */ - DeltaLakeMetadataImpl(ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_) - : object_storage(object_storage_) - , configuration(configuration_) - , context(context_) + DeltaLakeMetadataImpl(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_) + : object_storage(object_storage_), configuration(configuration_), context(context_) { } @@ -111,6 +107,7 @@ struct DeltaLakeMetadataImpl }; DeltaLakeMetadata processMetadataFiles() { + auto configuration_ptr = configuration.lock(); std::set result_files; NamesAndTypesList current_schema; DataLakePartitionColumns current_partition_columns; @@ -122,7 +119,7 @@ struct DeltaLakeMetadataImpl while (true) { const auto filename = withPadding(++current_version) + metadata_file_suffix; - const auto file_path = std::filesystem::path(configuration->getPath()) / deltalake_metadata_directory / filename; + const auto file_path = std::filesystem::path(configuration_ptr->getPath()) / deltalake_metadata_directory / filename; if (!object_storage->exists(StoredObject(file_path))) break; @@ -136,7 +133,7 @@ struct DeltaLakeMetadataImpl } else { - const auto keys = listFiles(*object_storage, *configuration, deltalake_metadata_directory, metadata_file_suffix); + const auto keys = listFiles(*object_storage, *configuration_ptr, deltalake_metadata_directory, metadata_file_suffix); for (const String & key : keys) processMetadataFile(key, current_schema, current_partition_columns, result_files); } @@ -246,6 +243,8 @@ struct DeltaLakeMetadataImpl } } + auto configuration_ptr = configuration.lock(); + if (object->has("add")) { auto add_object = object->get("add").extract(); @@ -253,7 +252,7 @@ struct DeltaLakeMetadataImpl throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `add` field"); auto path = add_object->getValue("path"); - result.insert(fs::path(configuration->getPath()) / path); + result.insert(fs::path(configuration_ptr->getPath()) / path); auto filename = fs::path(path).filename().string(); auto it = file_partition_columns.find(filename); @@ -297,7 +296,7 @@ struct DeltaLakeMetadataImpl throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `remove` field"); auto path = remove_object->getValue("path"); - result.erase(fs::path(configuration->getPath()) / path); + result.erase(fs::path(configuration_ptr->getPath()) / path); } } } @@ -488,7 +487,9 @@ struct DeltaLakeMetadataImpl */ size_t 
readLastCheckpointIfExists() const { - const auto last_checkpoint_file = std::filesystem::path(configuration->getPath()) / deltalake_metadata_directory / "_last_checkpoint"; + auto configuration_ptr = configuration.lock(); + const auto last_checkpoint_file + = std::filesystem::path(configuration_ptr->getPath()) / deltalake_metadata_directory / "_last_checkpoint"; if (!object_storage->exists(StoredObject(last_checkpoint_file))) return 0; @@ -555,7 +556,11 @@ struct DeltaLakeMetadataImpl return 0; const auto checkpoint_filename = withPadding(version) + ".checkpoint.parquet"; - const auto checkpoint_path = std::filesystem::path(configuration->getPath()) / deltalake_metadata_directory / checkpoint_filename; + + auto configuration_ptr = configuration.lock(); + + const auto checkpoint_path + = std::filesystem::path(configuration_ptr->getPath()) / deltalake_metadata_directory / checkpoint_filename; LOG_TRACE(log, "Using checkpoint file: {}", checkpoint_path.string()); @@ -671,7 +676,7 @@ struct DeltaLakeMetadataImpl } LOG_TEST(log, "Adding {}", path); - const auto [_, inserted] = result.insert(std::filesystem::path(configuration->getPath()) / path); + const auto [_, inserted] = result.insert(std::filesystem::path(configuration_ptr->getPath()) / path); if (!inserted) throw Exception(ErrorCodes::INCORRECT_DATA, "File already exists {}", path); } @@ -682,10 +687,7 @@ struct DeltaLakeMetadataImpl LoggerPtr log = getLogger("DeltaLakeMetadataParser"); }; -DeltaLakeMetadata::DeltaLakeMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_) +DeltaLakeMetadata::DeltaLakeMetadata(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_) { auto impl = DeltaLakeMetadataImpl(object_storage_, configuration_, context_); auto result = impl.processMetadataFiles(); diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h index a479a3dd293..031d1fb9e96 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h @@ -1,5 +1,9 @@ #pragma once +#include "config.h" + +#if USE_PARQUET + #include #include #include @@ -12,13 +16,10 @@ namespace DB class DeltaLakeMetadata final : public IDataLakeMetadata { public: - using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr; + using ConfigurationObserverPtr = StorageObjectStorage::ConfigurationObserverPtr; static constexpr auto name = "DeltaLake"; - DeltaLakeMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_); + DeltaLakeMetadata(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_); Strings getDataFiles() const override { return data_files; } @@ -36,10 +37,7 @@ public: && data_files == deltalake_metadata->data_files; } - static DataLakeMetadataPtr create( - ObjectStoragePtr object_storage, - ConfigurationPtr configuration, - ContextPtr local_context) + static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context) { return std::make_unique(object_storage, configuration, local_context); } @@ -52,3 +50,5 @@ private: }; } + +#endif diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp index 91a586ccbf9..77ef769ed0e 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp +++ 
b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp @@ -1,11 +1,10 @@ -#include -#include #include -#include +#include +#include +#include #include #include -#include "config.h" -#include +#include namespace DB { @@ -43,8 +42,9 @@ namespace ErrorCodes */ Strings HudiMetadata::getDataFilesImpl() const { + auto configuration_ptr = configuration.lock(); auto log = getLogger("HudiMetadata"); - const auto keys = listFiles(*object_storage, *configuration, "", Poco::toLower(configuration->format)); + const auto keys = listFiles(*object_storage, *configuration_ptr, "", Poco::toLower(configuration_ptr->format)); using Partition = std::string; using FileID = std::string; @@ -86,13 +86,8 @@ Strings HudiMetadata::getDataFilesImpl() const return result; } -HudiMetadata::HudiMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_) - : WithContext(context_) - , object_storage(object_storage_) - , configuration(configuration_) +HudiMetadata::HudiMetadata(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_) + : WithContext(context_), object_storage(object_storage_), configuration(configuration_) { } diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h index b060b1b0d39..cdab11c4277 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h @@ -13,14 +13,11 @@ namespace DB class HudiMetadata final : public IDataLakeMetadata, private WithContext { public: - using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr; + using ConfigurationObserverPtr = StorageObjectStorage::ConfigurationObserverPtr; static constexpr auto name = "Hudi"; - HudiMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_); + HudiMetadata(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_); Strings getDataFiles() const override; @@ -38,17 +35,14 @@ public: && data_files == hudi_metadata->data_files; } - static DataLakeMetadataPtr create( - ObjectStoragePtr object_storage, - ConfigurationPtr configuration, - ContextPtr local_context) + static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context) { return std::make_unique(object_storage, configuration, local_context); } private: const ObjectStoragePtr object_storage; - const ConfigurationPtr configuration; + const ConfigurationObserverPtr configuration; mutable Strings data_files; std::unordered_map column_name_to_physical_name; DataLakePartitionColumns partition_columns; diff --git a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h deleted file mode 100644 index 91fd9a9f981..00000000000 --- a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h +++ /dev/null @@ -1,169 +0,0 @@ -#pragma once - -#include "config.h" - -#if USE_AVRO - -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -/// Storage for read-only integration with Apache Iceberg tables in Amazon S3 (see https://iceberg.apache.org/) -/// Right now it's implemented on top of StorageS3 and right now it doesn't support -/// many Iceberg features like schema evolution, partitioning, positional and equality deletes. 
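The hunks above move DeltaLakeMetadata, HudiMetadata, and (further down) IcebergMetadata from holding a `ConfigurationPtr` (a `std::shared_ptr`) to a `ConfigurationObserverPtr` (a `std::weak_ptr`), so each method now promotes the observer with `lock()` before dereferencing it. Below is a minimal standalone sketch of that lock-before-use pattern; `Configuration`, `Metadata`, and `getPath` are illustrative stand-ins, not the actual ClickHouse classes.

```cpp
// Sketch of the weak_ptr observer pattern these hunks switch to: the metadata
// object keeps only a std::weak_ptr to the configuration and must lock() it
// before every use. All names here are illustrative.
#include <filesystem>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>

struct Configuration
{
    std::string path;
    const std::string & getPath() const { return path; }
};

using ConfigurationPtr = std::shared_ptr<Configuration>;
using ConfigurationObserverPtr = std::weak_ptr<Configuration>;

class Metadata
{
public:
    explicit Metadata(ConfigurationObserverPtr configuration_) : configuration(std::move(configuration_)) {}

    std::filesystem::path metadataFile(const std::string & filename) const
    {
        // The observer does not extend the configuration's lifetime, so it has to be
        // promoted to a shared_ptr for the duration of the call.
        auto configuration_ptr = configuration.lock();
        if (!configuration_ptr)
            throw std::runtime_error("Configuration is expired");
        return std::filesystem::path(configuration_ptr->getPath()) / "_delta_log" / filename;
    }

private:
    ConfigurationObserverPtr configuration;
};

int main()
{
    auto config = std::make_shared<Configuration>(Configuration{"s3://bucket/table"});
    Metadata metadata{config};
    std::cout << metadata.metadataFile("00000000000000000001.json").string() << '\n';
}
```

Because a `std::weak_ptr` does not keep its target alive, `lock()` returns an empty pointer once the owning storage has released the configuration, letting the caller fail explicitly instead of reading through a stale configuration.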
-template -class IStorageDataLake final : public StorageObjectStorage -{ -public: - using Storage = StorageObjectStorage; - using ConfigurationPtr = Storage::ConfigurationPtr; - - static StoragePtr create( - ConfigurationPtr base_configuration, - ContextPtr context, - const StorageID & table_id_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment_, - std::optional format_settings_, - LoadingStrictnessLevel mode) - { - auto object_storage = base_configuration->createObjectStorage(context, /* is_readonly */true); - DataLakeMetadataPtr metadata; - NamesAndTypesList schema_from_metadata; - const bool use_schema_from_metadata = columns_.empty(); - - if (base_configuration->format == "auto") - base_configuration->format = "Parquet"; - - ConfigurationPtr configuration = base_configuration->clone(); - - try - { - metadata = DataLakeMetadata::create(object_storage, base_configuration, context); - configuration->setPaths(metadata->getDataFiles()); - if (use_schema_from_metadata) - schema_from_metadata = metadata->getTableSchema(); - } - catch (...) - { - if (mode <= LoadingStrictnessLevel::CREATE) - throw; - - metadata.reset(); - configuration->setPaths({}); - tryLogCurrentException(__PRETTY_FUNCTION__); - } - - return std::make_shared>( - base_configuration, std::move(metadata), configuration, object_storage, - context, table_id_, - use_schema_from_metadata ? ColumnsDescription(schema_from_metadata) : columns_, - constraints_, comment_, format_settings_); - } - - String getName() const override { return DataLakeMetadata::name; } - - static ColumnsDescription getTableStructureFromData( - ObjectStoragePtr object_storage_, - ConfigurationPtr base_configuration, - const std::optional & format_settings_, - ContextPtr local_context) - { - auto metadata = DataLakeMetadata::create(object_storage_, base_configuration, local_context); - - auto schema_from_metadata = metadata->getTableSchema(); - if (!schema_from_metadata.empty()) - { - return ColumnsDescription(std::move(schema_from_metadata)); - } - - ConfigurationPtr configuration = base_configuration->clone(); - configuration->setPaths(metadata->getDataFiles()); - std::string sample_path; - return Storage::resolveSchemaFromData(object_storage_, configuration, format_settings_, sample_path, local_context); - } - - void updateConfiguration(ContextPtr local_context) override - { - Storage::updateConfiguration(local_context); - - auto new_metadata = DataLakeMetadata::create(Storage::object_storage, base_configuration, local_context); - if (current_metadata && *current_metadata == *new_metadata) - return; - - current_metadata = std::move(new_metadata); - auto updated_configuration = base_configuration->clone(); - updated_configuration->setPaths(current_metadata->getDataFiles()); - updated_configuration->setPartitionColumns(current_metadata->getPartitionColumns()); - - Storage::configuration = updated_configuration; - } - - template - IStorageDataLake( - ConfigurationPtr base_configuration_, - DataLakeMetadataPtr metadata_, - Args &&... args) - : Storage(std::forward(args)...) 
- , base_configuration(base_configuration_) - , current_metadata(std::move(metadata_)) - { - if (base_configuration->format == "auto") - { - base_configuration->format = Storage::configuration->format; - } - - if (current_metadata) - { - const auto & columns = current_metadata->getPartitionColumns(); - base_configuration->setPartitionColumns(columns); - Storage::configuration->setPartitionColumns(columns); - } - } - -private: - ConfigurationPtr base_configuration; - DataLakeMetadataPtr current_metadata; - - ReadFromFormatInfo prepareReadingFromFormat( - const Strings & requested_columns, - const StorageSnapshotPtr & storage_snapshot, - bool supports_subset_of_columns, - ContextPtr local_context) override - { - auto info = DB::prepareReadingFromFormat(requested_columns, storage_snapshot, local_context, supports_subset_of_columns); - if (!current_metadata) - { - Storage::updateConfiguration(local_context); - current_metadata = DataLakeMetadata::create(Storage::object_storage, base_configuration, local_context); - } - auto column_mapping = current_metadata->getColumnNameToPhysicalNameMapping(); - if (!column_mapping.empty()) - { - for (const auto & [column_name, physical_name] : column_mapping) - { - auto & column = info.format_header.getByName(column_name); - column.name = physical_name; - } - } - return info; - } -}; - -using StorageIceberg = IStorageDataLake; -using StorageDeltaLake = IStorageDataLake; -using StorageHudi = IStorageDataLake; - -} - -#endif diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp index e27612ca4de..f0a80a41d4e 100644 --- a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp @@ -51,7 +51,7 @@ extern const int UNSUPPORTED_METHOD; IcebergMetadata::IcebergMetadata( ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, + ConfigurationObserverPtr configuration_, DB::ContextPtr context_, Int32 metadata_version_, Int32 format_version_, @@ -382,12 +382,12 @@ std::pair getMetadataFileAndVersion( } -DataLakeMetadataPtr IcebergMetadata::create( - ObjectStoragePtr object_storage, - ConfigurationPtr configuration, - ContextPtr local_context) +DataLakeMetadataPtr +IcebergMetadata::create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context) { - const auto [metadata_version, metadata_file_path] = getMetadataFileAndVersion(object_storage, *configuration); + auto configuration_ptr = configuration.lock(); + + const auto [metadata_version, metadata_file_path] = getMetadataFileAndVersion(object_storage, *configuration_ptr); auto log = getLogger("IcebergMetadata"); LOG_DEBUG(log, "Parse metadata {}", metadata_file_path); @@ -416,12 +416,13 @@ DataLakeMetadataPtr IcebergMetadata::create( if (snapshot->getValue("snapshot-id") == current_snapshot_id) { const auto path = snapshot->getValue("manifest-list"); - manifest_list_file = std::filesystem::path(configuration->getPath()) / "metadata" / std::filesystem::path(path).filename(); + manifest_list_file = std::filesystem::path(configuration_ptr->getPath()) / "metadata" / std::filesystem::path(path).filename(); break; } } - return std::make_unique(object_storage, configuration, local_context, metadata_version, format_version, manifest_list_file, schema_id, schema); + return std::make_unique( + object_storage, configuration_ptr, local_context, metadata_version, format_version, manifest_list_file, schema_id, schema); } /** @@ -451,6 +452,7 @@ 
DataLakeMetadataPtr IcebergMetadata::create( */ Strings IcebergMetadata::getDataFiles() const { + auto configuration_ptr = configuration.lock(); if (!data_files.empty()) return data_files; @@ -483,7 +485,7 @@ Strings IcebergMetadata::getDataFiles() const { const auto file_path = col_str->getDataAt(i).toView(); const auto filename = std::filesystem::path(file_path).filename(); - manifest_files.emplace_back(std::filesystem::path(configuration->getPath()) / "metadata" / filename); + manifest_files.emplace_back(std::filesystem::path(configuration_ptr->getPath()) / "metadata" / filename); } NameSet files; @@ -618,9 +620,9 @@ Strings IcebergMetadata::getDataFiles() const const auto status = status_int_column->getInt(i); const auto data_path = std::string(file_path_string_column->getDataAt(i).toView()); - const auto pos = data_path.find(configuration->getPath()); + const auto pos = data_path.find(configuration_ptr->getPath()); if (pos == std::string::npos) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected to find {} in data path: {}", configuration->getPath(), data_path); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected to find {} in data path: {}", configuration_ptr->getPath(), data_path); const auto file_path = data_path.substr(pos); diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h index 7b0deab91c3..eb5cac591f2 100644 --- a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h @@ -1,5 +1,7 @@ #pragma once +#include "config.h" + #if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format. #include @@ -61,13 +63,13 @@ namespace DB class IcebergMetadata : public IDataLakeMetadata, private WithContext { public: - using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr; + using ConfigurationObserverPtr = StorageObjectStorage::ConfigurationObserverPtr; static constexpr auto name = "Iceberg"; IcebergMetadata( ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, + ConfigurationObserverPtr configuration_, ContextPtr context_, Int32 metadata_version_, Int32 format_version_, @@ -92,16 +94,13 @@ public: return iceberg_metadata && getVersion() == iceberg_metadata->getVersion(); } - static DataLakeMetadataPtr create( - ObjectStoragePtr object_storage, - ConfigurationPtr configuration, - ContextPtr local_context); + static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context); private: size_t getVersion() const { return metadata_version; } const ObjectStoragePtr object_storage; - const ConfigurationPtr configuration; + const ConfigurationObserverPtr configuration; Int32 metadata_version; Int32 format_version; String manifest_list_file; diff --git a/src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp b/src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp deleted file mode 100644 index 2c2d75d81a6..00000000000 --- a/src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp +++ /dev/null @@ -1,154 +0,0 @@ -#include "config.h" - -#if USE_AWS_S3 - -# include -# include -# include -# include -# include -# include - -#if USE_HDFS -# include -#endif - -namespace DB -{ - -#if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format. 
- -void registerStorageIceberg(StorageFactory & factory) -{ - factory.registerStorage( - "Iceberg", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); - - factory.registerStorage( - "IcebergS3", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); - - factory.registerStorage( - "IcebergAzure", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), true); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::AZURE, - }); - - factory.registerStorage( - "IcebergLocal", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::FILE, - }); - -#if USE_HDFS - factory.registerStorage( - "IcebergHDFS", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::HDFS, - }); -#endif -} - -#endif - -#if USE_PARQUET -void registerStorageDeltaLake(StorageFactory & factory) -{ - factory.registerStorage( - "DeltaLake", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageDeltaLake::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); -} -#endif - -void registerStorageHudi(StorageFactory & factory) -{ - factory.registerStorage( - "Hudi", - [&](const StorageFactory::Arguments & args) - { - auto 
configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageHudi::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); -} - -} - -#endif diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 579d8e95059..fd2fe0400bb 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -14,14 +14,16 @@ #include #include -#include #include -#include -#include #include +#include #include #include -#include +#include +#include +#include +#include "Databases/LoadingStrictnessLevel.h" +#include "Storages/ColumnsDescription.h" namespace DB @@ -76,6 +78,7 @@ StorageObjectStorage::StorageObjectStorage( const ConstraintsDescription & constraints_, const String & comment, std::optional format_settings_, + LoadingStrictnessLevel mode, bool distributed_processing_, ASTPtr partition_by_) : IStorage(table_id_) @@ -86,9 +89,25 @@ StorageObjectStorage::StorageObjectStorage( , distributed_processing(distributed_processing_) , log(getLogger(fmt::format("Storage{}({})", configuration->getEngineName(), table_id_.getFullTableName()))) { - ColumnsDescription columns{columns_}; + try + { + configuration->update(object_storage, context); + } + catch (...) + { + // If we don't have format or schema yet, we can't ignore failed configuration update, because relevant configuration is crucial for format and schema inference + if (mode <= LoadingStrictnessLevel::CREATE || columns_.empty() || (configuration->format == "auto")) + { + throw; + } + else + { + tryLogCurrentException(log); + } + } std::string sample_path; + ColumnsDescription columns{columns_}; resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context); configuration->check(context); @@ -124,12 +143,11 @@ bool StorageObjectStorage::supportsSubsetOfColumns(const ContextPtr & context) c return FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(configuration->format, context, format_settings); } -void StorageObjectStorage::updateConfiguration(ContextPtr context) +void StorageObjectStorage::Configuration::update(ObjectStoragePtr object_storage_ptr, ContextPtr context) { - IObjectStorage::ApplyNewSettingsOptions options{ .allow_client_change = !configuration->isStaticConfiguration() }; - object_storage->applyNewSettings(context->getConfigRef(), configuration->getTypeName() + ".", context, options); + IObjectStorage::ApplyNewSettingsOptions options{.allow_client_change = !isStaticConfiguration()}; + object_storage_ptr->applyNewSettings(context->getConfigRef(), getTypeName() + ".", context, options); } - namespace { class ReadFromObjectStorageStep : public SourceStepWithFilter @@ -243,7 +261,8 @@ private: }; } -ReadFromFormatInfo StorageObjectStorage::prepareReadingFromFormat( +ReadFromFormatInfo StorageObjectStorage::Configuration::prepareReadingFromFormat( + ObjectStoragePtr, const Strings & requested_columns, const StorageSnapshotPtr & storage_snapshot, bool supports_subset_of_columns, @@ -252,6 +271,11 @@ ReadFromFormatInfo StorageObjectStorage::prepareReadingFromFormat( return DB::prepareReadingFromFormat(requested_columns, storage_snapshot, 
local_context, supports_subset_of_columns); } +std::optional StorageObjectStorage::Configuration::tryGetTableStructureFromMetadata() const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method tryGetTableStructureFromMetadata is not implemented for basic configuration"); +} + void StorageObjectStorage::read( QueryPlan & query_plan, const Names & column_names, @@ -262,7 +286,7 @@ void StorageObjectStorage::read( size_t max_block_size, size_t num_streams) { - updateConfiguration(local_context); + configuration->update(object_storage, local_context); if (partition_by && configuration->withPartitionWildcard()) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, @@ -270,8 +294,8 @@ void StorageObjectStorage::read( getName()); } - const auto read_from_format_info = prepareReadingFromFormat( - column_names, storage_snapshot, supportsSubsetOfColumns(local_context), local_context); + const auto read_from_format_info = configuration->prepareReadingFromFormat( + object_storage, column_names, storage_snapshot, supportsSubsetOfColumns(local_context), local_context); const bool need_only_count = (query_info.optimize_trivial_count || read_from_format_info.requested_columns.empty()) && local_context->getSettingsRef()[Setting::optimize_count_from_files]; @@ -300,7 +324,7 @@ SinkToStoragePtr StorageObjectStorage::write( ContextPtr local_context, bool /* async_insert */) { - updateConfiguration(local_context); + configuration->update(object_storage, local_context); const auto sample_block = metadata_snapshot->getSampleBlock(); const auto & settings = configuration->getQuerySettings(local_context); @@ -409,6 +433,16 @@ ColumnsDescription StorageObjectStorage::resolveSchemaFromData( std::string & sample_path, const ContextPtr & context) { + if (configuration->isDataLakeConfiguration()) + { + configuration->update(object_storage, context); + auto table_structure = configuration->tryGetTableStructureFromMetadata(); + if (table_structure) + { + return table_structure.value(); + } + } + ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); auto schema = readSchemaFromFormat(configuration->format, format_settings, *iterator, context); @@ -489,10 +523,17 @@ void StorageObjectStorage::Configuration::initialize( if (configuration.format == "auto") { - configuration.format = FormatFactory::instance().tryGetFormatFromFileName( - configuration.isArchive() - ? configuration.getPathInArchive() - : configuration.getPath()).value_or("auto"); + if (configuration.isDataLakeConfiguration()) + { + configuration.format = "Parquet"; + } + else + { + configuration.format + = FormatFactory::instance() + .tryGetFormatFromFileName(configuration.isArchive() ? 
configuration.getPathInArchive() : configuration.getPath()) + .value_or("auto"); + } } else FormatFactory::instance().checkFormatName(configuration.format); diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 3f90586c4f3..e2bb41a4935 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -1,12 +1,13 @@ #pragma once -#include -#include #include -#include +#include #include -#include #include +#include #include +#include +#include +#include "Storages/ColumnsDescription.h" namespace DB { @@ -25,6 +26,7 @@ class StorageObjectStorage : public IStorage public: class Configuration; using ConfigurationPtr = std::shared_ptr; + using ConfigurationObserverPtr = std::weak_ptr; using ObjectInfo = RelativePathWithMetadata; using ObjectInfoPtr = std::shared_ptr; using ObjectInfos = std::vector; @@ -55,6 +57,7 @@ public: const ConstraintsDescription & constraints_, const String & comment, std::optional format_settings_, + LoadingStrictnessLevel mode, bool distributed_processing_ = false, ASTPtr partition_by_ = nullptr); @@ -120,16 +123,8 @@ public: const ContextPtr & context); protected: - virtual void updateConfiguration(ContextPtr local_context); - String getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); - virtual ReadFromFormatInfo prepareReadingFromFormat( - const Strings & requested_columns, - const StorageSnapshotPtr & storage_snapshot, - bool supports_subset_of_columns, - ContextPtr local_context); - static std::unique_ptr createReadBufferIterator( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, @@ -207,14 +202,29 @@ public: void setPartitionColumns(const DataLakePartitionColumns & columns) { partition_columns = columns; } const DataLakePartitionColumns & getPartitionColumns() const { return partition_columns; } + virtual bool isDataLakeConfiguration() const { return false; } + + virtual ReadFromFormatInfo prepareReadingFromFormat( + ObjectStoragePtr object_storage, + const Strings & requested_columns, + const StorageSnapshotPtr & storage_snapshot, + bool supports_subset_of_columns, + ContextPtr local_context); + + virtual std::optional tryGetTableStructureFromMetadata() const; + String format = "auto"; String compression_method = "auto"; String structure = "auto"; + virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context); + + protected: virtual void fromNamedCollection(const NamedCollection & collection, ContextPtr context) = 0; virtual void fromAST(ASTs & args, ContextPtr context, bool with_structure) = 0; + void assertInitialized() const; bool initialized = false; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 52b0f00f71a..563bdc44760 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -517,9 +517,19 @@ std::unique_ptr StorageObjectStorageSource::createReadBu LOG_TRACE(log, "Downloading object of size {} with initial prefetch", object_size); + bool prefer_bigger_buffer_size = impl->isCached(); + size_t buffer_size = prefer_bigger_buffer_size + ? 
std::max(read_settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE) + : read_settings.remote_fs_buffer_size; + if (object_size) + buffer_size = std::min(object_size, buffer_size); + auto & reader = context_->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); impl = std::make_unique( - std::move(impl), reader, modified_read_settings, + std::move(impl), + reader, + modified_read_settings, + buffer_size, context_->getAsyncReadCounters(), context_->getFilesystemReadPrefetchesLog()); diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index d0cacc29adf..e94f1860176 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -10,17 +11,19 @@ namespace DB { -#if USE_AWS_S3 || USE_AZURE_BLOB_STORAGE || USE_HDFS - namespace ErrorCodes { extern const int BAD_ARGUMENTS; } -static std::shared_ptr createStorageObjectStorage( - const StorageFactory::Arguments & args, - StorageObjectStorage::ConfigurationPtr configuration, - ContextPtr context) +namespace +{ + +// LocalObjectStorage is only supported for Iceberg Datalake operations where Avro format is required. For regular file access, use FileStorage instead. +#if USE_AWS_S3 || USE_AZURE_BLOB_STORAGE || USE_HDFS || USE_AVRO + +std::shared_ptr +createStorageObjectStorage(const StorageFactory::Arguments & args, StorageObjectStorage::ConfigurationPtr configuration, ContextPtr context) { auto & engine_args = args.engine_args; if (engine_args.empty()) @@ -52,18 +55,20 @@ static std::shared_ptr createStorageObjectStorage( return std::make_shared( configuration, - configuration->createObjectStorage(context, /* is_readonly */false), + configuration->createObjectStorage(context, /* is_readonly */ false), args.getContext(), args.table_id, args.columns, args.constraints, args.comment, format_settings, + args.mode, /* distributed_processing */ false, partition_by); } #endif +} #if USE_AZURE_BLOB_STORAGE void registerStorageAzure(StorageFactory & factory) @@ -148,4 +153,133 @@ void registerStorageObjectStorage(StorageFactory & factory) UNUSED(factory); } +#if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format. 
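The `createReadBuffer` hunk above chooses the prefetch buffer size in two steps: when the read is served through the filesystem cache (`impl->isCached()`), it takes the larger of `remote_fs_buffer_size` and `DBMS_DEFAULT_BUFFER_SIZE`; in either case, a known object size caps the result. A small standalone sketch of that sizing rule follows, with an illustrative constant standing in for `DBMS_DEFAULT_BUFFER_SIZE`.

```cpp
// Standalone sketch of the buffer sizing rule from createReadBuffer above:
// prefer a larger buffer when the data is served from the filesystem cache,
// but never allocate more than the object itself. Constant is a stand-in.
#include <algorithm>
#include <cassert>
#include <cstddef>

constexpr size_t DEFAULT_BUFFER_SIZE = 1 * 1024 * 1024; // stand-in for DBMS_DEFAULT_BUFFER_SIZE

size_t chooseBufferSize(bool is_cached, size_t remote_fs_buffer_size, size_t object_size)
{
    // Cached reads are local and cheap, so a bigger buffer reduces the number of read calls.
    size_t buffer_size = is_cached
        ? std::max(remote_fs_buffer_size, DEFAULT_BUFFER_SIZE)
        : remote_fs_buffer_size;

    // A known object size caps the buffer: no point allocating more than will be read.
    if (object_size)
        buffer_size = std::min(object_size, buffer_size);

    return buffer_size;
}

int main()
{
    assert(chooseBufferSize(/*is_cached=*/true, /*remote_fs_buffer_size=*/64 * 1024, /*object_size=*/0) == DEFAULT_BUFFER_SIZE);
    assert(chooseBufferSize(/*is_cached=*/false, 64 * 1024, 0) == 64 * 1024);
    assert(chooseBufferSize(true, 64 * 1024, 4096) == 4096);
}
```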
+ +void registerStorageIceberg(StorageFactory & factory) +{ +#if USE_AWS_S3 + factory.registerStorage( + "Iceberg", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); + + factory.registerStorage( + "IcebergS3", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); +#endif +#if USE_AZURE_BLOB_STORAGE + factory.registerStorage( + "IcebergAzure", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), true); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::AZURE, + }); +#endif +#if USE_HDFS + factory.registerStorage( + "IcebergHDFS", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::HDFS, + }); +#endif + factory.registerStorage( + "IcebergLocal", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::FILE, + }); +} + +#endif + + +#if USE_PARQUET +void registerStorageDeltaLake(StorageFactory & factory) +{ +#if USE_AWS_S3 + factory.registerStorage( + "DeltaLake", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); +#endif + UNUSED(factory); +} +#endif + +void registerStorageHudi(StorageFactory & factory) +{ +#if USE_AWS_S3 + factory.registerStorage( + "Hudi", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = 
AccessType::S3, + }); +#endif + UNUSED(factory); +} } diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 0f3ac2d5289..3e922b541f7 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -1322,7 +1322,13 @@ void registerStorageRabbitMQ(StorageFactory & factory) return std::make_shared(args.table_id, args.getContext(), args.columns, args.comment, std::move(rabbitmq_settings), args.mode); }; - factory.registerStorage("RabbitMQ", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, }); + factory.registerStorage( + "RabbitMQ", + creator_fn, + StorageFactory::StorageFeatures{ + .supports_settings = true, + .source_access_type = AccessType::RABBITMQ, + }); } } diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp deleted file mode 100644 index ac560b58962..00000000000 --- a/src/Storages/StorageExternalDistributed.cpp +++ /dev/null @@ -1,233 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace Setting -{ - extern const SettingsUInt64 glob_expansion_max_elements; - extern const SettingsUInt64 postgresql_connection_attempt_timeout; - extern const SettingsBool postgresql_connection_pool_auto_close_connection; - extern const SettingsUInt64 postgresql_connection_pool_retries; - extern const SettingsUInt64 postgresql_connection_pool_size; - extern const SettingsUInt64 postgresql_connection_pool_wait_timeout; -} - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -StorageExternalDistributed::StorageExternalDistributed( - const StorageID & table_id_, - std::unordered_set && shards_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment) - : IStorage(table_id_) - , shards(shards_) -{ - StorageInMemoryMetadata storage_metadata; - storage_metadata.setColumns(columns_); - storage_metadata.setConstraints(constraints_); - storage_metadata.setComment(comment); - setInMemoryMetadata(storage_metadata); -} - -void StorageExternalDistributed::read( - QueryPlan & query_plan, - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - size_t num_streams) -{ - std::vector> plans; - for (const auto & shard : shards) - { - plans.emplace_back(std::make_unique()); - shard->read( - *plans.back(), - column_names, - storage_snapshot, - query_info, - context, - processed_stage, - max_block_size, - num_streams - ); - } - - if (plans.empty()) - { - auto header = storage_snapshot->getSampleBlockForColumns(column_names); - InterpreterSelectQuery::addEmptySourceToQueryPlan(query_plan, header, query_info); - } - - if (plans.size() == 1) - { - query_plan = std::move(*plans.front()); - return; - } - - Headers input_headers; - input_headers.reserve(plans.size()); - for (auto & plan : plans) - input_headers.emplace_back(plan->getCurrentHeader()); - - auto union_step = std::make_unique(std::move(input_headers)); - query_plan.unitePlans(std::move(union_step), std::move(plans)); -} - -void registerStorageExternalDistributed(StorageFactory & factory) -{ - factory.registerStorage("ExternalDistributed", [](const StorageFactory::Arguments & args) - { - ASTs & 
engine_args = args.engine_args; - if (engine_args.size() < 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Engine ExternalDistributed must have at least 2 arguments: " - "engine_name, named_collection and/or description"); - - auto context = args.getLocalContext(); - const auto & settings = context->getSettingsRef(); - size_t max_addresses = settings[Setting::glob_expansion_max_elements]; - auto get_addresses = [&](const std::string addresses_expr) - { - return parseRemoteDescription(addresses_expr, 0, addresses_expr.size(), ',', max_addresses); - }; - - std::unordered_set shards; - ASTs inner_engine_args(engine_args.begin() + 1, engine_args.end()); - - ASTPtr * address_arg = nullptr; - - /// If there is a named collection argument, named `addresses_expr` - for (auto & node : inner_engine_args) - { - if (ASTFunction * func = node->as(); func && func->name == "equals" && func->arguments) - { - if (ASTExpressionList * func_args = func->arguments->as(); func_args && func_args->children.size() == 2) - { - if (ASTIdentifier * arg_name = func_args->children[0]->as(); arg_name && arg_name->name() == "addresses_expr") - { - address_arg = &func_args->children[1]; - break; - } - } - } - } - - /// Otherwise it is the first argument. - if (!address_arg) - address_arg = &inner_engine_args.at(0); - - String addresses_expr = checkAndGetLiteralArgument(*address_arg, "addresses"); - Strings shards_addresses = get_addresses(addresses_expr); - - auto engine_name = checkAndGetLiteralArgument(engine_args[0], "engine_name"); - if (engine_name == "URL") - { - auto format_settings = StorageURL::getFormatSettingsFromArgs(args); - for (const auto & shard_address : shards_addresses) - { - *address_arg = std::make_shared(shard_address); - auto configuration = StorageURL::getConfiguration(inner_engine_args, context); - auto uri_options = parseRemoteDescription(shard_address, 0, shard_address.size(), '|', max_addresses); - if (uri_options.size() > 1) - { - shards.insert( - std::make_shared( - uri_options, args.table_id, configuration.format, format_settings, - args.columns, args.constraints, context, configuration.compression_method)); - } - else - { - shards.insert(std::make_shared( - shard_address, args.table_id, configuration.format, format_settings, - args.columns, args.constraints, String{}, context, configuration.compression_method)); - } - } - } -#if USE_MYSQL - else if (engine_name == "MySQL") - { - MySQLSettings mysql_settings; - for (const auto & shard_address : shards_addresses) - { - *address_arg = std::make_shared(shard_address); - auto configuration = StorageMySQL::getConfiguration(inner_engine_args, context, mysql_settings); - configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 3306); - auto pool = createMySQLPoolWithFailover(configuration, mysql_settings); - shards.insert(std::make_shared( - args.table_id, std::move(pool), configuration.database, configuration.table, - /* replace_query = */ false, /* on_duplicate_clause = */ "", - args.columns, args.constraints, String{}, context, mysql_settings)); - } - } -#endif -#if USE_LIBPQXX - else if (engine_name == "PostgreSQL") - { - for (const auto & shard_address : shards_addresses) - { - *address_arg = std::make_shared(shard_address); - auto configuration = StoragePostgreSQL::getConfiguration(inner_engine_args, context); - configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 5432); - auto pool = std::make_shared( - configuration, - 
settings[Setting::postgresql_connection_pool_size], - settings[Setting::postgresql_connection_pool_wait_timeout], - settings[Setting::postgresql_connection_pool_retries], - settings[Setting::postgresql_connection_pool_auto_close_connection], - settings[Setting::postgresql_connection_attempt_timeout]); - shards.insert(std::make_shared( - args.table_id, std::move(pool), configuration.table, args.columns, args.constraints, String{}, context)); - } - } -#endif - else - { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "External storage engine {} is not supported for StorageExternalDistributed. " - "Supported engines are: MySQL, PostgreSQL, URL", - engine_name); - } - - return std::make_shared( - args.table_id, - std::move(shards), - args.columns, - args.constraints, - args.comment); - }, - { - .source_access_type = AccessType::SOURCES, - }); -} - -} diff --git a/src/Storages/StorageExternalDistributed.h b/src/Storages/StorageExternalDistributed.h deleted file mode 100644 index 56c7fe86f34..00000000000 --- a/src/Storages/StorageExternalDistributed.h +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once - -#include "config.h" - -#include - - -namespace DB -{ - -/// Storages MySQL and PostgreSQL use ConnectionPoolWithFailover and support multiple replicas. -/// This class unites multiple storages with replicas into multiple shards with replicas. -/// A query to external database is passed to one replica on each shard, the result is united. -/// Replicas on each shard have the same priority, traversed replicas are moved to the end of the queue. -/// Similar approach is used for URL storage. -class StorageExternalDistributed final : public DB::IStorage -{ -public: - StorageExternalDistributed( - const StorageID & table_id_, - std::unordered_set && shards_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment); - - std::string getName() const override { return "ExternalDistributed"; } - - void read( - QueryPlan & query_plan, - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - size_t num_streams) override; - -private: - using Shards = std::unordered_set; - Shards shards; -}; - -} diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 316eced1ed6..2a4a5f3370f 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -889,7 +889,7 @@ private: } }; - auto max_multiread_size = with_retries->getKeeperSettings().batch_size_for_keeper_multiread; + auto max_multiread_size = with_retries->getKeeperSettings().batch_size_for_multiread; auto keys_it = data_children.begin(); while (keys_it != data_children.end()) @@ -941,9 +941,8 @@ void StorageKeeperMap::backupData(BackupEntriesCollector & backup_entries_collec ( getLogger(fmt::format("StorageKeeperMapBackup ({})", getStorageID().getNameForLogs())), [&] { return getClient(); }, - WithRetries::KeeperSettings::fromContext(backup_entries_collector.getContext()), - backup_entries_collector.getContext()->getProcessListElement(), - [](WithRetries::FaultyKeeper &) {} + BackupKeeperSettings::fromContext(backup_entries_collector.getContext()), + backup_entries_collector.getContext()->getProcessListElement() ); backup_entries_collector.addBackupEntries( @@ -972,9 +971,8 @@ void StorageKeeperMap::restoreDataFromBackup(RestorerFromBackup & restorer, cons ( getLogger(fmt::format("StorageKeeperMapRestore ({})", 
getStorageID().getNameForLogs())), [&] { return getClient(); }, - WithRetries::KeeperSettings::fromContext(restorer.getContext()), - restorer.getContext()->getProcessListElement(), - [](WithRetries::FaultyKeeper &) {} + BackupKeeperSettings::fromContext(restorer.getContext()), + restorer.getContext()->getProcessListElement() ); bool allow_non_empty_tables = restorer.isNonEmptyTableAllowed(); @@ -1037,7 +1035,7 @@ void StorageKeeperMap::restoreDataImpl( CompressedReadBufferFromFile compressed_in{std::move(in_from_file)}; fs::path data_path_fs(zk_data_path); - auto max_multi_size = with_retries->getKeeperSettings().batch_size_for_keeper_multi; + auto max_multi_size = with_retries->getKeeperSettings().batch_size_for_multi; Coordination::Requests create_requests; const auto flush_create_requests = [&] diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index d047b28e076..d56b09eec67 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -228,10 +228,20 @@ StorageMaterializedView::StorageMaterializedView( if (!fixed_uuid) { - if (to_inner_uuid != UUIDHelpers::Nil) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "TO INNER UUID is not allowed for materialized views with REFRESH without APPEND"); - if (to_table_id.hasUUID()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "explicit UUID is not allowed for target table of materialized view with REFRESH without APPEND"); + if (mode >= LoadingStrictnessLevel::ATTACH) + { + /// Old versions of ClickHouse (when refreshable MV was experimental) could add useless + /// UUIDs to attach queries. + to_table_id.uuid = UUIDHelpers::Nil; + to_inner_uuid = UUIDHelpers::Nil; + } + else + { + if (to_inner_uuid != UUIDHelpers::Nil) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "TO INNER UUID is not allowed for materialized views with REFRESH without APPEND"); + if (to_table_id.hasUUID()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "explicit UUID is not allowed for target table of materialized view with REFRESH without APPEND"); + } } if (!has_inner_table) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 40cd6e01dba..1ba0617d8ae 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -155,7 +155,7 @@ StorageMergeTree::StorageMergeTree( loadMutations(); loadDeduplicationLog(); - prewarmMarkCache(getActivePartsLoadingThreadPool().get()); + prewarmMarkCacheIfNeeded(getActivePartsLoadingThreadPool().get()); } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index bbfedb2f355..793fd02c656 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -509,7 +509,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } loadDataParts(skip_sanity_checks, expected_parts_on_this_replica); - prewarmMarkCache(getActivePartsLoadingThreadPool().get()); + prewarmMarkCacheIfNeeded(getActivePartsLoadingThreadPool().get()); if (LoadingStrictnessLevel::ATTACH <= mode) { @@ -2095,7 +2095,7 @@ MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::attachPartHelperFo const auto part_old_name = part_info->getPartNameV1(); const auto volume = std::make_shared("volume_" + part_old_name, disk); - auto part = getDataPartBuilder(entry.new_part_name, volume, fs::path(DETACHED_DIR_NAME) / part_old_name) + auto part = getDataPartBuilder(entry.new_part_name, volume, fs::path(DETACHED_DIR_NAME) / part_old_name, 
getReadSettings()) .withPartFormatFromDisk() .build(); diff --git a/src/Storages/System/StorageSystemDashboards.cpp b/src/Storages/System/StorageSystemDashboards.cpp index 96ba7e59cf2..27579da4bfe 100644 --- a/src/Storages/System/StorageSystemDashboards.cpp +++ b/src/Storages/System/StorageSystemDashboards.cpp @@ -227,6 +227,194 @@ FROM merge('system', '^metric_log') WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY t ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + /// Default per host dashboard for self-managed ClickHouse + { + { "dashboard", "Overview (host)" }, + { "title", "Queries/second" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_Query) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "CPU Usage (cores)" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_OSCPUVirtualTimeMicroseconds) / 1000000 +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Queries Running" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(CurrentMetric_Query) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Merges Running" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(CurrentMetric_Merge) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Selected Bytes/second" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_SelectedBytes) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "IO Wait" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_OSIOWaitMicroseconds) / 1000000 +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "CPU Wait" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_OSCPUWaitMicroseconds) / 1000000 +FROM merge('system', '^metric_log') +WHERE 
event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "OS CPU Usage (Userspace)" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(value) +FROM merge('system', '^asynchronous_metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSUserTimeNormalized' +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "OS CPU Usage (Kernel)" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(value) +FROM merge('system', '^asynchronous_metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSSystemTimeNormalized' +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Read From Disk" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_OSReadBytes) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Read From Filesystem" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_OSReadChars) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Memory (tracked)" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(CurrentMetric_MemoryTracking) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Load Average (15 minutes)" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(value) +FROM merge('system', '^asynchronous_metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'LoadAverage15' +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Selected Rows/second" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(ProfileEvent_SelectedRows) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Inserted Rows/second" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} 
SECOND)::INT as t, hostname, avg(ProfileEvent_InsertedRows) +FROM merge('system', '^metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Total MergeTree Parts" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, avg(value) +FROM merge('system', '^asynchronous_metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'TotalPartsOfMergeTreeTables' +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} +)EOQ") } + }, + { + { "dashboard", "Overview (host)" }, + { "title", "Max Parts For Partition" }, + { "query", trim(R"EOQ( +SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t, hostname, max(value) +FROM merge('system', '^asynchronous_metric_log') +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'MaxPartCountForPartition' +GROUP BY t, hostname +ORDER BY t WITH FILL STEP {rounding:UInt32} )EOQ") } }, /// Default dashboard for ClickHouse Cloud @@ -369,7 +557,143 @@ ORDER BY t WITH FILL STEP {rounding:UInt32} { "dashboard", "Cloud overview" }, { "title", "Concurrent network connections" }, { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, max(TCP_Connections), max(MySQL_Connections), max(HTTP_Connections) FROM (SELECT event_time, sum(CurrentMetric_TCPConnection) AS TCP_Connections, sum(CurrentMetric_MySQLConnection) AS MySQL_Connections, sum(CurrentMetric_HTTPConnection) AS HTTP_Connections FROM clusterAllReplicas(default, merge('system', '^metric_log')) WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} GROUP BY event_time) GROUP BY t ORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } - } + }, + /// Default per host dashboard for ClickHouse Cloud + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Queries/second" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_Query) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "CPU Usage (cores)" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, avg(metric) / 1000000\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_OSCPUVirtualTimeMicroseconds) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32} GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Queries Running" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM 
(\n SELECT event_time, hostname, sum(CurrentMetric_Query) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Merges Running" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(CurrentMetric_Merge) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Selected Bytes/second" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_SelectedBytes) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "IO Wait (local fs)" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_OSIOWaitMicroseconds) / 1000000 AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "S3 read wait" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_ReadBufferFromS3Microseconds) / 1000000 AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "S3 read errors/sec" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_ReadBufferFromS3RequestsErrors) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "CPU Wait" }, + { "query", "SELECT 
\n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_OSCPUWaitMicroseconds) / 1000000 AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "OS CPU Usage (Userspace, normalized)" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, avg(value)\nFROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\nWHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}\nAND metric = 'OSUserTimeNormalized'\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "OS CPU Usage (Kernel, normalized)" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, avg(value)\nFROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\nWHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}\nAND metric = 'OSSystemTimeNormalized'\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Read From Disk (bytes/sec)" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_OSReadBytes) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Read From Filesystem (bytes/sec)" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_OSReadChars) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Memory (tracked, bytes)" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(CurrentMetric_MemoryTracking) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Load Average (15 minutes)" }, + { 
"query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, avg(value)\nFROM (\n SELECT event_time, hostname, sum(value) AS value\n FROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n AND metric = 'LoadAverage15'\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Selected Rows/sec" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_SelectedRows) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Inserted Rows/sec" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_InsertedRows) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Total MergeTree Parts" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, max(value)\nFROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\nWHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}\nAND metric = 'TotalPartsOfMergeTreeTables'\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Max Parts For Partition" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, max(value)\nFROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\nWHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32}\nAND metric = 'MaxPartCountForPartition'\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Read From S3 (bytes/sec)" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_ReadBufferFromS3Bytes) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Filesystem Cache Size" }, + { "query", "SELECT \n 
toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(CurrentMetric_FilesystemCacheSize) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Disk S3 write req/sec" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT as t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_DiskS3PutObject + ProfileEvent_DiskS3UploadPart + ProfileEvent_DiskS3CreateMultipartUpload + ProfileEvent_DiskS3CompleteMultipartUpload) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\n GROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Disk S3 read req/sec" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_DiskS3GetObject + ProfileEvent_DiskS3HeadObject + ProfileEvent_DiskS3ListObjects) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\nGROUP BY t, hostname\nORDER BY t\nWITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "FS cache hit rate" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, sum(ProfileEvent_CachedReadBufferReadFromCacheBytes) / (sum(ProfileEvent_CachedReadBufferReadFromCacheBytes) + sum(ProfileEvent_CachedReadBufferReadFromSourceBytes)) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\nGROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Page cache hit rate" }, + { "query", "SELECT \n toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t,\n hostname,\n avg(metric)\nFROM (\n SELECT event_time, hostname, greatest(0, (sum(ProfileEvent_OSReadChars) - sum(ProfileEvent_OSReadBytes)) / (sum(ProfileEvent_OSReadChars) + sum(ProfileEvent_ReadBufferFromS3Bytes))) AS metric \n FROM clusterAllReplicas(default, merge('system', '^metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n GROUP BY event_time, hostname)\nGROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Network receive bytes/sec" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} 
SECOND)::INT AS t, hostname, avg(value)\nFROM (\n SELECT event_time, hostname, sum(value) AS value\n FROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n AND metric LIKE 'NetworkReceiveBytes%'\n GROUP BY event_time, hostname)\nGROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, + { + { "dashboard", "Cloud overview (host)" }, + { "title", "Network send bytes/sec" }, + { "query", "SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, hostname, avg(value)\nFROM (\n SELECT event_time, hostname, sum(value) AS value\n FROM clusterAllReplicas(default, merge('system', '^asynchronous_metric_log'))\n WHERE event_date >= toDate(now() - {seconds:UInt32})\n AND event_time >= now() - {seconds:UInt32}\n AND metric LIKE 'NetworkSendBytes%'\n GROUP BY event_time, hostname)\nGROUP BY t, hostname\nORDER BY t WITH FILL STEP {rounding:UInt32} SETTINGS skip_unavailable_shards = 1" } + }, }; auto add_dashboards = [&](const auto & dashboards) diff --git a/src/Storages/System/StorageSystemGrants.cpp b/src/Storages/System/StorageSystemGrants.cpp index 5de1f8cef55..aa010e44388 100644 --- a/src/Storages/System/StorageSystemGrants.cpp +++ b/src/Storages/System/StorageSystemGrants.cpp @@ -30,8 +30,8 @@ ColumnsDescription StorageSystemGrants::getColumnsDescription() {"column", std::make_shared(std::make_shared()), "Name of a column to which access is granted."}, {"is_partial_revoke", std::make_shared(), "Logical value. It shows whether some privileges have been revoked. Possible values: " - "0 — The row describes a partial revoke, " - "1 — The row describes a grant." + "0 — The row describes a grant, " + "1 — The row describes a partial revoke." 
}, {"grant_option", std::make_shared(), "Permission is granted WITH GRANT OPTION."}, }; diff --git a/src/Storages/fuzzers/CMakeLists.txt b/src/Storages/fuzzers/CMakeLists.txt index 2c7c0c16fc2..719b9b77cd9 100644 --- a/src/Storages/fuzzers/CMakeLists.txt +++ b/src/Storages/fuzzers/CMakeLists.txt @@ -4,4 +4,4 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.c target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms) clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp) -target_link_libraries (columns_description_fuzzer PRIVATE) +target_link_libraries (columns_description_fuzzer PRIVATE dbms) diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index cfd406ccbe2..458b151a400 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -41,10 +41,11 @@ void registerStorageS3Queue(StorageFactory & factory); #if USE_PARQUET void registerStorageDeltaLake(StorageFactory & factory); #endif +#endif + #if USE_AVRO void registerStorageIceberg(StorageFactory & factory); #endif -#endif #if USE_AZURE_BLOB_STORAGE void registerStorageAzureQueue(StorageFactory & factory); @@ -93,10 +94,6 @@ void registerStoragePostgreSQL(StorageFactory & factory); void registerStorageMaterializedPostgreSQL(StorageFactory & factory); #endif -#if USE_MYSQL || USE_LIBPQXX -void registerStorageExternalDistributed(StorageFactory & factory); -#endif - #if USE_FILELOG void registerStorageFileLog(StorageFactory & factory); #endif @@ -144,6 +141,10 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) registerStorageAzureQueue(factory); #endif +#if USE_AVRO + registerStorageIceberg(factory); +#endif + #if USE_AWS_S3 registerStorageHudi(factory); registerStorageS3Queue(factory); @@ -152,14 +153,10 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) registerStorageDeltaLake(factory); #endif - #if USE_AVRO - registerStorageIceberg(factory); - #endif +#endif - #endif - - #if USE_HDFS - #if USE_HIVE +#if USE_HDFS +# if USE_HIVE registerStorageHive(factory); #endif #endif @@ -205,10 +202,6 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) registerStorageMaterializedPostgreSQL(factory); #endif - #if USE_MYSQL || USE_LIBPQXX - registerStorageExternalDistributed(factory); - #endif - #if USE_SQLITE registerStorageSQLite(factory); #endif diff --git a/src/TableFunctions/ITableFunctionDataLake.h b/src/TableFunctions/ITableFunctionDataLake.h deleted file mode 100644 index eff181d168f..00000000000 --- a/src/TableFunctions/ITableFunctionDataLake.h +++ /dev/null @@ -1,126 +0,0 @@ -#pragma once - -#include "config.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -template -class ITableFunctionDataLake : public TableFunction -{ -public: - static constexpr auto name = Name::name; - std::string getName() const override { return name; } - -protected: - StoragePtr executeImpl( - const ASTPtr & /* ast_function */, - ContextPtr context, - const std::string & table_name, - ColumnsDescription cached_columns, - bool /*is_insert_query*/) const override - { - ColumnsDescription columns; - auto configuration = TableFunction::getConfiguration(); - if (configuration->structure != "auto") - columns = parseColumnsListFromString(configuration->structure, context); - else if (!cached_columns.empty()) - columns = cached_columns; - - StoragePtr storage = Storage::create( - configuration, context, 
StorageID(TableFunction::getDatabaseName(), table_name), - columns, ConstraintsDescription{}, String{}, std::nullopt, LoadingStrictnessLevel::CREATE); - - storage->startup(); - return storage; - } - - const char * getStorageTypeName() const override { return name; } - - ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override - { - auto configuration = TableFunction::getConfiguration(); - if (configuration->structure == "auto") - { - context->checkAccess(TableFunction::getSourceAccessType()); - auto object_storage = TableFunction::getObjectStorage(context, !is_insert_query); - return Storage::getTableStructureFromData(object_storage, configuration, std::nullopt, context); - } - - return parseColumnsListFromString(configuration->structure, context); - } - - void parseArguments(const ASTPtr & ast_function, ContextPtr context) override - { - auto configuration = TableFunction::getConfiguration(); - configuration->format = "Parquet"; - /// Set default format to Parquet if it's not specified in arguments. - TableFunction::parseArguments(ast_function, context); - } -}; - -struct TableFunctionIcebergName -{ - static constexpr auto name = "iceberg"; -}; - -struct TableFunctionIcebergS3Name -{ - static constexpr auto name = "icebergS3"; -}; - -struct TableFunctionIcebergAzureName -{ - static constexpr auto name = "icebergAzure"; -}; - -struct TableFunctionIcebergLocalName -{ - static constexpr auto name = "icebergLocal"; -}; - -struct TableFunctionIcebergHDFSName -{ - static constexpr auto name = "icebergHDFS"; -}; - -struct TableFunctionDeltaLakeName -{ - static constexpr auto name = "deltaLake"; -}; - -struct TableFunctionHudiName -{ - static constexpr auto name = "hudi"; -}; - -#if USE_AVRO -# if USE_AWS_S3 -using TableFunctionIceberg = ITableFunctionDataLake; -using TableFunctionIcebergS3 = ITableFunctionDataLake; -# endif -# if USE_AZURE_BLOB_STORAGE -using TableFunctionIcebergAzure = ITableFunctionDataLake; -# endif -using TableFunctionIcebergLocal = ITableFunctionDataLake; -#if USE_HDFS -using TableFunctionIcebergHDFS = ITableFunctionDataLake; -#endif -#endif -#if USE_AWS_S3 -# if USE_PARQUET -using TableFunctionDeltaLake = ITableFunctionDataLake; -#endif -using TableFunctionHudi = ITableFunctionDataLake; -#endif -} diff --git a/src/TableFunctions/TableFunctionMongoDB.cpp b/src/TableFunctions/TableFunctionMongoDB.cpp index e13427c1557..9f91839fb33 100644 --- a/src/TableFunctions/TableFunctionMongoDB.cpp +++ b/src/TableFunctions/TableFunctionMongoDB.cpp @@ -15,7 +15,7 @@ #include #include #include - +#include namespace DB { @@ -85,17 +85,11 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt { if (const auto * ast_func = typeid_cast(args[i].get())) { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - + const auto & [arg_name, arg_value] = getKeyValueMongoDBArgument(ast_func); if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); + structure = checkAndGetLiteralArgument(arg_value, arg_name); else if (arg_name == "options") - main_arguments.push_back(function_args[1]); + main_arguments.push_back(arg_value); } else if (i == 5) { @@ -117,15 +111,11 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt { if (const auto 
* ast_func = typeid_cast(args[i].get())) { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - + const auto & [arg_name, arg_value] = getKeyValueMongoDBArgument(ast_func); if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); + structure = checkAndGetLiteralArgument(arg_value, arg_name); + else if (arg_name == "options") + main_arguments.push_back(arg_value); } else if (i == 2) { @@ -145,6 +135,20 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt } +std::pair getKeyValueMongoDBArgument(const ASTFunction * ast_func) +{ + const auto * args_expr = assert_cast(ast_func->arguments.get()); + const auto & function_args = args_expr->children; + if (function_args.size() != 2 || ast_func->name != "equals" || !function_args[0]->as()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); + + const auto & arg_name = function_args[0]->as()->name(); + if (arg_name == "structure" || arg_name == "options") + return std::make_pair(arg_name, function_args[1]); + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); +} + void registerTableFunctionMongoDB(TableFunctionFactory & factory) { factory.registerFunction( diff --git a/src/TableFunctions/TableFunctionMongoDB.h b/src/TableFunctions/TableFunctionMongoDB.h new file mode 100644 index 00000000000..2ab8ee9479f --- /dev/null +++ b/src/TableFunctions/TableFunctionMongoDB.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +#include +#include +#include + + +namespace DB +{ + +std::pair getKeyValueMongoDBArgument(const ASTFunction * ast_func); + +} + diff --git a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp index dc1df7fcad8..4e27fd35e12 100644 --- a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp +++ b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace DB @@ -97,17 +98,11 @@ void TableFunctionMongoDBPocoLegacy::parseArguments(const ASTPtr & ast_function, { if (const auto * ast_func = typeid_cast(args[i].get())) { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - + const auto & [arg_name, arg_value] = getKeyValueMongoDBArgument(ast_func); if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); + structure = checkAndGetLiteralArgument(arg_value, "structure"); else if (arg_name == "options") - main_arguments.push_back(function_args[1]); + main_arguments.push_back(arg_value); } else if (i == 5) { diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 8559a4cd668..12de08afad0 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -117,8 +117,9 @@ StoragePtr TableFunctionObjectStorage::executeImpl( columns, ConstraintsDescription{}, String{}, - /* format_settings */std::nullopt, 
- /* distributed_processing */false, + /* format_settings */ std::nullopt, + /* mode */ LoadingStrictnessLevel::CREATE, + /* distributed_processing */ false, nullptr); storage->startup(); @@ -224,4 +225,87 @@ template class TableFunctionObjectStorage; #endif template class TableFunctionObjectStorage; + +#if USE_AVRO +void registerTableFunctionIceberg(TableFunctionFactory & factory) +{ +#if USE_AWS_S3 + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store. Alias to icebergS3)", + .examples{{"iceberg", "SELECT * FROM iceberg(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store.)", + .examples{{"icebergS3", "SELECT * FROM icebergS3(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); + +#endif +#if USE_AZURE_BLOB_STORAGE + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on Azure object store.)", + .examples{{"icebergAzure", "SELECT * FROM icebergAzure(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +#endif +#if USE_HDFS + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on HDFS virtual filesystem.)", + .examples{{"icebergHDFS", "SELECT * FROM icebergHDFS(url)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +#endif + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored locally.)", + .examples{{"icebergLocal", "SELECT * FROM icebergLocal(filename)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +} +#endif + + +#if USE_AWS_S3 +#if USE_PARQUET +void registerTableFunctionDeltaLake(TableFunctionFactory & factory) +{ + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the DeltaLake table stored on object store.)", + .examples{{"deltaLake", "SELECT * FROM deltaLake(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +} +#endif + +void registerTableFunctionHudi(TableFunctionFactory & factory) +{ + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Hudi table stored on object store.)", + .examples{{"hudi", "SELECT * FROM hudi(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +} + +#endif + +void registerDataLakeTableFunctions(TableFunctionFactory & factory) +{ + UNUSED(factory); +#if USE_AVRO + registerTableFunctionIceberg(factory); +#endif +#if USE_AWS_S3 +#if USE_PARQUET + registerTableFunctionDeltaLake(factory); +#endif + registerTableFunctionHudi(factory); +#endif +} } diff --git a/src/TableFunctions/TableFunctionObjectStorage.h b/src/TableFunctions/TableFunctionObjectStorage.h index 6b923f93e75..19cd637bd80 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.h +++ b/src/TableFunctions/TableFunctionObjectStorage.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -61,6 +62,48 @@ struct LocalDefinition static constexpr auto storage_type_name = "Local"; }; +struct IcebergDefinition 
+{ + static constexpr auto name = "iceberg"; + static constexpr auto storage_type_name = "S3"; +}; + +struct IcebergS3Definition +{ + static constexpr auto name = "icebergS3"; + static constexpr auto storage_type_name = "S3"; +}; + +struct IcebergAzureDefinition +{ + static constexpr auto name = "icebergAzure"; + static constexpr auto storage_type_name = "Azure"; +}; + +struct IcebergLocalDefinition +{ + static constexpr auto name = "icebergLocal"; + static constexpr auto storage_type_name = "Local"; +}; + +struct IcebergHDFSDefinition +{ + static constexpr auto name = "icebergHDFS"; + static constexpr auto storage_type_name = "HDFS"; +}; + +struct DeltaLakeDefinition +{ + static constexpr auto name = "deltaLake"; + static constexpr auto storage_type_name = "S3"; +}; + +struct HudiDefinition +{ + static constexpr auto name = "hudi"; + static constexpr auto storage_type_name = "S3"; +}; + template class TableFunctionObjectStorage : public ITableFunction { @@ -137,4 +180,25 @@ using TableFunctionHDFS = TableFunctionObjectStorage; + + +#if USE_AVRO +# if USE_AWS_S3 +using TableFunctionIceberg = TableFunctionObjectStorage; +using TableFunctionIcebergS3 = TableFunctionObjectStorage; +# endif +# if USE_AZURE_BLOB_STORAGE +using TableFunctionIcebergAzure = TableFunctionObjectStorage; +# endif +# if USE_HDFS +using TableFunctionIcebergHDFS = TableFunctionObjectStorage; +# endif +using TableFunctionIcebergLocal = TableFunctionObjectStorage; +#endif +#if USE_AWS_S3 +# if USE_PARQUET +using TableFunctionDeltaLake = TableFunctionObjectStorage; +# endif +using TableFunctionHudi = TableFunctionObjectStorage; +#endif } diff --git a/src/TableFunctions/TableFunctionObjectStorageCluster.cpp b/src/TableFunctions/TableFunctionObjectStorageCluster.cpp index 449bd2c8c49..5ca26aabe32 100644 --- a/src/TableFunctions/TableFunctionObjectStorageCluster.cpp +++ b/src/TableFunctions/TableFunctionObjectStorageCluster.cpp @@ -41,9 +41,10 @@ StoragePtr TableFunctionObjectStorageCluster::execute StorageID(Base::getDatabaseName(), table_name), columns, ConstraintsDescription{}, - /* comment */String{}, - /* format_settings */std::nullopt, /// No format_settings - /* distributed_processing */true, + /* comment */ String{}, + /* format_settings */ std::nullopt, /// No format_settings + /* mode */ LoadingStrictnessLevel::CREATE, + /* distributed_processing */ true, /*partition_by_=*/nullptr); } else diff --git a/src/TableFunctions/TableFunctionURL.cpp b/src/TableFunctions/TableFunctionURL.cpp index 2bdc0b449e0..8f4841a992b 100644 --- a/src/TableFunctions/TableFunctionURL.cpp +++ b/src/TableFunctions/TableFunctionURL.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include diff --git a/src/TableFunctions/registerDataLakeTableFunctions.cpp b/src/TableFunctions/registerDataLakeTableFunctions.cpp deleted file mode 100644 index 63b09fcf9e7..00000000000 --- a/src/TableFunctions/registerDataLakeTableFunctions.cpp +++ /dev/null @@ -1,96 +0,0 @@ -#include -#include - -namespace DB -{ - -#if USE_AVRO -void registerTableFunctionIceberg(TableFunctionFactory & factory) -{ -# if USE_AWS_S3 - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store. 
Alias to icebergS3)", - .examples{{"iceberg", "SELECT * FROM iceberg(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store.)", - .examples{{"icebergS3", "SELECT * FROM icebergS3(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); - -# endif -# if USE_AZURE_BLOB_STORAGE - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on Azure object store.)", - .examples{{"icebergAzure", "SELECT * FROM icebergAzure(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); -# endif -# if USE_HDFS - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on HDFS object store.)", - .examples{{"icebergHDFS", "SELECT * FROM icebergHDFS(url)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); -# endif - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored locally.)", - .examples{{"icebergLocal", "SELECT * FROM icebergLocal(filename)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); -} -#endif - -#if USE_AWS_S3 -# if USE_PARQUET -void registerTableFunctionDeltaLake(TableFunctionFactory & factory) -{ - factory.registerFunction( - { - .documentation = - { - .description=R"(The table function can be used to read the DeltaLake table stored on object store.)", - .examples{{"deltaLake", "SELECT * FROM deltaLake(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"} - }, - .allow_readonly = false - }); -} -#endif - -void registerTableFunctionHudi(TableFunctionFactory & factory) -{ - factory.registerFunction( - { - .documentation = - { - .description=R"(The table function can be used to read the Hudi table stored on object store.)", - .examples{{"hudi", "SELECT * FROM hudi(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"} - }, - .allow_readonly = false - }); -} -#endif - -void registerDataLakeTableFunctions(TableFunctionFactory & factory) -{ - UNUSED(factory); -#if USE_AVRO - registerTableFunctionIceberg(factory); -#endif -#if USE_AWS_S3 -# if USE_PARQUET - registerTableFunctionDeltaLake(factory); -#endif - registerTableFunctionHudi(factory); -#endif -} - -} diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index c66659d4e93..00a7eeebb35 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -200,6 +200,7 @@ class RpmArtifactory: ) _PROD_REPO_URL = "https://packages.clickhouse.com/rpm/clickhouse.repo" _SIGN_KEY = "885E2BDCF96B0B45ABF058453E4AD4719DDE9A38" + FEDORA_VERSION = 40 def __init__(self, release_info: ReleaseInfo, dry_run: bool): self.release_info = release_info @@ -249,16 +250,16 @@ class RpmArtifactory: Shell.check("sync") def test_packages(self): - Shell.check("docker pull fedora:latest", strict=True) + Shell.check(f"docker pull fedora:{self.FEDORA_VERSION}", strict=True) print(f"Test package installation, version [{self.version}]") rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1" - cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager 
--add-repo={self.repo_url} && {rpm_command}"' + cmd = f'docker run --rm fedora:{self.FEDORA_VERSION} /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"' print("Running test command:") print(f" {cmd}") assert Shell.check(cmd) print("Test package installation, version [latest]") rpm_command_2 = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client" - cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command_2}"' + cmd = f'docker run --rm fedora:{self.FEDORA_VERSION} /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command_2}"' print("Running test command:") print(f" {cmd}") assert Shell.check(cmd) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 6f2e3e70736..c271339db8b 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -795,11 +795,12 @@ class CiCache: # start waiting for the next TIMEOUT seconds if there are more than X(=4) jobs to wait # wait TIMEOUT seconds in rounds. Y(=5) is the max number of rounds expired_sec = 0 - start_at = int(time.time()) + start_at = time.time() while expired_sec < TIMEOUT and self.jobs_to_wait: await_finished: Set[str] = set() if not dry_run: - time.sleep(poll_interval_sec) + # Do not sleep longer than required + time.sleep(min(poll_interval_sec, TIMEOUT - expired_sec)) self.update() for job_name, job_config in self.jobs_to_wait.items(): num_batches = job_config.num_batches @@ -844,10 +845,12 @@ class CiCache: del self.jobs_to_wait[job] if not dry_run: - expired_sec = int(time.time()) - start_at - print( - f"...awaiting continues... seconds left [{TIMEOUT - expired_sec}]" - ) + expired_sec = int(time.time() - start_at) + msg = f"...awaiting continues... 
seconds left [{TIMEOUT - expired_sec}]" + if expired_sec >= TIMEOUT: + # Avoid `seconds left [-3]` + msg = f"awaiting for round {round_cnt} is finished" + print(msg) else: # make up for 2 iterations in dry_run expired_sec += int(TIMEOUT / 2) + 1 diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 9f5d5f1983d..67cdbbdcf6d 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -51,11 +51,11 @@ class CI: TAG_CONFIGS = { Tags.DO_NOT_TEST_LABEL: LabelConfig(run_jobs=[JobNames.STYLE_CHECK]), - Tags.CI_SET_ARM: LabelConfig( + Tags.CI_SET_AARCH64: LabelConfig( run_jobs=[ JobNames.STYLE_CHECK, BuildNames.PACKAGE_AARCH64, - JobNames.INTEGRATION_TEST_ARM, + JobNames.INTEGRATION_TEST_AARCH64, ] ), Tags.CI_SET_REQUIRED: LabelConfig( @@ -95,16 +95,16 @@ class CI: static_binary_name="aarch64", additional_pkgs=True, ), - runner_type=Runners.BUILDER_ARM, + runner_type=Runners.BUILDER_AARCH64, ), - BuildNames.PACKAGE_ARM_ASAN: CommonJobConfigs.BUILD.with_properties( + BuildNames.PACKAGE_AARCH64_ASAN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( - name=BuildNames.PACKAGE_ARM_ASAN, + name=BuildNames.PACKAGE_AARCH64_ASAN, compiler="clang-18-aarch64", sanitizer="address", package_type="deb", ), - runner_type=Runners.BUILDER_ARM, + runner_type=Runners.BUILDER_AARCH64, ), BuildNames.PACKAGE_ASAN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( @@ -276,16 +276,16 @@ class CI: JobNames.INSTALL_TEST_AMD: CommonJobConfigs.INSTALL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE] ), - JobNames.INSTALL_TEST_ARM: CommonJobConfigs.INSTALL_TEST.with_properties( + JobNames.INSTALL_TEST_AARCH64: CommonJobConfigs.INSTALL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.STATEFUL_TEST_ASAN: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN] ), - JobNames.STATEFUL_TEST_ARM_ASAN: CommonJobConfigs.STATEFUL_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ARM_ASAN], - runner_type=Runners.FUNC_TESTER_ARM, + JobNames.STATEFUL_TEST_AARCH64_ASAN: CommonJobConfigs.STATEFUL_TEST.with_properties( + required_builds=[BuildNames.PACKAGE_AARCH64_ASAN], + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATEFUL_TEST_TSAN: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN] @@ -307,7 +307,7 @@ class CI: ), JobNames.STATEFUL_TEST_AARCH64: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATEFUL_TEST_PARALLEL_REPL_RELEASE: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE] @@ -335,10 +335,10 @@ class CI: JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2 ), - JobNames.STATELESS_TEST_ARM_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ARM_ASAN], + JobNames.STATELESS_TEST_AARCH64_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( + required_builds=[BuildNames.PACKAGE_AARCH64_ASAN], num_batches=2, - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], num_batches=4 @@ -360,7 +360,7 @@ class CI: ), 
JobNames.STATELESS_TEST_AARCH64: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=2 @@ -432,10 +432,10 @@ class CI: num_batches=6, timeout=9000, # the job timed out with default value (7200) ), - JobNames.INTEGRATION_TEST_ARM: CommonJobConfigs.INTEGRATION_TEST.with_properties( + JobNames.INTEGRATION_TEST_AARCH64: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], num_batches=6, - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.INTEGRATION_TEST: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -453,10 +453,10 @@ class CI: required_builds=[BuildNames.PACKAGE_RELEASE], required_on_release_branch=True, ), - JobNames.COMPATIBILITY_TEST_ARM: CommonJobConfigs.COMPATIBILITY_TEST.with_properties( + JobNames.COMPATIBILITY_TEST_AARCH64: CommonJobConfigs.COMPATIBILITY_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], required_on_release_branch=True, - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.UNIT_TEST: CommonJobConfigs.UNIT_TEST.with_properties( required_builds=[BuildNames.BINARY_RELEASE], @@ -499,22 +499,22 @@ class CI: required_builds=[BuildNames.BINARY_RELEASE], run_by_labels=[Labels.JEPSEN_TEST], run_command="jepsen_check.py keeper", - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.JEPSEN_SERVER: JobConfig( required_builds=[BuildNames.BINARY_RELEASE], run_by_labels=[Labels.JEPSEN_TEST], run_command="jepsen_check.py server", - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.PERFORMANCE_TEST_AMD64: CommonJobConfigs.PERF_TESTS.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 ), - JobNames.PERFORMANCE_TEST_ARM64: CommonJobConfigs.PERF_TESTS.with_properties( + JobNames.PERFORMANCE_TEST_AARCH64: CommonJobConfigs.PERF_TESTS.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], num_batches=4, run_by_labels=[Labels.PR_PERFORMANCE], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.SQLANCER: CommonJobConfigs.SQLLANCER_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -532,16 +532,16 @@ class CI: JobNames.CLICKBENCH_TEST: CommonJobConfigs.CLICKBENCH_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], ), - JobNames.CLICKBENCH_TEST_ARM: CommonJobConfigs.CLICKBENCH_TEST.with_properties( + JobNames.CLICKBENCH_TEST_AARCH64: CommonJobConfigs.CLICKBENCH_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.LIBFUZZER_TEST: JobConfig( required_builds=[BuildNames.FUZZERS], run_by_labels=[Tags.libFuzzer], - timeout=10800, + timeout=5400, run_command='libfuzzer_test_check.py "$CHECK_NAME"', - runner_type=Runners.STYLE_CHECKER, + runner_type=Runners.FUNC_TESTER, ), JobNames.DOCKER_SERVER: CommonJobConfigs.DOCKER_SERVER.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE, BuildNames.PACKAGE_AARCH64] @@ -572,7 +572,7 @@ class CI: ), JobNames.STYLE_CHECK: JobConfig( run_always=True, - 
runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.BUGFIX_VALIDATE: JobConfig( run_by_labels=[Labels.PR_BUGFIX, Labels.PR_CRITICAL_BUGFIX], diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index dd86dc320c2..fb3e55fdbe3 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -58,11 +58,11 @@ class Runners(metaclass=WithIter): """ BUILDER = "builder" - BUILDER_ARM = "builder-aarch64" + BUILDER_AARCH64 = "builder-aarch64" STYLE_CHECKER = "style-checker" - STYLE_CHECKER_ARM = "style-checker-aarch64" + STYLE_CHECKER_AARCH64 = "style-checker-aarch64" FUNC_TESTER = "func-tester" - FUNC_TESTER_ARM = "func-tester-aarch64" + FUNC_TESTER_AARCH64 = "func-tester-aarch64" FUZZER_UNIT_TESTER = "fuzzer-unit-tester" @@ -78,7 +78,7 @@ class Tags(metaclass=WithIter): # to upload all binaries from build jobs UPLOAD_ALL_ARTIFACTS = "upload_all" CI_SET_SYNC = "ci_set_sync" - CI_SET_ARM = "ci_set_arm" + CI_SET_AARCH64 = "ci_set_aarch64" CI_SET_REQUIRED = "ci_set_required" CI_SET_BUILDS = "ci_set_builds" @@ -106,7 +106,7 @@ class BuildNames(metaclass=WithIter): PACKAGE_MSAN = "package_msan" PACKAGE_DEBUG = "package_debug" PACKAGE_AARCH64 = "package_aarch64" - PACKAGE_ARM_ASAN = "package_aarch64_asan" + PACKAGE_AARCH64_ASAN = "package_aarch64_asan" PACKAGE_RELEASE_COVERAGE = "package_release_coverage" BINARY_RELEASE = "binary_release" BINARY_TIDY = "binary_tidy" @@ -134,14 +134,14 @@ class JobNames(metaclass=WithIter): DOCKER_SERVER = "Docker server image" DOCKER_KEEPER = "Docker keeper image" INSTALL_TEST_AMD = "Install packages (release)" - INSTALL_TEST_ARM = "Install packages (aarch64)" + INSTALL_TEST_AARCH64 = "Install packages (aarch64)" STATELESS_TEST_DEBUG = "Stateless tests (debug)" STATELESS_TEST_RELEASE = "Stateless tests (release)" STATELESS_TEST_RELEASE_COVERAGE = "Stateless tests (coverage)" STATELESS_TEST_AARCH64 = "Stateless tests (aarch64)" STATELESS_TEST_ASAN = "Stateless tests (asan)" - STATELESS_TEST_ARM_ASAN = "Stateless tests (aarch64, asan)" + STATELESS_TEST_AARCH64_ASAN = "Stateless tests (aarch64, asan)" STATELESS_TEST_TSAN = "Stateless tests (tsan)" STATELESS_TEST_MSAN = "Stateless tests (msan)" STATELESS_TEST_UBSAN = "Stateless tests (ubsan)" @@ -158,7 +158,7 @@ class JobNames(metaclass=WithIter): STATEFUL_TEST_RELEASE_COVERAGE = "Stateful tests (coverage)" STATEFUL_TEST_AARCH64 = "Stateful tests (aarch64)" STATEFUL_TEST_ASAN = "Stateful tests (asan)" - STATEFUL_TEST_ARM_ASAN = "Stateful tests (aarch64, asan)" + STATEFUL_TEST_AARCH64_ASAN = "Stateful tests (aarch64, asan)" STATEFUL_TEST_TSAN = "Stateful tests (tsan)" STATEFUL_TEST_MSAN = "Stateful tests (msan)" STATEFUL_TEST_UBSAN = "Stateful tests (ubsan)" @@ -181,7 +181,7 @@ class JobNames(metaclass=WithIter): INTEGRATION_TEST_ASAN = "Integration tests (asan)" INTEGRATION_TEST_ASAN_OLD_ANALYZER = "Integration tests (asan, old analyzer)" INTEGRATION_TEST_TSAN = "Integration tests (tsan)" - INTEGRATION_TEST_ARM = "Integration tests (aarch64)" + INTEGRATION_TEST_AARCH64 = "Integration tests (aarch64)" INTEGRATION_TEST_FLAKY = "Integration tests flaky check (asan)" UPGRADE_TEST_DEBUG = "Upgrade check (debug)" @@ -205,7 +205,7 @@ class JobNames(metaclass=WithIter): JEPSEN_SERVER = "ClickHouse Server Jepsen" PERFORMANCE_TEST_AMD64 = "Performance Comparison (release)" - PERFORMANCE_TEST_ARM64 = "Performance Comparison (aarch64)" + PERFORMANCE_TEST_AARCH64 = "Performance Comparison (aarch64)" # SQL_LOGIC_TEST = "Sqllogic test (release)" @@ -214,10 +214,10 @@ 
class JobNames(metaclass=WithIter): SQLTEST = "SQLTest" COMPATIBILITY_TEST = "Compatibility check (release)" - COMPATIBILITY_TEST_ARM = "Compatibility check (aarch64)" + COMPATIBILITY_TEST_AARCH64 = "Compatibility check (aarch64)" CLICKBENCH_TEST = "ClickBench (release)" - CLICKBENCH_TEST_ARM = "ClickBench (aarch64)" + CLICKBENCH_TEST_AARCH64 = "ClickBench (aarch64)" LIBFUZZER_TEST = "libFuzzer tests" @@ -387,7 +387,7 @@ class CommonJobConfigs: "./tests/ci/upload_result_helper.py", ], ), - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, disable_await=True, ) COMPATIBILITY_TEST = JobConfig( @@ -634,8 +634,8 @@ REQUIRED_CHECKS = [ JobNames.STATEFUL_TEST_RELEASE, JobNames.STATELESS_TEST_RELEASE, JobNames.STATELESS_TEST_ASAN, - JobNames.STATELESS_TEST_ARM_ASAN, - JobNames.STATEFUL_TEST_ARM_ASAN, + JobNames.STATELESS_TEST_AARCH64_ASAN, + JobNames.STATEFUL_TEST_AARCH64_ASAN, JobNames.STATELESS_TEST_FLAKY_ASAN, JobNames.STATEFUL_TEST_ASAN, JobNames.STYLE_CHECK, diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py index bb0c717160e..38fb2eceb28 100644 --- a/tests/ci/compatibility_check.py +++ b/tests/ci/compatibility_check.py @@ -131,7 +131,7 @@ def main(): check_name = args.check_name or os.getenv("CHECK_NAME") assert check_name check_glibc = True - # currently hardcoded to x86, don't enable for ARM + # currently hardcoded to x86, don't enable for AARCH64 check_distributions = ( "aarch64" not in check_name.lower() and "arm64" not in check_name.lower() ) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 8f19dd7d023..2616fbe3f5d 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -3,20 +3,37 @@ import argparse import logging import os +import re import sys import zipfile from pathlib import Path from typing import List +from botocore.exceptions import ClientError + from build_download_helper import download_fuzzers from clickhouse_helper import CiLogsCredentials from docker_images_helper import DockerImage, get_docker_image, pull_image -from env_helper import REPO_COPY, REPORT_PATH, TEMP_PATH +from env_helper import REPO_COPY, REPORT_PATH, S3_BUILDS_BUCKET, TEMP_PATH from pr_info import PRInfo +from report import FAILURE, SUCCESS, JobReport, TestResult +from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen +TIMEOUT = 60 NO_CHANGES_MSG = "Nothing to run" +s3 = S3Helper() + + +def zipdir(path, ziph): + # ziph is zipfile handle + for root, _, files in os.walk(path): + for file in files: + ziph.write( + os.path.join(root, file), + os.path.relpath(os.path.join(root, file), os.path.join(path, "..")), + ) def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): @@ -59,16 +76,19 @@ def get_run_command( envs = [ # a static link, don't use S3_URL or S3_DOWNLOAD - '-e S3_URL="https://s3.amazonaws.com/clickhouse-datasets"', + '-e S3_URL="https://s3.amazonaws.com"', ] envs += [f"-e {e}" for e in additional_envs] env_str = " ".join(envs) + uid = os.getuid() + gid = os.getgid() return ( f"docker run " f"{ci_logs_args} " + f"--user {uid}:{gid} " f"--workdir=/fuzzers " f"--volume={fuzzers_path}:/fuzzers " f"--volume={repo_path}/tests:/usr/share/clickhouse-test " @@ -85,6 +105,115 @@ def parse_args(): return parser.parse_args() +def download_corpus(path: str): + logging.info("Download corpus...") + + try: + s3.download_file( + bucket=S3_BUILDS_BUCKET, + s3_path="fuzzer/corpus.zip", + local_file_path=path, + ) + except 
ClientError as e: + if e.response["Error"]["Code"] == "NoSuchKey": + logging.debug("No active corpus exists") + else: + raise + + with zipfile.ZipFile(f"{path}/corpus.zip", "r") as zipf: + zipf.extractall(path) + os.remove(f"{path}/corpus.zip") + + units = 0 + for _, _, files in os.walk(path): + units += len(files) + + logging.info("...downloaded %d units", units) + + +def upload_corpus(path: str): + with zipfile.ZipFile(f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: + zipdir(f"{path}/corpus/", zipf) + s3.upload_file( + bucket=S3_BUILDS_BUCKET, + file_path=f"{path}/corpus.zip", + s3_path="fuzzer/corpus.zip", + ) + + +def process_error(path: Path) -> list: + ERROR = r"^==\d+==\s?ERROR: (\S+): (.*)" + # error_source = "" + # error_reason = "" + # test_unit = "" + # TEST_UNIT_LINE = r"artifact_prefix='.*\/'; Test unit written to (.*)" + error_info = [] + is_error = False + + with open(path, "r", encoding="utf-8") as file: + for line in file: + line = line.rstrip("\n") + if is_error: + error_info.append(line) + # match = re.search(TEST_UNIT_LINE, line) + # if match: + # test_unit = match.group(1) + continue + + match = re.search(ERROR, line) + if match: + error_info.append(line) + # error_source = match.group(1) + # error_reason = match.group(2) + is_error = True + + return error_info + + +def read_status(status_path: Path): + result = [] + with open(status_path, "r", encoding="utf-8") as file: + for line in file: + result.append(line.rstrip("\n")) + return result + + +def process_results(result_path: Path): + test_results = [] + oks = 0 + errors = 0 + fails = 0 + for file in result_path.glob("*.status"): + fuzzer = file.stem + file_path = file.parent / fuzzer + file_path_unit = file_path.with_suffix(".unit") + file_path_out = file_path.with_suffix(".out") + file_path_stdout = file_path.with_suffix(".stdout") + status = read_status(file) + result = TestResult(fuzzer, status[0], float(status[2])) + if status[0] == "OK": + oks += 1 + elif status[0] == "ERROR": + errors += 1 + if file_path_out.exists(): + result.set_log_files(f"['{file_path_out}']") + elif file_path_stdout.exists(): + result.set_log_files(f"['{file_path_stdout}']") + else: + fails += 1 + if file_path_out.exists(): + result.set_raw_logs("\n".join(process_error(file_path_out))) + if file_path_unit.exists(): + result.set_log_files(f"['{file_path_unit}']") + elif file_path_out.exists(): + result.set_log_files(f"['{file_path_out}']") + elif file_path_stdout.exists(): + result.set_log_files(f"['{file_path_stdout}']") + test_results.append(result) + + return [oks, errors, fails, test_results] + + def main(): logging.basicConfig(level=logging.INFO) @@ -114,15 +243,18 @@ def main(): fuzzers_path = temp_path / "fuzzers" fuzzers_path.mkdir(parents=True, exist_ok=True) + download_corpus(fuzzers_path) download_fuzzers(check_name, reports_path, fuzzers_path) for file in os.listdir(fuzzers_path): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) elif file.endswith("_seed_corpus.zip"): - corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") + seed_corpus_path = fuzzers_path / ( + file.removesuffix("_seed_corpus.zip") + ".in" + ) with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: - zfd.extractall(corpus_path) + zfd.extractall(seed_corpus_path) result_path = temp_path / "result_path" result_path.mkdir(parents=True, exist_ok=True) @@ -133,6 +265,8 @@ def main(): check_name, run_by_hash_num, run_by_hash_total ) + additional_envs.append(f"TIMEOUT={TIMEOUT}") + ci_logs_credentials = 
CiLogsCredentials(Path(temp_path) / "export-logs-config.sh") ci_logs_args = ci_logs_credentials.get_docker_arguments( pr_info, stopwatch.start_time_str, check_name @@ -152,10 +286,25 @@ def main(): retcode = process.wait() if retcode == 0: logging.info("Run successfully") + upload_corpus(fuzzers_path) else: logging.info("Run failed") - sys.exit(0) + results = process_results(result_path) + + success = results[1] == 0 and results[2] == 0 + + JobReport( + description=f"OK: {results[0]}, ERROR: {results[1]}, FAIL: {results[2]}", + test_results=results[3], + status=SUCCESS if success else FAILURE, + start_time=stopwatch.start_time_str, + duration=stopwatch.duration_seconds, + additional_files=[], + ).dump() + + if not success: + sys.exit(1) if __name__ == "__main__": diff --git a/tests/ci/mark_release_ready.py b/tests/ci/mark_release_ready.py index 7ffb3c9a89b..838961bd89f 100755 --- a/tests/ci/mark_release_ready.py +++ b/tests/ci/mark_release_ready.py @@ -9,9 +9,10 @@ from get_robot_token import get_best_robot_token from git_helper import commit as commit_arg from github_helper import GitHub from pr_info import PRInfo -from release import RELEASE_READY_STATUS from report import SUCCESS +RELEASE_READY_STATUS = "Ready for release" + def main(): parser = argparse.ArgumentParser( diff --git a/tests/ci/release.py b/tests/ci/release.py deleted file mode 100755 index ed9d60a5cad..00000000000 --- a/tests/ci/release.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/env python3 - -""" -script to create releases for ClickHouse - -The `gh` CLI preferred over the PyGithub to have an easy way to rollback bad -release in command line by simple execution giving rollback commands - -On another hand, PyGithub is used for convenient getting commit's status from API - -To run this script on a freshly installed Ubuntu 22.04 system, it is enough to do the following commands: - -sudo apt install pip -pip install requests boto3 github PyGithub -sudo snap install gh -gh auth login -""" - - -import argparse -import json -import logging -import subprocess -from contextlib import contextmanager -from typing import Any, Final, Iterator, List, Optional, Tuple - -from ci_config import Labels -from git_helper import Git, commit, release_branch -from report import SUCCESS -from version_helper import ( - FILE_WITH_VERSION_PATH, - GENERATED_CONTRIBUTORS, - ClickHouseVersion, - VersionType, - get_abs_path, - get_version_from_repo, - update_cmake_version, - update_contributors, -) - -RELEASE_READY_STATUS = "Ready for release" - - -class Repo: - VALID = ("ssh", "https", "origin") - - def __init__(self, repo: str, protocol: str): - self._repo = repo - self._url = "" - self.url = protocol - - @property - def url(self) -> str: - return self._url - - @url.setter - def url(self, protocol: str) -> None: - if protocol == "ssh": - self._url = f"git@github.com:{self}.git" - elif protocol == "https": - self._url = f"https://github.com/{self}.git" - elif protocol == "origin": - self._url = protocol - else: - raise ValueError(f"protocol must be in {self.VALID}") - - def __str__(self): - return self._repo - - -class Release: - NEW = "new" # type: Final - PATCH = "patch" # type: Final - VALID_TYPE = (NEW, PATCH) # type: Final[Tuple[str, str]] - CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH) - CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS) - - def __init__( - self, - repo: Repo, - release_commit: str, - release_type: str, - dry_run: bool, - with_stderr: bool, - ): - self.repo = repo - self._release_commit = "" - self.release_commit = 
release_commit - self.dry_run = dry_run - self.with_stderr = with_stderr - assert release_type in self.VALID_TYPE - self.release_type = release_type - self._git = Git() - self._version = get_version_from_repo(git=self._git) - self.release_version = self.version - self._release_branch = "" - self._version_new_tag = None # type: Optional[ClickHouseVersion] - self._rollback_stack = [] # type: List[str] - - def run( - self, cmd: str, cwd: Optional[str] = None, dry_run: bool = False, **kwargs: Any - ) -> str: - cwd_text = "" - if cwd: - cwd_text = f" (CWD='{cwd}')" - if dry_run: - logging.info("Would run command%s:\n %s", cwd_text, cmd) - return "" - if not self.with_stderr: - kwargs["stderr"] = subprocess.DEVNULL - - logging.info("Running command%s:\n %s", cwd_text, cmd) - return self._git.run(cmd, cwd, **kwargs) - - def set_release_info(self): - # Fetch release commit and tags in case they don't exist locally - self.run( - f"git fetch {self.repo.url} {self.release_commit} --no-recurse-submodules" - ) - self.run(f"git fetch {self.repo.url} --tags --no-recurse-submodules") - - # Get the actual version for the commit before check - with self._checkout(self.release_commit, True): - self.release_branch = f"{self.version.major}.{self.version.minor}" - self.release_version = get_version_from_repo(git=self._git) - self.release_version.with_description(self.get_stable_release_type()) - - self.read_version() - - def read_version(self): - self._git.update() - self.version = get_version_from_repo(git=self._git) - - def get_stable_release_type(self) -> str: - if self.version.is_lts: - return VersionType.LTS - return VersionType.STABLE - - def check_commit_release_ready(self): - per_page = 100 - page = 1 - while True: - statuses = json.loads( - self.run( - f"gh api 'repos/{self.repo}/commits/{self.release_commit}" - f"/statuses?per_page={per_page}&page={page}'" - ) - ) - - if not statuses: - break - - for status in statuses: - if status["context"] == RELEASE_READY_STATUS: - if not status["state"] == SUCCESS: - raise ValueError( - f"the status {RELEASE_READY_STATUS} is {status['state']}" - ", not success" - ) - - return - - page += 1 - - raise KeyError( - f"the status {RELEASE_READY_STATUS} " - f"is not found for commit {self.release_commit}" - ) - - def check_prerequisites(self): - """ - Check tooling installed in the system, `git` is checked by Git() init - """ - try: - self.run("gh auth status") - except subprocess.SubprocessError: - logging.error( - "The github-cli either not installed or not setup, please follow " - "the instructions on https://github.com/cli/cli#installation and " - "https://cli.github.com/manual/" - ) - raise - - if self.release_type == self.PATCH: - self.check_commit_release_ready() - - def do( - self, check_dirty: bool, check_run_from_master: bool, check_branch: bool - ) -> None: - self.check_prerequisites() - - if check_dirty: - logging.info("Checking if repo is clean") - try: - self.run("git diff HEAD --exit-code") - except subprocess.CalledProcessError: - logging.fatal("Repo contains uncommitted changes") - raise - - if check_run_from_master and self._git.branch != "master": - raise RuntimeError("the script must be launched only from master") - - self.set_release_info() - - if check_branch: - self.check_branch() - - if self.release_type == self.NEW: - with self._checkout(self.release_commit, True): - # Checkout to the commit, it will provide the correct current version - with self.new_release(): - with self.create_release_branch(): - logging.info( - "Publishing release %s from 
commit %s is done", - self.release_version.describe, - self.release_commit, - ) - - elif self.release_type == self.PATCH: - with self._checkout(self.release_commit, True): - with self.patch_release(): - logging.info( - "Publishing release %s from commit %s is done", - self.release_version.describe, - self.release_commit, - ) - - if self.dry_run: - logging.info("Dry running, clean out possible changes") - rollback = self._rollback_stack.copy() - rollback.reverse() - for cmd in rollback: - self.run(cmd) - return - - self.log_post_workflows() - self.log_rollback() - - def check_no_tags_after(self): - tags_after_commit = self.run(f"git tag --contains={self.release_commit}") - if tags_after_commit: - raise RuntimeError( - f"Commit {self.release_commit} belongs to following tags:\n" - f"{tags_after_commit}\nChoose another commit" - ) - - def check_branch(self): - branch = self.release_branch - if self.release_type == self.NEW: - # Commit to spin up the release must belong to a main branch - branch = "master" - elif self.release_type != self.PATCH: - raise ( - ValueError(f"release_type {self.release_type} not in {self.VALID_TYPE}") - ) - - # Prefetch the branch to have it updated - if self._git.branch == branch: - self.run("git pull --no-recurse-submodules") - else: - self.run( - f"git fetch {self.repo.url} {branch}:{branch} --no-recurse-submodules" - ) - output = self.run(f"git branch --contains={self.release_commit} {branch}") - if branch not in output: - raise RuntimeError( - f"commit {self.release_commit} must belong to {branch} " - f"for {self.release_type} release" - ) - - def _update_cmake_contributors( - self, version: ClickHouseVersion, reset_tweak: bool = True - ) -> None: - if reset_tweak: - desc = version.description - version = version.reset_tweak() - version.with_description(desc) - update_cmake_version(version) - update_contributors(raise_error=True) - if self.dry_run: - logging.info( - "Dry running, resetting the following changes in the repo:\n%s", - self.run(f"git diff '{self.CMAKE_PATH}' '{self.CONTRIBUTORS_PATH}'"), - ) - self.run(f"git checkout '{self.CMAKE_PATH}' '{self.CONTRIBUTORS_PATH}'") - - def _commit_cmake_contributors( - self, version: ClickHouseVersion, reset_tweak: bool = True - ) -> None: - if reset_tweak: - version = version.reset_tweak() - self.run( - f"git commit '{self.CMAKE_PATH}' '{self.CONTRIBUTORS_PATH}' " - f"-m 'Update autogenerated version to {version.string} and contributors'", - dry_run=self.dry_run, - ) - - @property - def bump_part(self) -> ClickHouseVersion.PART_TYPE: - if self.release_type == Release.NEW: - if self._version.minor >= 12: - return "major" - return "minor" - return "patch" - - @property - def has_rollback(self) -> bool: - return bool(self._rollback_stack) - - def log_rollback(self): - if self.has_rollback: - rollback = self._rollback_stack.copy() - rollback.reverse() - logging.info( - "To rollback the action run the following commands:\n %s", - "\n ".join(rollback), - ) - - def log_post_workflows(self): - logging.info( - "To verify all actions are running good visit the following links:\n %s", - "\n ".join( - f"https://github.com/{self.repo}/actions/workflows/{action}.yml" - for action in ("release", "tags_stable") - ), - ) - - @contextmanager - def create_release_branch(self): - self.check_no_tags_after() - # Create release branch - self.read_version() - assert self._version_new_tag is not None - with self._create_tag( - self._version_new_tag.describe, - self.release_commit, - f"Initial commit for release 
{self._version_new_tag.major}.{self._version_new_tag.minor}", - ): - with self._create_branch(self.release_branch, self.release_commit): - with self._checkout(self.release_branch, True): - with self._bump_release_branch(): - yield - - @contextmanager - def patch_release(self): - self.check_no_tags_after() - self.read_version() - version_type = self.get_stable_release_type() - self.version.with_description(version_type) - with self._create_gh_release(False): - self.version = self.version.update(self.bump_part) - self.version.with_description(version_type) - self._update_cmake_contributors(self.version) - # Checking out the commit of the branch and not the branch itself, - # then we are able to skip rollback - with self._checkout(f"{self.release_branch}^0", False): - current_commit = self.run("git rev-parse HEAD") - self._commit_cmake_contributors(self.version) - with self._push( - "HEAD", with_rollback_on_fail=False, remote_ref=self.release_branch - ): - # DO NOT PUT ANYTHING ELSE HERE - # The push must be the last action and mean the successful release - self._rollback_stack.append( - f"{self.dry_run_prefix}git push {self.repo.url} " - f"+{current_commit}:{self.release_branch}" - ) - yield - - @contextmanager - def new_release(self): - # Create branch for a version bump - self.read_version() - self.version = self.version.update(self.bump_part) - helper_branch = f"{self.version.major}.{self.version.minor}-prepare" - with self._create_branch(helper_branch, self.release_commit): - with self._checkout(helper_branch, True): - with self._bump_version_in_master(helper_branch): - yield - - @property - def version(self) -> ClickHouseVersion: - return self._version - - @version.setter - def version(self, version: ClickHouseVersion) -> None: - if not isinstance(version, ClickHouseVersion): - raise ValueError(f"version must be ClickHouseVersion, not {type(version)}") - self._version = version - - @property - def release_branch(self) -> str: - return self._release_branch - - @release_branch.setter - def release_branch(self, branch: str) -> None: - self._release_branch = release_branch(branch) - - @property - def release_commit(self) -> str: - return self._release_commit - - @release_commit.setter - def release_commit(self, release_commit: str) -> None: - self._release_commit = commit(release_commit) - - @property - def dry_run_prefix(self) -> str: - if self.dry_run: - return "# " - return "" - - @contextmanager - def _bump_release_branch(self): - # Update only git, original version stays the same - self._git.update() - new_version = self.version.copy() - version_type = self.get_stable_release_type() - pr_labels = f"--label {Labels.RELEASE}" - if version_type == VersionType.LTS: - pr_labels += f" --label {Labels.RELEASE_LTS}" - new_version.with_description(version_type) - self._update_cmake_contributors(new_version) - self._commit_cmake_contributors(new_version) - with self._push(self.release_branch): - with self._create_gh_label( - f"v{self.release_branch}-must-backport", "10dbed" - ): - with self._create_gh_label( - f"v{self.release_branch}-affected", "c2bfff" - ): - # The following command is rolled back by deleting branch - # in self._push - self.run( - f"gh pr create --repo {self.repo} --title " - f"'Release pull request for branch {self.release_branch}' " - f"--head {self.release_branch} {pr_labels} " - "--body 'This PullRequest is a part of ClickHouse release " - "cycle. It is used by CI system only. 
Do not perform any " - "changes with it.'", - dry_run=self.dry_run, - ) - # Here the release branch part is done. - # We don't create a release itself automatically to have a - # safe window to backport possible bug fixes. - yield - - @contextmanager - def _bump_version_in_master(self, helper_branch: str) -> Iterator[None]: - self.read_version() - self.version = self.version.update(self.bump_part) - self.version.with_description(VersionType.TESTING) - self._update_cmake_contributors(self.version) - self._commit_cmake_contributors(self.version) - # Create a version-new tag - self._version_new_tag = self.version.copy() - self._version_new_tag.tweak = 1 - self._version_new_tag.with_description(VersionType.NEW) - - with self._push(helper_branch): - body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") - # The following command is rolled back by deleting branch in self._push - self.run( - f"gh pr create --repo {self.repo} --title 'Update version after " - f"release' --head {helper_branch} --body-file '{body_file}' " - "--label 'do not test' --assignee @me", - dry_run=self.dry_run, - ) - # Here the new release part is done - yield - - @contextmanager - def _checkout(self, ref: str, with_checkout_back: bool = False) -> Iterator[None]: - self._git.update() - orig_ref = self._git.branch or self._git.sha - rollback_cmd = "" - if ref not in (self._git.branch, self._git.sha): - self.run(f"git checkout {ref}") - # checkout is not put into rollback_stack intentionally - rollback_cmd = f"git checkout {orig_ref}" - # always update version and git after checked out ref - self.read_version() - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back checked out %s for %s", ref, orig_ref) - self.run(f"git reset --hard; git checkout -f {orig_ref}") - raise - # Normal flow when we need to checkout back - if with_checkout_back and rollback_cmd: - self.run(rollback_cmd) - - @contextmanager - def _create_branch(self, name: str, start_point: str = "") -> Iterator[None]: - self.run(f"git branch {name} {start_point}") - - rollback_cmd = f"git branch -D {name}" - self._rollback_stack.append(rollback_cmd) - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back created branch %s", name) - self.run(rollback_cmd) - raise - - @contextmanager - def _create_gh_label(self, label: str, color_hex: str) -> Iterator[None]: - # API call, https://docs.github.com/en/rest/reference/issues#create-a-label - self.run( - f"gh api repos/{self.repo}/labels -f name={label} -f color={color_hex}", - dry_run=self.dry_run, - ) - rollback_cmd = ( - f"{self.dry_run_prefix}gh api repos/{self.repo}/labels/{label} -X DELETE" - ) - self._rollback_stack.append(rollback_cmd) - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back label %s", label) - self.run(rollback_cmd) - raise - - @contextmanager - def _create_gh_release(self, as_prerelease: bool) -> Iterator[None]: - tag = self.release_version.describe - with self._create_tag(tag, self.release_commit): - # Preserve tag if version is changed - prerelease = "" - if as_prerelease: - prerelease = "--prerelease" - self.run( - f"gh release create {prerelease} --repo {self.repo} " - f"--title 'Release {tag}' '{tag}'", - dry_run=self.dry_run, - ) - rollback_cmd = ( - f"{self.dry_run_prefix}gh release delete --yes " - f"--repo {self.repo} '{tag}'" - ) - self._rollback_stack.append(rollback_cmd) - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back release publishing") - 
self.run(rollback_cmd) - raise - - @contextmanager - def _create_tag( - self, tag: str, commit: str, tag_message: str = "" - ) -> Iterator[None]: - tag_message = tag_message or f"Release {tag}" - # Create tag even in dry-run - self.run(f"git tag -a -m '{tag_message}' '{tag}' {commit}") - rollback_cmd = f"git tag -d '{tag}'" - self._rollback_stack.append(rollback_cmd) - try: - with self._push(tag): - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back tag %s", tag) - self.run(rollback_cmd) - raise - - @contextmanager - def _push( - self, ref: str, with_rollback_on_fail: bool = True, remote_ref: str = "" - ) -> Iterator[None]: - if remote_ref == "": - remote_ref = ref - - self.run(f"git push {self.repo.url} {ref}:{remote_ref}", dry_run=self.dry_run) - if with_rollback_on_fail: - rollback_cmd = ( - f"{self.dry_run_prefix}git push -d {self.repo.url} {remote_ref}" - ) - self._rollback_stack.append(rollback_cmd) - - try: - yield - except (Exception, KeyboardInterrupt): - if with_rollback_on_fail: - logging.warning("Rolling back pushed ref %s", ref) - self.run(rollback_cmd) - - raise - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description="Script to release a new ClickHouse version, requires `git` and " - "`gh` (github-cli) commands " - "!!! LAUNCH IT ONLY FROM THE MASTER BRANCH !!!", - ) - - parser.add_argument( - "--commit", - required=True, - type=commit, - help="commit create a release", - ) - parser.add_argument( - "--repo", - default="ClickHouse/ClickHouse", - help="repository to create the release", - ) - parser.add_argument( - "--remote-protocol", - "-p", - default="ssh", - choices=Repo.VALID, - help="repo protocol for git commands remote, 'origin' is a special case and " - "uses 'origin' as a remote", - ) - parser.add_argument( - "--type", - required=True, - choices=Release.VALID_TYPE, - dest="release_type", - help="a release type to bump the major.minor.patch version part, " - "new branch is created only for the value 'new'", - ) - parser.add_argument("--with-release-branch", default=True, help=argparse.SUPPRESS) - parser.add_argument("--check-dirty", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-check-dirty", - dest="check_dirty", - action="store_false", - default=argparse.SUPPRESS, - help="(dangerous) if set, skip check repository for uncommitted changes", - ) - parser.add_argument("--check-run-from-master", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-run-from-master", - dest="check_run_from_master", - action="store_false", - default=argparse.SUPPRESS, - help="(for development) if set, the script could run from non-master branch", - ) - parser.add_argument("--check-branch", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-check-branch", - dest="check_branch", - action="store_false", - default=argparse.SUPPRESS, - help="(debug or development only, dangerous) if set, skip the branch check for " - "a run. By default, 'new' type work only for master, and 'patch' " - "works only for a release branches, that name " - "should be the same as '$MAJOR.$MINOR' version, e.g. 
22.2", - ) - parser.add_argument( - "--dry-run", - action="store_true", - help="do not make any actual changes in the repo, just show what will be done", - ) - parser.add_argument( - "--with-stderr", - action="store_true", - help="if set, the stderr of all subprocess commands will be printed as well", - ) - - return parser.parse_args() - - -def main(): - logging.basicConfig(level=logging.INFO) - args = parse_args() - repo = Repo(args.repo, args.remote_protocol) - release = Release( - repo, args.commit, args.release_type, args.dry_run, args.with_stderr - ) - - try: - release.do(args.check_dirty, args.check_run_from_master, args.check_branch) - except: - if release.has_rollback: - logging.error( - "!!The release process finished with error, read the output carefully!!" - ) - logging.error( - "Probably, rollback finished with error. " - "If you don't see any of the following commands in the output, " - "execute them manually:" - ) - release.log_rollback() - raise - - -if __name__ == "__main__": - assert False, "Script Deprecated, ask ci team for help" - main() diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index 9a40ad1277f..d0aa034258a 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -311,24 +311,32 @@ class S3Helper: def list_prefix( self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET ) -> List[str]: - objects = self.client.list_objects_v2(Bucket=bucket, Prefix=s3_prefix_path) + paginator = self.client.get_paginator("list_objects_v2") + pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path) result = [] - if "Contents" in objects: - for obj in objects["Contents"]: - result.append(obj["Key"]) + for page in pages: + if "Contents" in page: + for obj in page["Contents"]: + result.append(obj["Key"]) return result def list_prefix_non_recursive( - self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET + self, + s3_prefix_path: str, + bucket: str = S3_BUILDS_BUCKET, + only_dirs: bool = False, ) -> List[str]: - objects = self.client.list_objects_v2(Bucket=bucket, Prefix=s3_prefix_path) + paginator = self.client.get_paginator("list_objects_v2") + pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path, Delimiter="/") result = [] - if "Contents" in objects: - for obj in objects["Contents"]: - if "/" not in obj["Key"][len(s3_prefix_path) + 1 :]: + for page in pages: + if not only_dirs and "Contents" in page: + for obj in page["Contents"]: result.append(obj["Key"]) - + if "CommonPrefixes" in page: + for obj in page["CommonPrefixes"]: + result.append(obj["Prefix"]) return result def url_if_exists(self, key: str, bucket: str = S3_BUILDS_BUCKET) -> str: diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 0e396b827ea..65418310c31 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -36,11 +36,12 @@ class TestCIConfig(unittest.TestCase): elif "binary_" in job.lower() or "package_" in job.lower(): if job.lower() in ( CI.BuildNames.PACKAGE_AARCH64, - CI.BuildNames.PACKAGE_ARM_ASAN, + CI.BuildNames.PACKAGE_AARCH64_ASAN, ): self.assertTrue( - CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER_ARM,), - f"Job [{job}] must have [{CI.Runners.BUILDER_ARM}] runner", + CI.JOB_CONFIGS[job].runner_type + in (CI.Runners.BUILDER_AARCH64,), + f"Job [{job}] must have [{CI.Runners.BUILDER_AARCH64}] runner", ) else: self.assertTrue( @@ -96,7 +97,7 @@ class TestCIConfig(unittest.TestCase): else: self.assertTrue(CI.JOB_CONFIGS[job].build_config is None) if "asan" in job and "aarch" in job: - expected_builds = 
[CI.BuildNames.PACKAGE_ARM_ASAN] + expected_builds = [CI.BuildNames.PACKAGE_AARCH64_ASAN] elif "asan" in job: expected_builds = [CI.BuildNames.PACKAGE_ASAN] elif "msan" in job: diff --git a/tests/ci/test_ci_options.py b/tests/ci/test_ci_options.py index 536e18758f8..e1b780387e7 100644 --- a/tests/ci/test_ci_options.py +++ b/tests/ci/test_ci_options.py @@ -10,7 +10,7 @@ from ci_settings import CiSettings _TEST_BODY_1 = """ #### Run only: - [ ] Some Set -- [x] Integration tests (arm64) +- [x] Integration tests (aarch64) - [x] Integration tests - [x] Integration tests - [ ] Integration tests @@ -150,7 +150,7 @@ class TestCIOptions(unittest.TestCase): self.assertFalse(ci_options.no_ci_cache) self.assertTrue(ci_options.no_merge_commit) self.assertTrue(ci_options.woolen_wolfdog) - self.assertEqual(ci_options.ci_sets, ["ci_set_arm"]) + self.assertEqual(ci_options.ci_sets, ["ci_set_aarch64"]) self.assertCountEqual(ci_options.include_keywords, ["foo", "foo_bar"]) self.assertCountEqual(ci_options.exclude_keywords, ["foo", "foo_bar"]) diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index 74bad7528c8..fee7ce841a6 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -27,6 +27,7 @@ 0.3 0.15 0.15 + 50 0 diff --git a/tests/fuzz/build.sh b/tests/fuzz/build.sh index 12f41f6e079..f60336e6b53 100755 --- a/tests/fuzz/build.sh +++ b/tests/fuzz/build.sh @@ -1,5 +1,8 @@ #!/bin/bash -eu +# rename clickhouse +mv $OUT/clickhouse $OUT/clickhouse_fuzzer + # copy fuzzer options and dictionaries cp $SRC/tests/fuzz/*.dict $OUT/ cp $SRC/tests/fuzz/*.options $OUT/ diff --git a/tests/fuzz/clickhouse_fuzzer.options b/tests/fuzz/clickhouse_fuzzer.options new file mode 100644 index 00000000000..a22ba7b3b88 --- /dev/null +++ b/tests/fuzz/clickhouse_fuzzer.options @@ -0,0 +1,2 @@ +[CI] +FUZZER_ARGS = true diff --git a/tests/fuzz/dictionaries/keywords.dict b/tests/fuzz/dictionaries/keywords.dict index abaaf9e53b5..a37675ebcad 100644 --- a/tests/fuzz/dictionaries/keywords.dict +++ b/tests/fuzz/dictionaries/keywords.dict @@ -538,6 +538,7 @@ "WITH ADMIN OPTION" "WITH CHECK" "WITH FILL" +"STALENESS" "WITH GRANT OPTION" "WITH NAME" "WITH REPLACE OPTION" diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 4d7ac457627..f4c66e00117 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -1,26 +1,49 @@ #!/usr/bin/env python3 import configparser +import datetime import logging import os import subprocess from pathlib import Path DEBUGGER = os.getenv("DEBUGGER", "") -FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") +TIMEOUT = int(os.getenv("TIMEOUT", "0")) +OUTPUT = "/test_output" -def run_fuzzer(fuzzer: str): +class Stopwatch: + def __init__(self): + self.reset() + + @property + def duration_seconds(self) -> float: + return (datetime.datetime.utcnow() - self.start_time).total_seconds() + + @property + def start_time_str(self) -> str: + return self.start_time_str_value + + def reset(self) -> None: + self.start_time = datetime.datetime.utcnow() + self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S") + + +def run_fuzzer(fuzzer: str, timeout: int): logging.info("Running fuzzer %s...", fuzzer) - corpus_dir = f"{fuzzer}.in" - with Path(corpus_dir) as path: + seed_corpus_dir = f"{fuzzer}.in" + with Path(seed_corpus_dir) as path: if not path.exists() or not path.is_dir(): - corpus_dir = "" + seed_corpus_dir = "" + active_corpus_dir = f"corpus/{fuzzer}" + if not os.path.exists(active_corpus_dir): + 
os.makedirs(active_corpus_dir) options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" fuzzer_arguments = "" + use_fuzzer_args = False with Path(options_file) as path: if path.exists() and path.is_file(): @@ -44,7 +67,9 @@ def run_fuzzer(fuzzer: str): if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( - f"-{key}={value}" for key, value in parser["libfuzzer"].items() + f"-{key}={value}" + for key, value in parser["libfuzzer"].items() + if key not in ("jobs", "exact_artifact_path") ) if parser.has_section("fuzzer_arguments"): @@ -53,19 +78,70 @@ def run_fuzzer(fuzzer: str): for key, value in parser["fuzzer_arguments"].items() ) - cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {corpus_dir}" - if custom_libfuzzer_options: - cmd_line += f" {custom_libfuzzer_options}" - if fuzzer_arguments: - cmd_line += f" {fuzzer_arguments}" + use_fuzzer_args = parser.getboolean("CI", "FUZZER_ARGS", fallback=False) - if not "-dict=" in cmd_line and Path(f"{fuzzer}.dict").exists(): - cmd_line += f" -dict={fuzzer}.dict" + exact_artifact_path = f"{OUTPUT}/{fuzzer}.unit" + status_path = f"{OUTPUT}/{fuzzer}.status" + out_path = f"{OUTPUT}/{fuzzer}.out" + stdout_path = f"{OUTPUT}/{fuzzer}.stdout" - cmd_line += " < /dev/null" + if not "-dict=" in custom_libfuzzer_options and Path(f"{fuzzer}.dict").exists(): + custom_libfuzzer_options += f" -dict={fuzzer}.dict" + custom_libfuzzer_options += f" -exact_artifact_path={exact_artifact_path}" - logging.info("...will execute: %s", cmd_line) - subprocess.check_call(cmd_line, shell=True) + libfuzzer_corpora = f"{active_corpus_dir} {seed_corpus_dir}" + + cmd_line = f"{DEBUGGER} ./{fuzzer} {fuzzer_arguments}" + + env = None + with_fuzzer_args = "" + if use_fuzzer_args: + env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}".strip()} + with_fuzzer_args = f" with FUZZER_ARGS '{env['FUZZER_ARGS']}'" + else: + cmd_line += f" {custom_libfuzzer_options} {libfuzzer_corpora}" + + logging.info("...will execute: '%s'%s", cmd_line, with_fuzzer_args) + + stopwatch = Stopwatch() + try: + with open(out_path, "wb") as out, open(stdout_path, "wb") as stdout: + subprocess.run( + cmd_line.split(), + stdin=subprocess.DEVNULL, + stdout=stdout, + stderr=out, + text=True, + check=True, + shell=False, + errors="replace", + timeout=timeout, + env=env, + ) + except subprocess.CalledProcessError: + logging.info("Fail running %s", fuzzer) + with open(status_path, "w", encoding="utf-8") as status: + status.write( + f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) + except subprocess.TimeoutExpired: + logging.info("Successful running %s", fuzzer) + with open(status_path, "w", encoding="utf-8") as status: + status.write( + f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) + except Exception as e: + logging.info("Unexpected exception running %s: %s", fuzzer, e) + with open(status_path, "w", encoding="utf-8") as status: + status.write( + f"ERROR\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) + else: + logging.info("Error running %s", fuzzer) + with open(status_path, "w", encoding="utf-8") as status: + status.write( + f"ERROR\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) def main(): @@ -73,10 +149,14 @@ def main(): subprocess.check_call("ls -al", shell=True) + timeout = 30 if TIMEOUT == 0 else TIMEOUT + with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): - run_fuzzer(fuzzer) + run_fuzzer(fuzzer.name, 
timeout) + + subprocess.check_call(f"ls -al {OUTPUT}", shell=True) if __name__ == "__main__": diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index bac783501e1..b24593602ec 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -83,6 +83,8 @@ CLICKHOUSE_ERROR_LOG_FILE = "/var/log/clickhouse-server/clickhouse-server.err.lo # This means that this minimum need to be, at least, 1 year older than the current release CLICKHOUSE_CI_MIN_TESTED_VERSION = "23.3" +ZOOKEEPER_CONTAINERS = ("zoo1", "zoo2", "zoo3") + # to create docker-compose env file def _create_env_file(path, variables): @@ -1651,6 +1653,7 @@ class ClickHouseCluster: copy_common_configs=True, config_root_name="clickhouse", extra_configs=[], + extra_args="", randomize_settings=True, ) -> "ClickHouseInstance": """Add an instance to the cluster. @@ -1738,6 +1741,7 @@ class ClickHouseCluster: with_postgres_cluster=with_postgres_cluster, with_postgresql_java_client=with_postgresql_java_client, clickhouse_start_command=clickhouse_start_command, + clickhouse_start_extra_args=extra_args, main_config_name=main_config_name, users_config_name=users_config_name, copy_common_configs=copy_common_configs, @@ -2061,6 +2065,11 @@ class ClickHouseCluster: container_id = self.get_container_id(instance_name) return self.docker_client.api.logs(container_id).decode() + def query_zookeeper(self, query, node=ZOOKEEPER_CONTAINERS[0], nothrow=False): + cmd = f'clickhouse keeper-client -p {self.zookeeper_port} -q "{query}"' + container_id = self.get_container_id(node) + return self.exec_in_container(container_id, cmd, nothrow=nothrow, use_cli=False) + def exec_in_container( self, container_id: str, @@ -2125,6 +2134,16 @@ class ClickHouseCluster: ], ) + def remove_file_from_container(self, container_id, path): + self.exec_in_container( + container_id, + [ + "bash", + "-c", + "rm {}".format(path), + ], + ) + def wait_for_url( self, url="http://localhost:8123/ping", conn_timeout=2, interval=2, timeout=60 ): @@ -2391,16 +2410,16 @@ class ClickHouseCluster: def wait_zookeeper_secure_to_start(self, timeout=20): logging.debug("Wait ZooKeeper Secure to start") - nodes = ["zoo1", "zoo2", "zoo3"] - self.wait_zookeeper_nodes_to_start(nodes, timeout) + self.wait_zookeeper_nodes_to_start(ZOOKEEPER_CONTAINERS, timeout) def wait_zookeeper_to_start(self, timeout: float = 180) -> None: logging.debug("Wait ZooKeeper to start") - nodes = ["zoo1", "zoo2", "zoo3"] - self.wait_zookeeper_nodes_to_start(nodes, timeout) + self.wait_zookeeper_nodes_to_start(ZOOKEEPER_CONTAINERS, timeout) def wait_zookeeper_nodes_to_start( - self, nodes: List[str], timeout: float = 60 + self, + nodes: List[str], + timeout: float = 60, ) -> None: start = time.time() err = Exception("") @@ -3226,7 +3245,11 @@ class ClickHouseCluster: return zk def run_kazoo_commands_with_retries( - self, kazoo_callback, zoo_instance_name="zoo1", repeats=1, sleep_for=1 + self, + kazoo_callback, + zoo_instance_name=ZOOKEEPER_CONTAINERS[0], + repeats=1, + sleep_for=1, ): zk = self.get_kazoo_client(zoo_instance_name) logging.debug( @@ -3347,6 +3370,7 @@ class ClickHouseInstance: with_postgres_cluster, with_postgresql_java_client, clickhouse_start_command=CLICKHOUSE_START_COMMAND, + clickhouse_start_extra_args="", main_config_name="config.xml", users_config_name="users.xml", copy_common_configs=True, @@ -3442,11 +3466,18 @@ class ClickHouseInstance: self.users_config_name = users_config_name self.copy_common_configs = copy_common_configs - 
self.clickhouse_start_command = clickhouse_start_command.replace( + clickhouse_start_command_with_conf = clickhouse_start_command.replace( "{main_config_file}", self.main_config_name ) - self.clickhouse_stay_alive_command = "bash -c \"trap 'pkill tail' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!\"".format( - clickhouse_start_command + + self.clickhouse_start_command = "{} -- {}".format( + clickhouse_start_command_with_conf, clickhouse_start_extra_args + ) + self.clickhouse_start_command_in_daemon = "{} --daemon -- {}".format( + clickhouse_start_command_with_conf, clickhouse_start_extra_args + ) + self.clickhouse_stay_alive_command = "bash -c \"trap 'pkill tail' INT TERM; {}; coproc tail -f /dev/null; wait $$!\"".format( + self.clickhouse_start_command_in_daemon ) self.path = p.join(self.cluster.instances_dir, name) @@ -3889,7 +3920,7 @@ class ClickHouseInstance: if pid is None: logging.debug("No clickhouse process running. Start new one.") self.exec_in_container( - ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + ["bash", "-c", self.clickhouse_start_command_in_daemon], user=str(os.getuid()), ) if expected_to_fail: @@ -4128,6 +4159,9 @@ class ClickHouseInstance: self.docker_id, local_path, dest_path ) + def remove_file_from_container(self, path): + return self.cluster.remove_file_from_container(self.docker_id, path) + def get_process_pid(self, process_name): output = self.exec_in_container( [ @@ -4206,7 +4240,7 @@ class ClickHouseInstance: user="root", ) self.exec_in_container( - ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + ["bash", "-c", self.clickhouse_start_command_in_daemon], user=str(os.getuid()), ) @@ -4287,7 +4321,7 @@ class ClickHouseInstance: ] ) self.exec_in_container( - ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + ["bash", "-c", self.clickhouse_start_command_in_daemon], user=str(os.getuid()), ) @@ -4648,9 +4682,7 @@ class ClickHouseInstance: depends_on.append("nats1") if self.with_zookeeper: - depends_on.append("zoo1") - depends_on.append("zoo2") - depends_on.append("zoo3") + depends_on += list(ZOOKEEPER_CONTAINERS) if self.with_minio: depends_on.append("minio1") @@ -4677,9 +4709,7 @@ class ClickHouseInstance: entrypoint_cmd = self.clickhouse_start_command if self.stay_alive: - entrypoint_cmd = self.clickhouse_stay_alive_command.replace( - "{main_config_file}", self.main_config_name - ) + entrypoint_cmd = self.clickhouse_stay_alive_command else: entrypoint_cmd = ( "[" diff --git a/tests/integration/helpers/config_manager.py b/tests/integration/helpers/config_manager.py new file mode 100644 index 00000000000..0a080a33477 --- /dev/null +++ b/tests/integration/helpers/config_manager.py @@ -0,0 +1,65 @@ +import os + + +class ConfigManager: + """Allows to temporarily add configuration files to the "config.d" or "users.d" directories. + + Can act as a context manager: + + with ConfigManager() as cm: + cm.add_main_config("configs/test_specific_config.xml") # copy "configs/test_specific_config.xml" to "/etc/clickhouse-server/config.d" + ... 
+ # "/etc/clickhouse-server/config.d/test_specific_config.xml" is removed automatically + + """ + + def __init__(self): + self.__added_configs = [] + + def add_main_config(self, node_or_nodes, local_path, reload_config=True): + """Temporarily adds a configuration file to the "config.d" directory.""" + self.__add_config( + node_or_nodes, local_path, dest_dir="config.d", reload_config=reload_config + ) + + def add_user_config(self, node_or_nodes, local_path, reload_config=True): + """Temporarily adds a configuration file to the "users.d" directory.""" + self.__add_config( + node_or_nodes, local_path, dest_dir="users.d", reload_config=reload_config + ) + + def reset(self, reload_config=True): + """Removes all configuration files added by this ConfigManager.""" + if not self.__added_configs: + return + for node, dest_path in self.__added_configs: + node.remove_file_from_container(dest_path) + if reload_config: + for node, _ in self.__added_configs: + node.query("SYSTEM RELOAD CONFIG") + self.__added_configs = [] + + def __add_config(self, node_or_nodes, local_path, dest_dir, reload_config): + nodes_to_add_config = ( + node_or_nodes if (type(node_or_nodes) is list) else [node_or_nodes] + ) + for node in nodes_to_add_config: + src_path = os.path.join(node.cluster.base_dir, local_path) + dest_path = os.path.join( + "/etc/clickhouse-server", dest_dir, os.path.basename(local_path) + ) + node.copy_file_to_container(src_path, dest_path) + if reload_config: + for node in nodes_to_add_config: + node.query("SYSTEM RELOAD CONFIG") + for node in nodes_to_add_config: + dest_path = os.path.join( + "/etc/clickhouse-server", dest_dir, os.path.basename(local_path) + ) + self.__added_configs.append((node, dest_path)) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.reset() diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 507894534d4..d293cae4dfd 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -170,6 +170,18 @@ "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_request_new_ticket_after_expiration", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_no_kdc", - "test_storage_kerberized_kafka/test.py::test_kafka_config_from_sql_named_collection" + "test_storage_kerberized_kafka/test.py::test_kafka_config_from_sql_named_collection", + "test_dns_cache/test.py::test_ip_change_drop_dns_cache", + "test_dns_cache/test.py::test_ip_change_update_dns_cache", + "test_dns_cache/test.py::test_dns_cache_update", + "test_dns_cache/test.py::test_user_access_ip_change", + "test_dns_cache/test.py::test_host_is_drop_from_cache_after_consecutive_failures", + "test_dns_cache/test.py::test_dns_resolver_filter", + + "test_https_replication/test_change_ip.py::test_replication_when_node_ip_changed", + + "test_host_regexp_multiple_ptr_records/test.py::test_host_regexp_multiple_ptr_v4_fails_with_wrong_resolution", + "test_host_regexp_multiple_ptr_records/test.py::test_host_regexp_multiple_ptr_v4", + "test_host_regexp_multiple_ptr_records/test.py::test_host_regexp_multiple_ptr_v6" ] diff --git a/tests/integration/test_backup_restore_on_cluster/configs/cluster_different_versions.xml b/tests/integration/test_backup_restore_on_cluster/configs/cluster_different_versions.xml new file mode 100644 index 00000000000..f70b255da18 --- /dev/null +++ 
b/tests/integration/test_backup_restore_on_cluster/configs/cluster_different_versions.xml @@ -0,0 +1,16 @@ + + + + + + new_node + 9000 + + + old_node + 9000 + + + + + diff --git a/tests/integration/test_backup_restore_on_cluster/configs/faster_zk_disconnect_detect.xml b/tests/integration/test_backup_restore_on_cluster/configs/faster_zk_disconnect_detect.xml new file mode 100644 index 00000000000..cfc6672ede4 --- /dev/null +++ b/tests/integration/test_backup_restore_on_cluster/configs/faster_zk_disconnect_detect.xml @@ -0,0 +1,12 @@ + + + + zoo1 + 2181 + + 500 + 0 + 1000 + 5000 + + diff --git a/tests/integration/test_backup_restore_on_cluster/configs/lesser_timeouts.xml b/tests/integration/test_backup_restore_on_cluster/configs/lesser_timeouts.xml index 0886f4bc722..38947be6a5d 100644 --- a/tests/integration/test_backup_restore_on_cluster/configs/lesser_timeouts.xml +++ b/tests/integration/test_backup_restore_on_cluster/configs/lesser_timeouts.xml @@ -1,6 +1,6 @@ - 1000 + 1000 10000 3000 diff --git a/tests/integration/test_backup_restore_on_cluster/configs/shutdown_cancel_backups.xml b/tests/integration/test_backup_restore_on_cluster/configs/shutdown_cancel_backups.xml new file mode 100644 index 00000000000..e0c0e0b32cd --- /dev/null +++ b/tests/integration/test_backup_restore_on_cluster/configs/shutdown_cancel_backups.xml @@ -0,0 +1,3 @@ + + false + diff --git a/tests/integration/test_backup_restore_on_cluster/configs/slow_backups.xml b/tests/integration/test_backup_restore_on_cluster/configs/slow_backups.xml new file mode 100644 index 00000000000..933c3250054 --- /dev/null +++ b/tests/integration/test_backup_restore_on_cluster/configs/slow_backups.xml @@ -0,0 +1,7 @@ + + + true + + 12 + 2 + diff --git a/tests/integration/test_backup_restore_on_cluster/configs/zookeeper_retries.xml b/tests/integration/test_backup_restore_on_cluster/configs/zookeeper_retries.xml index 1283f28a8cb..7af54d2dd95 100644 --- a/tests/integration/test_backup_restore_on_cluster/configs/zookeeper_retries.xml +++ b/tests/integration/test_backup_restore_on_cluster/configs/zookeeper_retries.xml @@ -1,9 +1,12 @@ - 1000 - 1 - 1 + 50 + 100 + 1000 + 10 + 2 + 3 42 0.002 diff --git a/tests/integration/test_backup_restore_on_cluster/test.py b/tests/integration/test_backup_restore_on_cluster/test.py index a1082c563d1..4d4fe0e665a 100644 --- a/tests/integration/test_backup_restore_on_cluster/test.py +++ b/tests/integration/test_backup_restore_on_cluster/test.py @@ -1153,7 +1153,7 @@ def test_get_error_from_other_host(): node1.query("INSERT INTO tbl VALUES (3)") backup_name = new_backup_name() - expected_error = "Got error from node2.*Table default.tbl was not found" + expected_error = "Got error from host node2.*Table default.tbl was not found" assert re.search( expected_error, node1.query_and_get_error( @@ -1162,8 +1162,7 @@ def test_get_error_from_other_host(): ) -@pytest.mark.parametrize("kill", [False, True]) -def test_stop_other_host_during_backup(kill): +def test_shutdown_waits_for_backup(): node1.query( "CREATE TABLE tbl ON CLUSTER 'cluster' (" "x UInt8" @@ -1182,7 +1181,7 @@ def test_stop_other_host_during_backup(kill): # If kill=False the pending backup must be completed # If kill=True the pending backup might be completed or failed - node2.stop_clickhouse(kill=kill) + node2.stop_clickhouse(kill=False) assert_eq_with_retry( node1, @@ -1192,22 +1191,11 @@ def test_stop_other_host_during_backup(kill): ) status = node1.query(f"SELECT status FROM system.backups WHERE id='{id}'").strip() - - if kill: - expected_statuses = 
["BACKUP_CREATED", "BACKUP_FAILED"] - else: - expected_statuses = ["BACKUP_CREATED", "BACKUP_CANCELLED"] - - assert status in expected_statuses + assert status == "BACKUP_CREATED" node2.start_clickhouse() - if status == "BACKUP_CREATED": - node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC") - node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}") - node1.query("SYSTEM SYNC REPLICA tbl") - assert node1.query("SELECT * FROM tbl ORDER BY x") == TSV([3, 5]) - elif status == "BACKUP_FAILED": - assert not os.path.exists( - os.path.join(get_path_to_backup(backup_name), ".backup") - ) + node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC") + node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}") + node1.query("SYSTEM SYNC REPLICA tbl") + assert node1.query("SELECT * FROM tbl ORDER BY x") == TSV([3, 5]) diff --git a/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py b/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py new file mode 100644 index 00000000000..f63dc2aef3d --- /dev/null +++ b/tests/integration/test_backup_restore_on_cluster/test_cancel_backup.py @@ -0,0 +1,780 @@ +import os +import random +import time +import uuid + +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.config_manager import ConfigManager +from helpers.network import PartitionManager +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) + +main_configs = [ + "configs/backups_disk.xml", + "configs/cluster.xml", + "configs/lesser_timeouts.xml", # Default timeouts are quite big (a few minutes), the tests don't need them to be that big. + "configs/slow_backups.xml", + "configs/shutdown_cancel_backups.xml", +] + +user_configs = [ + "configs/zookeeper_retries.xml", +] + +node1 = cluster.add_instance( + "node1", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node1", "shard": "shard1"}, + with_zookeeper=True, + stay_alive=True, # Necessary for "test_shutdown_cancel_backup" +) + +node2 = cluster.add_instance( + "node2", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node2", "shard": "shard1"}, + with_zookeeper=True, + stay_alive=True, # Necessary for "test_shutdown_cancel_backup" +) + +nodes = [node1, node2] + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_after_test(): + try: + yield + finally: + node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' SYNC") + + +# Utilities + + +# Gets a printable version the name of a node. +def get_node_name(node): + return "node1" if (node == node1) else "node2" + + +# Choose a random instance. +def random_node(): + return random.choice(nodes) + + +# Makes table "tbl" and fill it with data. +def create_and_fill_table(node, num_parts=10, on_cluster=True): + # We use partitioning to make sure there will be more files in a backup. + partition_by_clause = " PARTITION BY x%" + str(num_parts) if num_parts > 1 else "" + node.query( + "CREATE TABLE tbl " + + ("ON CLUSTER 'cluster' " if on_cluster else "") + + "(x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/tbl/', '{replica}') " + + "ORDER BY tuple()" + + partition_by_clause + ) + if num_parts > 0: + node.query(f"INSERT INTO tbl SELECT number FROM numbers({num_parts})") + + +# Generates an ID suitable both as backup id or restore id. 
+def random_id(): + return uuid.uuid4().hex + + +# Generates a backup name prepared for using in BACKUP and RESTORE queries. +def get_backup_name(backup_id): + return f"Disk('backups', '{backup_id}')" + + +# Reads the status of a backup or a restore from system.backups. +def get_status(initiator, backup_id=None, restore_id=None): + id = backup_id if backup_id is not None else restore_id + return initiator.query(f"SELECT status FROM system.backups WHERE id='{id}'").rstrip( + "\n" + ) + + +# Reads the error message of a failed backup or a failed restore from system.backups. +def get_error(initiator, backup_id=None, restore_id=None): + id = backup_id if backup_id is not None else restore_id + return initiator.query(f"SELECT error FROM system.backups WHERE id='{id}'").rstrip( + "\n" + ) + + +# Waits until the status of a backup or a restore becomes a desired one. +# Returns how many seconds the function was waiting. +def wait_status( + initiator, + status="BACKUP_CREATED", + backup_id=None, + restore_id=None, + timeout=None, +): + print(f"Waiting for status {status}") + id = backup_id if backup_id is not None else restore_id + operation_name = "backup" if backup_id is not None else "restore" + current_status = get_status(initiator, backup_id=backup_id, restore_id=restore_id) + waited = 0 + while ( + (current_status != status) + and (current_status in ["CREATING_BACKUP", "RESTORING"]) + and ((timeout is None) or (waited < timeout)) + ): + sleep_time = 1 if (timeout is None) else min(1, timeout - waited) + time.sleep(sleep_time) + waited += sleep_time + current_status = get_status( + initiator, backup_id=backup_id, restore_id=restore_id + ) + start_time, end_time = ( + initiator.query( + f"SELECT start_time, end_time FROM system.backups WHERE id='{id}'" + ) + .splitlines()[0] + .split("\t") + ) + print( + f"{get_node_name(initiator)} : Got status {current_status} for {operation_name} {id} after waiting {waited} seconds " + f"(start_time = {start_time}, end_time = {end_time})" + ) + assert current_status == status + + +# Returns how many entries are in system.processes corresponding to a specified backup or restore. +def get_num_system_processes( + node_or_nodes, backup_id=None, restore_id=None, is_initial_query=None +): + id = backup_id if backup_id is not None else restore_id + query_kind = "Backup" if backup_id is not None else "Restore" + total = 0 + filter_for_is_initial_query = ( + f" AND (is_initial_query = {is_initial_query})" + if is_initial_query is not None + else "" + ) + nodes_to_consider = ( + node_or_nodes if (type(node_or_nodes) is list) else [node_or_nodes] + ) + for node in nodes_to_consider: + count = int( + node.query( + f"SELECT count() FROM system.processes WHERE (query_kind='{query_kind}') AND (query LIKE '%{id}%'){filter_for_is_initial_query}" + ) + ) + total += count + return total + + +# Waits until the number of entries in system.processes corresponding to a specified backup or restore becomes a desired one. +# Returns how many seconds the function was waiting. 
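Before the waiting helper that the comment above introduces, the probe assembled inside get_num_system_processes is easier to read once resolved. The sketch below assumes `node` and `backup_id` are the same objects the helper works with, and shows the case query_kind='Backup' restricted to the initiator; it is illustrative only.

# Resolved form of the per-node probe built by get_num_system_processes above,
# for a backup and is_initial_query=True; the helper sums this count over all
# nodes it is given.
count = int(
    node.query(
        f"SELECT count() FROM system.processes WHERE (query_kind='Backup') "
        f"AND (query LIKE '%{backup_id}%') AND (is_initial_query = True)"
    )
)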
+def wait_num_system_processes( + node_or_nodes, + num_system_processes=0, + backup_id=None, + restore_id=None, + is_initial_query=None, + timeout=None, +): + print(f"Waiting for number of system processes = {num_system_processes}") + id = backup_id if backup_id is not None else restore_id + operation_name = "backup" if backup_id is not None else "restore" + current_count = get_num_system_processes( + node_or_nodes, + backup_id=backup_id, + restore_id=restore_id, + is_initial_query=is_initial_query, + ) + + def is_current_count_ok(): + return (current_count == num_system_processes) or ( + num_system_processes == "1+" and current_count >= 1 + ) + + waited = 0 + while not is_current_count_ok() and ((timeout is None) or (waited < timeout)): + sleep_time = 1 if (timeout is None) else min(1, timeout - waited) + time.sleep(sleep_time) + waited += sleep_time + current_count = get_num_system_processes( + node_or_nodes, + backup_id=backup_id, + restore_id=restore_id, + is_initial_query=is_initial_query, + ) + if is_current_count_ok(): + print( + f"Got {current_count} system processes for {operation_name} {id} after waiting {waited} seconds" + ) + else: + nodes_to_consider = ( + node_or_nodes if (type(node_or_nodes) is list) else [node_or_nodes] + ) + for node in nodes_to_consider: + count = get_num_system_processes( + node, backup_id=backup_id, restore_id=restore_id + ) + print( + f"{get_node_name(node)}: Got {count} system processes for {operation_name} {id} after waiting {waited} seconds" + ) + assert False + return waited + + +# Kills a BACKUP or RESTORE query. +# Returns how many seconds the KILL QUERY was executing. +def kill_query( + node, backup_id=None, restore_id=None, is_initial_query=None, timeout=None +): + id = backup_id if backup_id is not None else restore_id + query_kind = "Backup" if backup_id is not None else "Restore" + operation_name = "backup" if backup_id is not None else "restore" + print(f"{get_node_name(node)}: Cancelling {operation_name} {id}") + filter_for_is_initial_query = ( + f" AND (is_initial_query = {is_initial_query})" + if is_initial_query is not None + else "" + ) + node.query( + f"KILL QUERY WHERE (query_kind='{query_kind}') AND (query LIKE '%{id}%'){filter_for_is_initial_query} SYNC" + ) + node.query("SYSTEM FLUSH LOGS") + duration = ( + int( + node.query( + f"SELECT query_duration_ms FROM system.query_log WHERE query_kind='KillQuery' AND query LIKE '%{id}%' AND type='QueryFinish'" + ) + ) + / 1000 + ) + print( + f"{get_node_name(node)}: Cancelled {operation_name} {id} after {duration} seconds" + ) + if timeout is not None: + assert duration < timeout + + +# Stops all ZooKeeper servers. +def stop_zookeeper_servers(zoo_nodes): + print(f"Stopping ZooKeeper servers {zoo_nodes}") + old_time = time.monotonic() + cluster.stop_zookeeper_nodes(zoo_nodes) + print( + f"Stopped ZooKeeper servers {zoo_nodes} in {time.monotonic() - old_time} seconds" + ) + + +# Starts all ZooKeeper servers back. +def start_zookeeper_servers(zoo_nodes): + print(f"Starting ZooKeeper servers {zoo_nodes}") + old_time = time.monotonic() + cluster.start_zookeeper_nodes(zoo_nodes) + print( + f"Started ZooKeeper servers {zoo_nodes} in {time.monotonic() - old_time} seconds" + ) + + +# Sleeps for random amount of time. 
+def random_sleep(max_seconds): + if random.randint(0, 5) > 0: + sleep(random.uniform(0, max_seconds)) + + +def sleep(seconds): + print(f"Sleeping {seconds} seconds") + time.sleep(seconds) + + +# Checks that BACKUP and RESTORE cleaned up properly with no trash left in ZooKeeper, backups folder, and logs. +class NoTrashChecker: + def __init__(self): + self.expect_backups = [] + self.expect_unfinished_backups = [] + self.expect_errors = [] + self.allow_errors = [] + self.check_zookeeper = True + + # Sleep 1 second to ensure this NoTrashChecker won't collect errors from a possible previous NoTrashChecker. + time.sleep(1) + + self.__start_time_for_collecting_errors = time.gmtime() + self.__previous_list_of_backups = set( + os.listdir(os.path.join(node1.cluster.instances_dir, "backups")) + ) + + self.__previous_list_of_znodes = set( + node1.query( + "SELECT name FROM system.zookeeper WHERE path = '/clickhouse/backups' " + + "AND NOT (name == 'alive_tracker')" + ).splitlines() + ) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + list_of_znodes = set( + node1.query( + "SELECT name FROM system.zookeeper WHERE path = '/clickhouse/backups' " + + "AND NOT (name == 'alive_tracker')" + ).splitlines() + ) + new_znodes = list_of_znodes.difference(self.__previous_list_of_znodes) + if new_znodes: + print(f"Found nodes in ZooKeeper: {new_znodes}") + for node in new_znodes: + print( + f"Nodes in '/clickhouse/backups/{node}':\n" + + node1.query( + f"SELECT name FROM system.zookeeper WHERE path = '/clickhouse/backups/{node}'" + ) + ) + print( + f"Nodes in '/clickhouse/backups/{node}/stage':\n" + + node1.query( + f"SELECT name FROM system.zookeeper WHERE path = '/clickhouse/backups/{node}/stage'" + ) + ) + if self.check_zookeeper: + assert new_znodes == set() + + list_of_backups = set( + os.listdir(os.path.join(node1.cluster.instances_dir, "backups")) + ) + new_backups = list_of_backups.difference(self.__previous_list_of_backups) + unfinished_backups = set( + backup + for backup in new_backups + if not os.path.exists( + os.path.join(node1.cluster.instances_dir, "backups", backup, ".backup") + ) + ) + new_backups = set( + backup for backup in new_backups if backup not in unfinished_backups + ) + if new_backups: + print(f"Found new backups: {new_backups}") + if unfinished_backups: + print(f"Found unfinished backups: {unfinished_backups}") + assert new_backups == set(self.expect_backups) + assert unfinished_backups == set(self.expect_unfinished_backups) + + all_errors = set() + start_time = time.strftime( + "%Y-%m-%d %H:%M:%S", self.__start_time_for_collecting_errors + ) + for node in nodes: + errors_query_result = node.query( + "SELECT name FROM system.errors WHERE last_error_time >= toDateTime('" + + start_time + + "') " + + "AND NOT ((name == 'KEEPER_EXCEPTION') AND (last_error_message LIKE '%Fault injection%')) " + + "AND NOT (name == 'NO_ELEMENTS_IN_CONFIG')" + ) + errors = errors_query_result.splitlines() + if errors: + print(f"{get_node_name(node)}: Found errors: {errors}") + print( + node.query( + "SELECT name, last_error_message FROM system.errors WHERE last_error_time >= toDateTime('" + + start_time + + "')" + ) + ) + for error in errors: + assert (error in self.expect_errors) or (error in self.allow_errors) + all_errors.update(errors) + + not_found_expected_errors = set(self.expect_errors).difference(all_errors) + if not_found_expected_errors: + print(f"Not found expected errors: {not_found_expected_errors}") + assert False + + +__backup_id_of_successful_backup = 
None + + +# Generates a backup which will be used to test RESTORE. +def get_backup_id_of_successful_backup(): + global __backup_id_of_successful_backup + if __backup_id_of_successful_backup is None: + __backup_id_of_successful_backup = random_id() + with NoTrashChecker() as no_trash_checker: + print("Will make backup successfully") + backup_id = __backup_id_of_successful_backup + create_and_fill_table(random_node()) + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + initiator.query( + f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC" + ) + wait_status(initiator, "BACKUP_CREATED", backup_id=backup_id) + assert get_num_system_processes(nodes, backup_id=backup_id) == 0 + no_trash_checker.expect_backups = [backup_id] + + # Dropping the table before restoring. + node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC") + + return __backup_id_of_successful_backup + + +# Actual tests + + +# Test that a BACKUP operation can be cancelled with KILL QUERY. +def test_cancel_backup(): + with NoTrashChecker() as no_trash_checker: + create_and_fill_table(random_node()) + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + backup_id = random_id() + initiator.query( + f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC" + ) + + assert get_status(initiator, backup_id=backup_id) == "CREATING_BACKUP" + assert get_num_system_processes(initiator, backup_id=backup_id) >= 1 + + # We shouldn't wait too long here, because otherwise the backup might be completed before we cancel it. + random_sleep(3) + + node_to_cancel, cancel_as_initiator = random.choice( + [(node1, False), (node2, False), (initiator, True)] + ) + + wait_num_system_processes( + node_to_cancel, + "1+", + backup_id=backup_id, + is_initial_query=cancel_as_initiator, + ) + + print( + f"Cancelling on {'initiator' if cancel_as_initiator else 'node'} {get_node_name(node_to_cancel)}" + ) + + # The timeout is 2 seconds here because a backup must be cancelled quickly. + kill_query( + node_to_cancel, + backup_id=backup_id, + is_initial_query=cancel_as_initiator, + timeout=3, + ) + + if cancel_as_initiator: + assert get_status(initiator, backup_id=backup_id) == "BACKUP_CANCELLED" + wait_status(initiator, "BACKUP_CANCELLED", backup_id=backup_id, timeout=3) + + assert "QUERY_WAS_CANCELLED" in get_error(initiator, backup_id=backup_id) + assert get_num_system_processes(nodes, backup_id=backup_id) == 0 + no_trash_checker.expect_errors = ["QUERY_WAS_CANCELLED"] + + +# Test that a RESTORE operation can be cancelled with KILL QUERY. +def test_cancel_restore(): + # Make backup. + backup_id = get_backup_id_of_successful_backup() + + # Cancel restoring. + with NoTrashChecker() as no_trash_checker: + print("Will cancel restoring") + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + restore_id = random_id() + initiator.query( + f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {get_backup_name(backup_id)} SETTINGS id='{restore_id}' ASYNC" + ) + + assert get_status(initiator, restore_id=restore_id) == "RESTORING" + assert get_num_system_processes(initiator, restore_id=restore_id) >= 1 + + # We shouldn't wait too long here, because otherwise the restore might be completed before we cancel it. 
+ random_sleep(3) + + node_to_cancel, cancel_as_initiator = random.choice( + [(node1, False), (node2, False), (initiator, True)] + ) + + wait_num_system_processes( + node_to_cancel, + "1+", + restore_id=restore_id, + is_initial_query=cancel_as_initiator, + ) + + print( + f"Cancelling on {'initiator' if cancel_as_initiator else 'node'} {get_node_name(node_to_cancel)}" + ) + + # The timeout is 3 seconds here because a restore must be cancelled quickly. + kill_query( + node_to_cancel, + restore_id=restore_id, + is_initial_query=cancel_as_initiator, + timeout=3, + ) + + if cancel_as_initiator: + assert get_status(initiator, restore_id=restore_id) == "RESTORE_CANCELLED" + wait_status(initiator, "RESTORE_CANCELLED", restore_id=restore_id, timeout=3) + + assert "QUERY_WAS_CANCELLED" in get_error(initiator, restore_id=restore_id) + assert get_num_system_processes(nodes, restore_id=restore_id) == 0 + no_trash_checker.expect_errors = ["QUERY_WAS_CANCELLED"] + + # Restore successfully. + with NoTrashChecker() as no_trash_checker: + print("Will restore from backup successfully") + restore_id = random_id() + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + initiator.query( + f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {get_backup_name(backup_id)} SETTINGS id='{restore_id}' ASYNC" + ) + + wait_status(initiator, "RESTORED", restore_id=restore_id) + assert get_num_system_processes(nodes, restore_id=restore_id) == 0 + + +# Test that shutdown cancels a running backup and doesn't wait until it finishes. +def test_shutdown_cancels_backup(): + with NoTrashChecker() as no_trash_checker: + create_and_fill_table(random_node()) + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + backup_id = random_id() + initiator.query( + f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC" + ) + + assert get_status(initiator, backup_id=backup_id) == "CREATING_BACKUP" + assert get_num_system_processes(initiator, backup_id=backup_id) >= 1 + + # We shouldn't wait too long here, because otherwise the backup might be completed before we cancel it. + random_sleep(3) + + node_to_restart = random.choice([node1, node2]) + wait_num_system_processes(node_to_restart, "1+", backup_id=backup_id) + + print(f"{get_node_name(node_to_restart)}: Restarting...") + node_to_restart.restart_clickhouse() # Must cancel the backup. + print(f"{get_node_name(node_to_restart)}: Restarted") + + wait_num_system_processes(nodes, 0, backup_id=backup_id) + + if initiator != node_to_restart: + assert get_status(initiator, backup_id=backup_id) == "BACKUP_CANCELLED" + assert "QUERY_WAS_CANCELLED" in get_error(initiator, backup_id=backup_id) + + # The information about this cancelled backup must be stored in system.backup_log + initiator.query("SYSTEM FLUSH LOGS") + assert initiator.query( + f"SELECT status FROM system.backup_log WHERE id='{backup_id}' ORDER BY status" + ) == TSV(["CREATING_BACKUP", "BACKUP_CANCELLED"]) + + no_trash_checker.expect_errors = ["QUERY_WAS_CANCELLED"] + + +# After an error, a backup should clean up the destination folder and the nodes it used in ZooKeeper. +# No unexpected errors must be generated. +def test_error_leaves_no_trash(): + with NoTrashChecker() as no_trash_checker: + # We create table "tbl" on one node only in order to make "BACKUP TABLE tbl ON CLUSTER" fail + # (because of the non-existing table on another node). 
+ create_and_fill_table(random_node(), on_cluster=False) + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + backup_id = random_id() + initiator.query( + f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC" + ) + + wait_status(initiator, "BACKUP_FAILED", backup_id=backup_id) + assert "UNKNOWN_TABLE" in get_error(initiator, backup_id=backup_id) + + assert get_num_system_processes(nodes, backup_id=backup_id) == 0 + no_trash_checker.expect_errors = ["UNKNOWN_TABLE"] + + +# A backup must be stopped if ZooKeeper is disconnected for longer than `failure_after_host_disconnected_for_seconds`. +def test_long_disconnection_stops_backup(): + with NoTrashChecker() as no_trash_checker, ConfigManager() as config_manager: + # Config "faster_zk_disconnect_detect.xml" is used in this test to decrease the number of retries when reconnecting to ZooKeeper. + # Without this config, this test can take several minutes (instead of seconds) to run. + config_manager.add_main_config(nodes, "configs/faster_zk_disconnect_detect.xml") + + create_and_fill_table(random_node(), num_parts=100) + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + backup_id = random_id() + initiator.query( + f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC", + settings={"backup_restore_failure_after_host_disconnected_for_seconds": 3}, + ) + + assert get_status(initiator, backup_id=backup_id) == "CREATING_BACKUP" + assert get_num_system_processes(initiator, backup_id=backup_id) >= 1 + + no_trash_checker.expect_unfinished_backups = [backup_id] + no_trash_checker.allow_errors = [ + "FAILED_TO_SYNC_BACKUP_OR_RESTORE", + "KEEPER_EXCEPTION", + "SOCKET_TIMEOUT", + "CANNOT_READ_ALL_DATA", + "NETWORK_ERROR", + "TABLE_IS_READ_ONLY", + ] + no_trash_checker.check_zookeeper = False + + with PartitionManager() as pm: + random_sleep(3) + + time_before_disconnection = time.monotonic() + + node_to_drop_zk_connection = random_node() + print( + f"Dropping connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" + ) + pm.drop_instance_zk_connections(node_to_drop_zk_connection) + + # While disconnected from ZooKeeper, the backup is expected to fail. + wait_status(initiator, "BACKUP_FAILED", backup_id=backup_id) + + time_to_fail = time.monotonic() - time_before_disconnection + error = get_error(initiator, backup_id=backup_id) + print(f"error={error}") + assert "Lost connection" in error + + # A backup is expected to fail, but it isn't expected to fail too soon. + print(f"Backup failed after {time_to_fail} seconds disconnection") + assert time_to_fail > 3 + assert time_to_fail < 30 + + +# A backup must NOT be stopped if ZooKeeper is disconnected for less than `failure_after_host_disconnected_for_seconds`. 
+def test_short_disconnection_doesnt_stop_backup(): + with NoTrashChecker() as no_trash_checker, ConfigManager() as config_manager: + use_faster_zk_disconnect_detect = random.choice([True, False]) + if use_faster_zk_disconnect_detect: + print("Using faster_zk_disconnect_detect.xml") + config_manager.add_main_config( + nodes, "configs/faster_zk_disconnect_detect.xml" + ) + + create_and_fill_table(random_node()) + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + backup_id = random_id() + initiator.query( + f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {get_backup_name(backup_id)} SETTINGS id='{backup_id}' ASYNC", + settings={"backup_restore_failure_after_host_disconnected_for_seconds": 6}, + ) + + assert get_status(initiator, backup_id=backup_id) == "CREATING_BACKUP" + assert get_num_system_processes(initiator, backup_id=backup_id) >= 1 + + # Dropping connection for less than `failure_after_host_disconnected_for_seconds` + with PartitionManager() as pm: + random_sleep(3) + node_to_drop_zk_connection = random_node() + print( + f"Dropping connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" + ) + pm.drop_instance_zk_connections(node_to_drop_zk_connection) + random_sleep(3) + print( + f"Restoring connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" + ) + + # Backup must be successful. + wait_status(initiator, "BACKUP_CREATED", backup_id=backup_id) + assert get_num_system_processes(nodes, backup_id=backup_id) == 0 + + no_trash_checker.expect_backups = [backup_id] + no_trash_checker.allow_errors = [ + "KEEPER_EXCEPTION", + "SOCKET_TIMEOUT", + "CANNOT_READ_ALL_DATA", + "NETWORK_ERROR", + "TABLE_IS_READ_ONLY", + ] + + +# A restore must NOT be stopped if ZooKeeper is disconnected for less than `failure_after_host_disconnected_for_seconds`. +def test_short_disconnection_doesnt_stop_restore(): + # Make a backup. + backup_id = get_backup_id_of_successful_backup() + + # Restore from the backup. + with NoTrashChecker() as no_trash_checker, ConfigManager() as config_manager: + use_faster_zk_disconnect_detect = random.choice([True, False]) + if use_faster_zk_disconnect_detect: + print("Using faster_zk_disconnect_detect.xml") + config_manager.add_main_config( + nodes, "configs/faster_zk_disconnect_detect.xml" + ) + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator") + + restore_id = random_id() + initiator.query( + f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {get_backup_name(backup_id)} SETTINGS id='{restore_id}' ASYNC", + settings={"backup_restore_failure_after_host_disconnected_for_seconds": 6}, + ) + + assert get_status(initiator, restore_id=restore_id) == "RESTORING" + assert get_num_system_processes(initiator, restore_id=restore_id) >= 1 + + # Dropping connection for less than `failure_after_host_disconnected_for_seconds` + with PartitionManager() as pm: + random_sleep(3) + node_to_drop_zk_connection = random_node() + print( + f"Dropping connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" + ) + pm.drop_instance_zk_connections(node_to_drop_zk_connection) + random_sleep(3) + print( + f"Restoring connection between {get_node_name(node_to_drop_zk_connection)} and ZooKeeper" + ) + + # Restore must be successful. 
+ wait_status(initiator, "RESTORED", restore_id=restore_id) + assert get_num_system_processes(nodes, restore_id=restore_id) == 0 + + no_trash_checker.allow_errors = [ + "KEEPER_EXCEPTION", + "SOCKET_TIMEOUT", + "CANNOT_READ_ALL_DATA", + "NETWORK_ERROR", + "TABLE_IS_READ_ONLY", + ] diff --git a/tests/integration/test_backup_restore_on_cluster/test_different_versions.py b/tests/integration/test_backup_restore_on_cluster/test_different_versions.py new file mode 100644 index 00000000000..b5eea7a1902 --- /dev/null +++ b/tests/integration/test_backup_restore_on_cluster/test_different_versions.py @@ -0,0 +1,125 @@ +import random + +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) + +main_configs = [ + "configs/backups_disk.xml", + "configs/cluster_different_versions.xml", +] + +user_configs = [] + +new_node = cluster.add_instance( + "new_node", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "new_node", "shard": "shard1"}, + with_zookeeper=True, +) + +old_node = cluster.add_instance( + "old_node", + image="clickhouse/clickhouse-server", + tag="24.9.2.42", + with_installed_binary=True, + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "old_node", "shard": "shard1"}, + with_zookeeper=True, +) + +nodes = [new_node, old_node] + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_after_test(): + try: + yield + finally: + new_node.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster_ver' SYNC") + + +backup_id_counter = 0 + + +def new_backup_name(): + global backup_id_counter + backup_id_counter += 1 + return f"Disk('backups', '{backup_id_counter}')" + + +# Gets a printable version the name of a node. +def get_node_name(node): + return "new_node" if (node == new_node) else "old_node" + + +# Choose a random instance. +def random_node(): + return random.choice(nodes) + + +def test_different_versions(): + new_node.query( + "CREATE TABLE tbl" + " ON CLUSTER 'cluster_ver'" + " (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/tbl/', '{replica}')" + " ORDER BY tuple()" + ) + + new_node.query(f"INSERT INTO tbl VALUES (1)") + old_node.query(f"INSERT INTO tbl VALUES (2)") + + backup_name = new_backup_name() + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator for BACKUP") + initiator.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster_ver' TO {backup_name}") + + new_node.query("DROP TABLE tbl ON CLUSTER 'cluster_ver' SYNC") + + initiator = random_node() + print(f"Using {get_node_name(initiator)} as initiator for RESTORE") + initiator.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster_ver' FROM {backup_name}") + + new_node.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster_ver' tbl") + assert new_node.query("SELECT * FROM tbl ORDER BY x") == TSV([1, 2]) + assert old_node.query("SELECT * FROM tbl ORDER BY x") == TSV([1, 2]) + + # Error NO_ELEMENTS_IN_CONFIG is unrelated. + assert ( + new_node.query( + "SELECT name, last_error_message FROM system.errors WHERE NOT (" + "(name == 'NO_ELEMENTS_IN_CONFIG')" + ")" + ) + == "" + ) + + # Error FAILED_TO_SYNC_BACKUP_OR_RESTORE: "No connection to host new_node:9000 yet, will retry" is generated by the old version + # when it fails to connect to other host because that other host hasn't started yet. 
+ # This is not an error actually, just an exception thrown and caught. The new version doesn't throw this exception. + assert ( + old_node.query( + "SELECT name, last_error_message FROM system.errors WHERE NOT (" + "(name == 'NO_ELEMENTS_IN_CONFIG') OR" + "((name == 'FAILED_TO_SYNC_BACKUP_OR_RESTORE') AND (last_error_message == 'No connection to host new_node:9000 yet, will retry'))" + ")" + ) + == "" + ) diff --git a/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py index 846c41592f7..3dea986e3d9 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_disallow_concurrency.py @@ -145,7 +145,7 @@ def wait_for_restore(node, restore_id): def check_backup_error(error): expected_errors = [ - "Concurrent backups not supported", + "Concurrent backups are not allowed", "BACKUP_ALREADY_EXISTS", ] assert any([expected_error in error for expected_error in expected_errors]) @@ -153,7 +153,7 @@ def check_backup_error(error): def check_restore_error(error): expected_errors = [ - "Concurrent restores not supported", + "Concurrent restores are not allowed", "Cannot restore the table default.tbl because it already contains some data", ] assert any([expected_error in error for expected_error in expected_errors]) diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference b/tests/integration/test_fix_metadata_version/__init__.py similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference rename to tests/integration/test_fix_metadata_version/__init__.py diff --git a/tests/integration/test_fix_metadata_version/configs/config.xml b/tests/integration/test_fix_metadata_version/configs/config.xml new file mode 100644 index 00000000000..4662e6794e3 --- /dev/null +++ b/tests/integration/test_fix_metadata_version/configs/config.xml @@ -0,0 +1,16 @@ + + 9000 + + + + + + + + + default + + + + + diff --git a/tests/integration/test_fix_metadata_version/test.py b/tests/integration/test_fix_metadata_version/test.py new file mode 100644 index 00000000000..085872bba05 --- /dev/null +++ b/tests/integration/test_fix_metadata_version/test.py @@ -0,0 +1,73 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/config.xml"], + stay_alive=True, + with_zookeeper=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_fix_metadata_version(start_cluster): + zookeeper_path = "/clickhouse/test_fix_metadata_version" + replica = "replica1" + replica_path = f"{zookeeper_path}/replicas/{replica}" + + def get_metadata_versions(): + table_metadata_version = int( + node.query( + f""" + SELECT version + FROM system.zookeeper + WHERE path = '{zookeeper_path}' AND name = 'metadata' + """ + ).strip() + ) + + replica_metadata_version = int( + node.query( + f""" + SELECT value + FROM system.zookeeper + WHERE path = '{replica_path}' AND name = 'metadata_version' + """ + ).strip() + ) + + return table_metadata_version, replica_metadata_version + + node.query( + f""" + DROP TABLE IF EXISTS t SYNC; + CREATE TABLE t + ( + `x` UInt32 + ) + ENGINE = ReplicatedMergeTree('{zookeeper_path}', '{replica}') + ORDER BY x + """ + ) + + node.query("ALTER TABLE t (ADD COLUMN `y` 
UInt32)") + + assert get_metadata_versions() == (1, 1) + + cluster.query_zookeeper(f"set '{replica_path}/metadata_version' '0'") + + assert get_metadata_versions() == (1, 0) + + node.restart_clickhouse() + + assert get_metadata_versions() == (1, 1) diff --git a/tests/integration/test_http_handlers_config/test.py b/tests/integration/test_http_handlers_config/test.py index efba4f05748..cf291c6dedd 100644 --- a/tests/integration/test_http_handlers_config/test.py +++ b/tests/integration/test_http_handlers_config/test.py @@ -17,9 +17,10 @@ class SimpleCluster: cluster.start() def add_instance(self, name, config_dir): - script_path = os.path.dirname(os.path.realpath(__file__)) return self.cluster.add_instance( - name, main_configs=[os.path.join(script_path, config_dir, "config.xml")] + name, + main_configs=[os.path.join(config_dir, "config.xml")], + user_configs=["users.d/users.yaml"], ) @@ -96,6 +97,16 @@ def test_dynamic_query_handler(): == res_custom_ct.headers["X-Test-Http-Response-Headers-Even-Multiple"] ) + assert cluster.instance.http_request( + "test_dynamic_handler_auth_with_password?query=select+currentUser()" + ).content, "with_password" + assert cluster.instance.http_request( + "test_dynamic_handler_auth_with_password_fail?query=select+currentUser()" + ).status_code, 403 + assert cluster.instance.http_request( + "test_dynamic_handler_auth_without_password?query=select+currentUser()" + ).content, "without_password" + def test_predefined_query_handler(): with contextlib.closing( @@ -177,6 +188,16 @@ def test_predefined_query_handler(): ) assert b"max_threads\t1\n" == res1.content + assert cluster.instance.http_request( + "test_predefined_handler_auth_with_password" + ).content, "with_password" + assert cluster.instance.http_request( + "test_predefined_handler_auth_with_password_fail" + ).status_code, 403 + assert cluster.instance.http_request( + "test_predefined_handler_auth_without_password" + ).content, "without_password" + def test_fixed_static_handler(): with contextlib.closing( diff --git a/tests/integration/test_http_handlers_config/test_dynamic_handler/config.xml b/tests/integration/test_http_handlers_config/test_dynamic_handler/config.xml index 58fedbd9078..4900219f595 100644 --- a/tests/integration/test_http_handlers_config/test_dynamic_handler/config.xml +++ b/tests/integration/test_http_handlers_config/test_dynamic_handler/config.xml @@ -24,5 +24,32 @@ + + + GET + /test_dynamic_handler_auth_with_password + + dynamic_query_handler + with_password + password + + + + GET + /test_dynamic_handler_auth_with_password_fail + + dynamic_query_handler + with_password + + + + + GET + /test_dynamic_handler_auth_without_password + + dynamic_query_handler + without_password + + diff --git a/tests/integration/test_http_handlers_config/test_predefined_handler/config.xml b/tests/integration/test_http_handlers_config/test_predefined_handler/config.xml index a7804721f12..3c0ee3cd09a 100644 --- a/tests/integration/test_http_handlers_config/test_predefined_handler/config.xml +++ b/tests/integration/test_http_handlers_config/test_predefined_handler/config.xml @@ -33,5 +33,35 @@ INSERT INTO test_table(id, data) SELECT {id:UInt32}, {_request_body:String} + + + GET + /test_predefined_handler_auth_with_password + + predefined_query_handler + with_password + password + SELECT currentUser() + + + + GET + /test_predefined_handler_auth_with_password_fail + + predefined_query_handler + with_password + + SELECT currentUser() + + + + GET + /test_predefined_handler_auth_without_password + + 
predefined_query_handler + without_password + SELECT currentUser() + + diff --git a/tests/integration/test_http_handlers_config/users.d/users.yaml b/tests/integration/test_http_handlers_config/users.d/users.yaml new file mode 100644 index 00000000000..9ab8a84ae5a --- /dev/null +++ b/tests/integration/test_http_handlers_config/users.d/users.yaml @@ -0,0 +1,7 @@ +users: + with_password: + profile: default + password: password + without_password: + profile: default + no_password: 1 diff --git a/tests/integration/test_named_collections/test.py b/tests/integration/test_named_collections/test.py index ed80898ebc7..e2fa776a8f0 100644 --- a/tests/integration/test_named_collections/test.py +++ b/tests/integration/test_named_collections/test.py @@ -794,3 +794,17 @@ def test_keeper_storage_remove_on_cluster(cluster, ignore, expected_raise): node.query( f"DROP NAMED COLLECTION test_nc ON CLUSTER `replicated_nc_nodes_cluster`" ) + + +@pytest.mark.parametrize( + "instance_name", + [("node"), ("node_with_keeper")], +) +def test_name_escaping(cluster, instance_name): + node = cluster.instances[instance_name] + + node.query("DROP NAMED COLLECTION IF EXISTS `test_!strange/symbols!`;") + node.query("CREATE NAMED COLLECTION `test_!strange/symbols!` AS key1=1, key2=2") + node.restart_clickhouse() + + node.query("DROP NAMED COLLECTION `test_!strange/symbols!`") diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..5e4a960acdf 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -1,5 +1,6 @@ import concurrent import time +import uuid import pytest @@ -46,10 +47,10 @@ def started_cluster(): def test_simple_add_replica(started_cluster): - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + table_name = "test_simple_" + uuid.uuid4().hex create_query = ( - "CREATE TABLE test_simple " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " "PARTITION BY d ORDER BY a" @@ -58,91 +59,78 @@ def test_simple_add_replica(started_cluster): zero.query(create_query) first.query(create_query) - first.query("SYSTEM STOP FETCHES test_simple") + first.query(f"SYSTEM STOP FETCHES {table_name}") zero.query( - "INSERT INTO test_simple VALUES (1, '2011-01-01')", + f"INSERT INTO {table_name} VALUES (1, '2011-01-01')", settings={"insert_quorum": 1}, ) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "" == first.query(f"SELECT * from {table_name}") - first.query("SYSTEM START FETCHES test_simple") + first.query(f"SYSTEM START FETCHES {table_name}") - first.query("SYSTEM SYNC REPLICA test_simple", timeout=20) + first.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == first.query(f"SELECT * from {table_name}") second.query(create_query) - second.query("SYSTEM SYNC REPLICA test_simple", timeout=20) + second.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") - assert 
"1\t2011-01-01\n" == second.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == first.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == second.query(f"SELECT * from {table_name}") - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") def test_drop_replica_and_achieve_quorum(started_cluster): - zero.query( - "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster" - ) - + table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex create_query = ( - "CREATE TABLE test_drop_replica_and_achieve_quorum " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " "PARTITION BY d ORDER BY a" ) - print("Create Replicated table with two replicas") zero.query(create_query) first.query(create_query) - print("Stop fetches on one replica. Since that, it will be isolated.") - first.query("SYSTEM STOP FETCHES test_drop_replica_and_achieve_quorum") - + first.query(f"SYSTEM STOP FETCHES {table_name}") print("Insert to other replica. This query will fail.") quorum_timeout = zero.query_and_get_error( - "INSERT INTO test_drop_replica_and_achieve_quorum(a,d) VALUES (1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES (1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." - assert TSV("1\t2011-01-01\n") == TSV( zero.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) - assert TSV("") == TSV( zero.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) - # TODO:(Mikhaylov) begin; maybe delete this lines. I want clickhouse to fetch parts and update quorum. 
print("START FETCHES first replica") - first.query("SYSTEM START FETCHES test_drop_replica_and_achieve_quorum") - + first.query(f"SYSTEM START FETCHES {table_name}") print("SYNC first replica") - first.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) + first.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) # TODO:(Mikhaylov) end - print("Add second replica") second.query(create_query) - print("SYNC second replica") - second.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) - + second.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) print("Quorum for previous insert achieved.") assert TSV("1\t2011-01-01\n") == TSV( second.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) @@ -155,7 +143,7 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): "test_quorum_insert_with_drop_partition_new_data" if add_new_data else "test_quorum_insert_with_drop_partition" - ) + ) + uuid.uuid4().hex zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( @@ -218,12 +206,12 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): "test_insert_quorum_with_move_partition_source_new_data" if add_new_data else "test_insert_quorum_with_move_partition_source" - ) + ) + uuid.uuid4().hex destination_table_name = ( "test_insert_quorum_with_move_partition_destination_new_data" if add_new_data else "test_insert_quorum_with_move_partition_destination" - ) + ) + uuid.uuid4().hex zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster") zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster") @@ -296,10 +284,10 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): def test_insert_quorum_with_ttl(started_cluster): - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + table_name = "test_insert_quorum_with_ttl_" + uuid.uuid4().hex create_query = ( - "CREATE TABLE test_insert_quorum_with_ttl " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " "PARTITION BY d ORDER BY a " @@ -311,12 +299,12 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query(create_query) first.query(create_query) - print("Stop fetches for test_insert_quorum_with_ttl at first replica.") - first.query("SYSTEM STOP FETCHES test_insert_quorum_with_ttl") + print(f"Stop fetches for {table_name} at first replica.") + first.query(f"SYSTEM STOP FETCHES {table_name}") print("Insert should fail since it can not reach the quorum.") quorum_timeout = zero.query_and_get_error( - "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." 
@@ -327,51 +315,49 @@ def test_insert_quorum_with_ttl(started_cluster): time.sleep(10) assert TSV("1\t2011-01-01\n") == TSV( zero.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) - print("Resume fetches for test_insert_quorum_with_ttl at first replica.") - first.query("SYSTEM START FETCHES test_insert_quorum_with_ttl") + print(f"Resume fetches for {table_name} at first replica.") + first.query(f"SYSTEM START FETCHES {table_name}") print("Sync first replica.") - first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") + first.query(f"SYSTEM SYNC REPLICA {table_name}") zero.query( - "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) print("Inserts should resume.") - zero.query("INSERT INTO test_insert_quorum_with_ttl(a, d) VALUES(2, '2012-02-02')") + zero.query(f"INSERT INTO {table_name}(a, d) VALUES(2, '2012-02-02')") - first.query("OPTIMIZE TABLE test_insert_quorum_with_ttl") - first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") - zero.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") + first.query(f"OPTIMIZE TABLE {table_name}") + first.query(f"SYSTEM SYNC REPLICA {table_name}") + zero.query(f"SYSTEM SYNC REPLICA {table_name}") assert TSV("2\t2012-02-02\n") == TSV( first.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) assert TSV("2\t2012-02-02\n") == TSV( first.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): - zero.query( - "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" - ) +def test_insert_quorum_with_keeper_loss_connection(started_cluster): + table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex create_query = ( - "CREATE TABLE test_insert_quorum_with_keeper_loss" + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " "ORDER BY a " @@ -380,7 +366,7 @@ def test_insert_quorum_with_keeper_loss_connection(): zero.query(create_query) first.query(create_query) - first.query("SYSTEM STOP FETCHES test_insert_quorum_with_keeper_loss") + first.query(f"SYSTEM STOP FETCHES {table_name}") zero.query("SYSTEM ENABLE FAILPOINT replicated_merge_tree_commit_zk_fail_after_op") zero.query("SYSTEM ENABLE FAILPOINT replicated_merge_tree_insert_retry_pause") @@ -388,57 +374,60 @@ def test_insert_quorum_with_keeper_loss_connection(): with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: insert_future = executor.submit( lambda: zero.query( - "INSERT INTO test_insert_quorum_with_keeper_loss(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 150000}, ) ) - pm = PartitionManager() - pm.drop_instance_zk_connections(zero) + with PartitionManager() as pm: + pm.drop_instance_zk_connections(zero) - retries = 0 - zk = cluster.get_kazoo_client("zoo1") - while True: - if ( - zk.exists( - "/clickhouse/tables/test_insert_quorum_with_keeper_loss/replicas/zero/is_active" + retries = 0 + zk = 
cluster.get_kazoo_client("zoo1") + while True: + if ( + zk.exists( + f"/clickhouse/tables/{table_name}/replicas/zero/is_active" + ) + is None + ): + break + print("replica is still active") + time.sleep(1) + retries += 1 + if retries == 120: + raise Exception("Can not wait cluster replica inactive") + + first.query("SYSTEM ENABLE FAILPOINT finish_set_quorum_failed_parts") + quorum_fail_future = executor.submit( + lambda: first.query( + "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 ) - is None - ): - break - print("replica is still active") - time.sleep(1) - retries += 1 - if retries == 120: - raise Exception("Can not wait cluster replica inactive") - - first.query("SYSTEM ENABLE FAILPOINT finish_set_quorum_failed_parts") - quorum_fail_future = executor.submit( - lambda: first.query( - "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 ) - ) - first.query("SYSTEM START FETCHES test_insert_quorum_with_keeper_loss") + first.query(f"SYSTEM START FETCHES {table_name}") - concurrent.futures.wait([quorum_fail_future]) + concurrent.futures.wait([quorum_fail_future]) - assert quorum_fail_future.exception() is None + assert quorum_fail_future.exception() is None - zero.query("SYSTEM ENABLE FAILPOINT finish_clean_quorum_failed_parts") - clean_quorum_fail_parts_future = executor.submit( - lambda: first.query( - "SYSTEM WAIT FAILPOINT finish_clean_quorum_failed_parts", timeout=300 + zero.query("SYSTEM ENABLE FAILPOINT finish_clean_quorum_failed_parts") + clean_quorum_fail_parts_future = executor.submit( + lambda: first.query( + "SYSTEM WAIT FAILPOINT finish_clean_quorum_failed_parts", + timeout=300, + ) ) - ) - pm.restore_instance_zk_connections(zero) - concurrent.futures.wait([clean_quorum_fail_parts_future]) + pm.restore_instance_zk_connections(zero) + concurrent.futures.wait([clean_quorum_fail_parts_future]) - assert clean_quorum_fail_parts_future.exception() is None + assert clean_quorum_fail_parts_future.exception() is None - zero.query("SYSTEM DISABLE FAILPOINT replicated_merge_tree_insert_retry_pause") - concurrent.futures.wait([insert_future]) - assert insert_future.exception() is not None - assert not zero.contains_in_log("LOGICAL_ERROR") - assert zero.contains_in_log( - "fails to commit and will not retry or clean garbage" - ) + zero.query( + "SYSTEM DISABLE FAILPOINT replicated_merge_tree_insert_retry_pause" + ) + concurrent.futures.wait([insert_future]) + assert insert_future.exception() is not None + assert not zero.contains_in_log("LOGICAL_ERROR") + assert zero.contains_in_log( + "fails to commit and will not retry or clean garbage" + ) diff --git a/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py b/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py index 6d2bb0a3b70..7623a24c0ef 100644 --- a/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py +++ b/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py @@ -65,12 +65,24 @@ CREATE TABLE test_s3(c1 Int8, c2 Date) ENGINE = ReplicatedMergeTree('/test/table objects_after = get_objects_in_data_path() assert objects_before == objects_after + node1.query("DROP TABLE test_local SYNC") + node1.query("DROP TABLE test_s3 SYNC") def test_drop_complex_columns(started_cluster): + node1 = cluster.instances["node1"] + node1.query( + """ +CREATE TABLE warming_up( +id Int8 +) ENGINE = MergeTree +order by (id) SETTINGS storage_policy = 's3';""" + ) + + # Now we are sure that s3 storage is up and running start_objects = get_objects_in_data_path() 
print("Objects before", start_objects) - node1 = cluster.instances["node1"] + node1.query( """ CREATE TABLE test_s3_complex_types( @@ -104,3 +116,4 @@ vertical_merge_algorithm_min_columns_to_activate=1;""" end_objects = get_objects_in_data_path() print("Objects after drop", end_objects) assert start_objects == end_objects + node1.query("DROP TABLE warming_up SYNC") diff --git a/tests/integration/test_scheduler/test.py b/tests/integration/test_scheduler/test.py index e4ef83759e4..c8f16c150e1 100644 --- a/tests/integration/test_scheduler/test.py +++ b/tests/integration/test_scheduler/test.py @@ -921,10 +921,11 @@ def test_workload_entity_keeper_storage(): "select name, create_query from system.resources order by all", "select resource, path, type, weight, priority, max_requests, max_cost, max_speed, max_burst from system.scheduler where resource not in ['network_read', 'network_write'] order by all", ] - attempts = 10 + attempts = 30 value1 = "" value2 = "" error_query = "" + retry_period = 0.1 for attempt in range(attempts): for query in checks: value1 = node.query(query) @@ -934,7 +935,8 @@ def test_workload_entity_keeper_storage(): break # error else: break # success - time.sleep(0.5) + time.sleep(retry_period) + retry_period = min(3, retry_period * 1.5) else: raise Exception( f"query '{error_query}' gives different results after {attempts} attempts:\n=== leader node ===\n{value1}\n=== follower node ===\n{value2}" diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 0bade55415f..336ca824a2d 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -4193,7 +4193,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator ], "expected": { "raw_message": "050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801", - "error": "Cannot convert: String to UInt16", + "error": "Cannot parse string 'BAD' as UInt16", }, "printable": False, }, diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index 2fc62d7f511..2d34a52c17b 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -386,100 +386,6 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8(' conn.close() -def test_mysql_distributed(started_cluster): - table_name = "test_replicas" - - conn1 = get_mysql_conn(started_cluster, started_cluster.mysql8_ip) - conn2 = get_mysql_conn(started_cluster, started_cluster.mysql2_ip) - conn3 = get_mysql_conn(started_cluster, started_cluster.mysql3_ip) - conn4 = get_mysql_conn(started_cluster, started_cluster.mysql4_ip) - - create_mysql_db(conn1, "clickhouse") - create_mysql_db(conn2, "clickhouse") - create_mysql_db(conn3, "clickhouse") - create_mysql_db(conn4, "clickhouse") - - create_mysql_table(conn1, table_name) - create_mysql_table(conn2, table_name) - create_mysql_table(conn3, table_name) - create_mysql_table(conn4, table_name) - - node2.query("DROP TABLE IF EXISTS test_replicas") - - # Storage with with 3 replicas - node2.query( - """ - CREATE TABLE test_replicas - (id UInt32, name String, age UInt32, money UInt32) - ENGINE = MySQL('mysql{2|3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ - ) - - # Fill remote tables with different data to be able to check - nodes = [node1, node2, node2, node2] - for i in 
range(1, 5): - nodes[i - 1].query("DROP TABLE IF EXISTS test_replica{}".format(i)) - nodes[i - 1].query( - """ - CREATE TABLE test_replica{} - (id UInt32, name String, age UInt32, money UInt32) - ENGINE = MySQL('mysql{}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse');""".format( - i, 80 if i == 1 else i - ) - ) - nodes[i - 1].query( - "INSERT INTO test_replica{} (id, name) SELECT number, 'host{}' from numbers(10) ".format( - i, i - ) - ) - - # test multiple ports parsing - result = node2.query( - """SELECT DISTINCT(name) FROM mysql('mysql{80|2|3}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ - ) - assert result == "host1\n" or result == "host2\n" or result == "host3\n" - result = node2.query( - """SELECT DISTINCT(name) FROM mysql('mysql80:3306|mysql2:3306|mysql3:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ - ) - assert result == "host1\n" or result == "host2\n" or result == "host3\n" - - # check all replicas are traversed - query = "SELECT * FROM (" - for i in range(3): - query += "SELECT name FROM test_replicas UNION DISTINCT " - query += "SELECT name FROM test_replicas) ORDER BY name" - - result = node2.query(query) - assert result == "host2\nhost3\nhost4\n" - - # Storage with with two shards, each has 2 replicas - node2.query("DROP TABLE IF EXISTS test_shards") - - node2.query( - """ - CREATE TABLE test_shards - (id UInt32, name String, age UInt32, money UInt32) - ENGINE = ExternalDistributed('MySQL', 'mysql{80|2}:3306,mysql{3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ - ) - - # Check only one replica in each shard is used - result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - assert result == "host1\nhost3\n" - - # check all replicas are traversed - query = "SELECT name FROM (" - for i in range(3): - query += "SELECT name FROM test_shards UNION DISTINCT " - query += "SELECT name FROM test_shards) ORDER BY name" - result = node2.query(query) - assert result == "host1\nhost2\nhost3\nhost4\n" - - # disconnect mysql - started_cluster.pause_container("mysql80") - result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - started_cluster.unpause_container("mysql80") - assert result == "host2\nhost4\n" or result == "host3\nhost4\n" - - def test_external_settings(started_cluster): table_name = "test_external_settings" node1.query(f"DROP TABLE IF EXISTS {table_name}") diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py index aaecc7537cf..78bb1167d79 100644 --- a/tests/integration/test_storage_postgresql/test.py +++ b/tests/integration/test_storage_postgresql/test.py @@ -449,89 +449,6 @@ def test_concurrent_queries(started_cluster): node1.query("DROP TABLE test.stat;") -def test_postgres_distributed(started_cluster): - cursor0 = started_cluster.postgres_conn.cursor() - cursor1 = started_cluster.postgres2_conn.cursor() - cursor2 = started_cluster.postgres3_conn.cursor() - cursor3 = started_cluster.postgres4_conn.cursor() - cursors = [cursor0, cursor1, cursor2, cursor3] - - for i in range(4): - cursors[i].execute("DROP TABLE IF EXISTS test_replicas") - cursors[i].execute("CREATE TABLE test_replicas (id Integer, name Text)") - cursors[i].execute( - f"""INSERT INTO test_replicas select i, 'host{i+1}' from generate_series(0, 99) as t(i);""" - ) - - # test multiple ports parsing - result = node2.query( - """SELECT DISTINCT(name) FROM postgresql('postgres{1|2|3}:5432', 'postgres', 'test_replicas', 'postgres', 
'mysecretpassword'); """ - ) - assert result == "host1\n" or result == "host2\n" or result == "host3\n" - result = node2.query( - """SELECT DISTINCT(name) FROM postgresql('postgres2:5431|postgres3:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ - ) - assert result == "host3\n" or result == "host2\n" - - # Create storage with with 3 replicas - node2.query("DROP TABLE IF EXISTS test_replicas") - node2.query( - """ - CREATE TABLE test_replicas - (id UInt32, name String) - ENGINE = PostgreSQL('postgres{2|3|4}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ - ) - - # Check all replicas are traversed - query = "SELECT name FROM (" - for i in range(3): - query += "SELECT name FROM test_replicas UNION DISTINCT " - query += "SELECT name FROM test_replicas) ORDER BY name" - result = node2.query(query) - assert result == "host2\nhost3\nhost4\n" - - # Create storage with with two two shards, each has 2 replicas - node2.query("DROP TABLE IF EXISTS test_shards") - - node2.query( - """ - CREATE TABLE test_shards - (id UInt32, name String, age UInt32, money UInt32) - ENGINE = ExternalDistributed('PostgreSQL', 'postgres{1|2}:5432,postgres{3|4}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ - ) - - # Check only one replica in each shard is used - result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - assert result == "host1\nhost3\n" - - node2.query( - """ - CREATE TABLE test_shards2 - (id UInt32, name String, age UInt32, money UInt32) - ENGINE = ExternalDistributed('PostgreSQL', postgres4, addresses_expr='postgres{1|2}:5432,postgres{3|4}:5432'); """ - ) - - result = node2.query("SELECT DISTINCT(name) FROM test_shards2 ORDER BY name") - assert result == "host1\nhost3\n" - - # Check all replicas are traversed - query = "SELECT name FROM (" - for i in range(3): - query += "SELECT name FROM test_shards UNION DISTINCT " - query += "SELECT name FROM test_shards) ORDER BY name" - result = node2.query(query) - assert result == "host1\nhost2\nhost3\nhost4\n" - - # Disconnect postgres1 - started_cluster.pause_container("postgres1") - result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - started_cluster.unpause_container("postgres1") - assert result == "host2\nhost4\n" or result == "host3\nhost4\n" - node2.query("DROP TABLE test_shards2") - node2.query("DROP TABLE test_shards") - node2.query("DROP TABLE test_replicas") - - def test_datetime_with_timezone(started_cluster): cursor = started_cluster.postgres_conn.cursor() cursor.execute("DROP TABLE IF EXISTS test_timezone") @@ -850,6 +767,7 @@ def test_filter_pushdown(started_cluster): "INSERT INTO test_filter_pushdown.test_table VALUES (1, 10), (1, 110), (2, 0), (3, 33), (4, 0)" ) + node1.query("DROP TABLE IF EXISTS test_filter_pushdown_pg_table") node1.query( """ CREATE TABLE test_filter_pushdown_pg_table (id UInt32, value UInt32) @@ -857,12 +775,14 @@ def test_filter_pushdown(started_cluster): """ ) + node1.query("DROP TABLE IF EXISTS test_filter_pushdown_local_table") node1.query( """ CREATE TABLE test_filter_pushdown_local_table (id UInt32, value UInt32) ENGINE Memory AS SELECT * FROM test_filter_pushdown_pg_table """ ) + node1.query("DROP TABLE IF EXISTS ch_table") node1.query( "CREATE TABLE ch_table (id UInt32, pg_id UInt32) ENGINE MergeTree ORDER BY id" ) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index c495fc1d44f..284b304c632 100644 --- 
a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1403,8 +1403,8 @@ def test_shards_distributed(started_cluster, mode, processing_threads): # A unique path is necessary for repeatable tests keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" - files_to_generate = 300 - row_num = 300 + files_to_generate = 600 + row_num = 1000 total_rows = row_num * files_to_generate shards_num = 2 diff --git a/tests/performance/replacing_final_non_intersecting.xml b/tests/performance/replacing_final_non_intersecting.xml new file mode 100644 index 00000000000..b3d32f1ca2e --- /dev/null +++ b/tests/performance/replacing_final_non_intersecting.xml @@ -0,0 +1,26 @@ + + + + 0 + 0 + + + + CREATE TABLE replacing_final_non_intersecting (d DateTime, c1 UInt64, c2 String, c3 LowCardinality(String)) + ENGINE = ReplacingMergeTree() + ORDER BY d + + + INSERT INTO replacing_final_non_intersecting SELECT toDateTime('2020-10-10 00:00:00') - number, number, toString(number), toString(number % 1000) FROM numbers(0, 5000000) + OPTIMIZE TABLE replacing_final_non_intersecting FINAL + SYSTEM STOP MERGES replacing_final_non_intersecting + INSERT INTO replacing_final_non_intersecting SELECT toDateTime('2020-10-10 00:00:00') - number, number, toString(number), toString(number % 1000) FROM numbers(5000000, 500000) + + SELECT * FROM replacing_final_non_intersecting FINAL FORMAT Null SETTINGS enable_vertical_final = 0 + SELECT * FROM replacing_final_non_intersecting FINAL FORMAT Null SETTINGS enable_vertical_final = 1 + + DROP TABLE IF EXISTS replacing_final_non_intersecting + diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 85ffee8e44d..de6df7ac021 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -189,6 +189,9 @@ HDFS [] GLOBAL SOURCES S3 [] GLOBAL SOURCES HIVE [] GLOBAL SOURCES AZURE [] GLOBAL SOURCES +KAFKA [] GLOBAL SOURCES +NATS [] GLOBAL SOURCES +RABBITMQ [] GLOBAL SOURCES SOURCES [] \N ALL CLUSTER [] GLOBAL ALL ALL ['ALL PRIVILEGES'] \N \N diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql b/tests/queries/0_stateless/01825_new_type_json_10.sql index f586cc4477b..9aac35e2c88 100644 --- a/tests/queries/0_stateless/01825_new_type_json_10.sql +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -1,6 +1,7 @@ -- Tags: no-fasttest SET allow_experimental_json_type = 1; +SET allow_suspicious_types_in_order_by = 1; DROP TABLE IF EXISTS t_json_10; CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; diff --git a/tests/queries/0_stateless/01825_new_type_json_11.sh b/tests/queries/0_stateless/01825_new_type_json_11.sh index f448b7433ab..e9b90af4499 100755 --- a/tests/queries/0_stateless/01825_new_type_json_11.sh +++ b/tests/queries/0_stateless/01825_new_type_json_11.sh @@ -57,8 +57,8 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key_1[]))) as path FROM t_json_11 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(obj.key_1[].key_3[])))) as path FROM t_json_11 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_1[].key_3[].key_4[]))))) as path FROM t_json_11 order by path;" 
-$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" -$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3 FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" -$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3[].key_4[].key_5, obj.key_1[].key_3[].key_7 FROM t_json_11 ORDER BY obj.id" +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" --allow_suspicious_types_in_order_by 1 +$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3 FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" --allow_suspicious_types_in_order_by 1 +$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3[].key_4[].key_5, obj.key_1[].key_3[].key_7 FROM t_json_11 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_11;" diff --git a/tests/queries/0_stateless/01825_new_type_json_12.sh b/tests/queries/0_stateless/01825_new_type_json_12.sh index d7c938d7cd1..fd5b9fddd75 100755 --- a/tests/queries/0_stateless/01825_new_type_json_12.sh +++ b/tests/queries/0_stateless/01825_new_type_json_12.sh @@ -47,8 +47,8 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key_0[]))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(obj.key_0[].key_1[])))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_0[].key_1[].key_3[]))))) as path FROM t_json_12 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "SELECT obj.key_0[].key_1[].key_3[].key_4, obj.key_0[].key_1[].key_3[].key_5, \ - obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" + obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_12;" diff --git a/tests/queries/0_stateless/01825_new_type_json_13.sh b/tests/queries/0_stateless/01825_new_type_json_13.sh index 316e6890d5e..116665e58e3 100755 --- a/tests/queries/0_stateless/01825_new_type_json_13.sh +++ b/tests/queries/0_stateless/01825_new_type_json_13.sh @@ -39,12 +39,12 @@ EOF $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as path FROM t_json_13 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key1[]))) as path FROM t_json_13 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_13 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_13 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "SELECT \ obj.key_1.key_2.key_3.key_8, \ obj.key_1.key_2.key_3.key_4.key_5, \ obj.key_1.key_2.key_3.key_4.key_6, \ obj.key_1.key_2.key_3.key_4.key_7 \ -FROM t_json_13 ORDER BY obj.id" +FROM t_json_13 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_13;" diff --git a/tests/queries/0_stateless/01825_new_type_json_6.sh 
b/tests/queries/0_stateless/01825_new_type_json_6.sh index 6b9a7e71f50..a2102636c42 100755 --- a/tests/queries/0_stateless/01825_new_type_json_6.sh +++ b/tests/queries/0_stateless/01825_new_type_json_6.sh @@ -54,6 +54,6 @@ EOF $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) as path FROM t_json_6 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(data.out[]))) as path FROM t_json_6 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(data.out[].outputs[])))) as path FROM t_json_6 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT data.key, data.out[].type, data.out[].value, data.out[].outputs[].index, data.out[].outputs[].n FROM t_json_6 ORDER BY data.key" +$CLICKHOUSE_CLIENT -q "SELECT data.key, data.out[].type, data.out[].value, data.out[].outputs[].index, data.out[].outputs[].n FROM t_json_6 ORDER BY data.key" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_6;" diff --git a/tests/queries/0_stateless/01825_new_type_json_7.sh b/tests/queries/0_stateless/01825_new_type_json_7.sh index 36483175df6..b6ea46f5ff8 100755 --- a/tests/queries/0_stateless/01825_new_type_json_7.sh +++ b/tests/queries/0_stateless/01825_new_type_json_7.sh @@ -25,6 +25,6 @@ cat < 1 mark -1 [1,0] 0 -9000 [9000,0] 0 -Issue #69085: Reference vector computed by a subquery Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -40,4 +33,3 @@ Expression (Projection) Condition: true Parts: 1/1 Granules: 4/4 -index_granularity_bytes = 0 is disallowed diff --git a/tests/queries/0_stateless/02354_vector_search_bug_69085.sql b/tests/queries/0_stateless/02354_vector_search_bug_69085.sql new file mode 100644 index 00000000000..4dbcdf66e36 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_69085.sql @@ -0,0 +1,52 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; +SET enable_analyzer = 0; + +-- Issue #69085: Reference vector for vector search is computed by a subquery + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +-- works +EXPLAIN indexes = 1 +WITH [0., 2.] AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +-- does not work +EXPLAIN indexes = 1 +WITH ( + SELECT vec + FROM tab + LIMIT 1 +) AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +-- does not work as well +EXPLAIN indexes = 1 +WITH ( + SELECT [0., 2.] 
+) AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference b/tests/queries/0_stateless/02354_vector_search_bug_71381.reference similarity index 100% rename from tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference rename to tests/queries/0_stateless/02354_vector_search_bug_71381.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.sql b/tests/queries/0_stateless/02354_vector_search_bug_71381.sql new file mode 100644 index 00000000000..9e3246700b8 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_71381.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; + +-- Issue #71381: Usage of vector similarity index and further skipping indexes on the same table + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab( + val String, + vec Array(Float32), + INDEX ann_idx vec TYPE vector_similarity('hnsw', 'cosineDistance'), + INDEX set_idx val TYPE set(100) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES ('hello world', [0.0]); + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference b/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql b/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql new file mode 100644 index 00000000000..208b5b7a874 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that vector similarity indexes cannot be created with index_granularity_bytes = 0 + +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; + +-- If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. +-- SET allow_experimental_vector_similarity_index = 1; +-- CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; +-- INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); +-- WITH [1., 0.] AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; +-- As a workaround, force enabled adaptive index granularity for now (it is the default anyways). 
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -- { serverError INVALID_SETTING_VALUE } + +CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; +ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); -- { serverError INVALID_SETTING_VALUE } + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference b/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql b/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql new file mode 100644 index 00000000000..41b9d7869e4 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql @@ -0,0 +1,24 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; +SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; + +-- Vector similarity indexes reject INSERTs of Arrays with different sizes +INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } + +-- It is possible to create parts with different Array vector sizes but there will be an error at query time +SYSTEM STOP MERGES tab; +INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); +INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql rename to tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference new file mode 100644 index 00000000000..117bf2cead8 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference @@ -0,0 +1,2 @@ +1 [1,0] 0 +9000 [9000,0] 0 diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql new file mode 100644 index 00000000000..fb99dd2361c --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests correctness of vector similarity index with > 1 mark + +SET allow_experimental_vector_similarity_index = 1; +SET enable_analyzer = 0; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec 
Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); + +WITH [1.0, 0.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 1; + +WITH [9000.0, 0.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 1; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql deleted file mode 100644 index d55bdb88a76..00000000000 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ /dev/null @@ -1,119 +0,0 @@ --- Tags: no-fasttest, no-ordinary-database - --- Tests various bugs and special cases for vector indexes. - -SET allow_experimental_vector_similarity_index = 1; -SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof - -DROP TABLE IF EXISTS tab; - -SELECT 'Rejects INSERTs of Arrays with different sizes'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } -DROP TABLE tab; - -SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; - -CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; -INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } -INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } -DROP TABLE tab; - -SELECT 'It is possible to create parts with different Array vector sizes but there will be an error at query time'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -SYSTEM STOP MERGES tab; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); -INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); - -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } - -DROP TABLE tab; - -SELECT 'Correctness of index with > 1 mark'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; -INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); - -WITH [1.0, 0.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -WITH [9000.0, 0.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -DROP TABLE tab; - -SELECT 'Issue #69085: Reference vector computed by a subquery'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); - --- works -EXPLAIN indexes 
= 1 -WITH [0., 2.] AS reference_vec -SELECT - id, - vec, - cosineDistance(vec, reference_vec) AS distance -FROM tab -ORDER BY distance -LIMIT 1 -SETTINGS enable_analyzer = 0; - --- does not work -EXPLAIN indexes = 1 -WITH ( - SELECT vec - FROM tab - LIMIT 1 -) AS reference_vec -SELECT - id, - vec, - cosineDistance(vec, reference_vec) AS distance -FROM tab -ORDER BY distance -LIMIT 1 -SETTINGS enable_analyzer = 0; - --- does not work as well -EXPLAIN indexes = 1 -WITH ( - SELECT [0., 2.] -) AS reference_vec -SELECT - id, - vec, - cosineDistance(vec, reference_vec) AS distance -FROM tab -ORDER BY distance -LIMIT 1 -SETTINGS enable_analyzer = 0; - -DROP TABLE tab; - -SELECT 'index_granularity_bytes = 0 is disallowed'; - --- If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. --- SET allow_experimental_vector_similarity_index = 1; --- CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; --- INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); --- WITH [1., 0.] AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; --- As a workaround, force enabled adaptive index granularity for now (it is the default anyways). -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -- { serverError INVALID_SETTING_VALUE } - -CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); -- { serverError INVALID_SETTING_VALUE } - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_expansion_search.sql b/tests/queries/0_stateless/02354_vector_search_expansion_search.sql index fcbe9ee42b9..427148b829f 100644 --- a/tests/queries/0_stateless/02354_vector_search_expansion_search.sql +++ b/tests/queries/0_stateless/02354_vector_search_expansion_search.sql @@ -1,4 +1,4 @@ --- Tags: no-fasttest, long, no-asan, no-asan, no-ubsan, no-debug +-- Tags: no-fasttest, long, no-asan, no-ubsan, no-debug -- ^^ Disable test for slow builds: generating data takes time but a sufficiently large data set -- is necessary for different hnsw_candidate_list_size_for_search settings to make a difference @@ -14,7 +14,7 @@ CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similar -- Generate random values but with a fixed seed (conceptually), so that the data is deterministic. -- Unfortunately, no random functions in ClickHouse accepts a seed. Instead, abuse the numbers table + hash functions to provide -- deterministic randomness. 
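-- Editor's sketch (not part of the diff): the comment above describes the trick this test relies on.
-- Hashing a monotonic counter and dividing by the largest UInt64 yields reproducible pseudo-random
-- Float64 values in [0, 1], unlike rand(), which cannot be seeded.
SELECT number, sipHash64(number) / 18446744073709551615 AS pseudo_random
FROM numbers(5);
-- Re-running this query returns the same five values every time.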
-INSERT INTO tab SELECT number, [sipHash64(number)/18446744073709551615, wyHash64(number)/18446744073709551615] FROM numbers(370000); -- 18446744073709551615 is the biggest UInt64 +INSERT INTO tab SELECT number, [sipHash64(number)/18446744073709551615, wyHash64(number)/18446744073709551615] FROM numbers(660000); -- 18446744073709551615 is the biggest UInt64 -- hnsw_candidate_list_size_for_search = 0 is illegal WITH [0.5, 0.5] AS reference_vec diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 223a18b57bf..cf80f46f53c 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -67,7 +67,7 @@ Expression (Projection) Condition: true Parts: 1/1 Granules: 4/4 --- Non-default quantization +-- Test all distance metrics x all quantization 1 [2,3.2] 2.3323807824711897 4 [2.4,5.2] 3.9999999046325727 2 [4.2,3.4] 4.427188573446585 @@ -75,7 +75,7 @@ Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_f64) + ReadFromMergeTree (default.tab_l2_f64) Indexes: PrimaryKey Condition: true @@ -93,7 +93,7 @@ Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_f32) + ReadFromMergeTree (default.tab_l2_f32) Indexes: PrimaryKey Condition: true @@ -111,7 +111,7 @@ Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_f16) + ReadFromMergeTree (default.tab_l2_f16) Indexes: PrimaryKey Condition: true @@ -129,7 +129,7 @@ Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_bf16) + ReadFromMergeTree (default.tab_l2_bf16) Indexes: PrimaryKey Condition: true @@ -147,7 +147,97 @@ Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_i8) + ReadFromMergeTree (default.tab_l2_i8) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 3/4 +6 [1,9.3] 0.005731362878640178 +4 [2.4,5.2] 0.09204062768384846 +1 [2,3.2] 0.15200169244542905 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_cos_f64) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 3/4 +6 [1,9.3] 0.005731362878640178 +4 [2.4,5.2] 0.09204062768384846 +1 [2,3.2] 0.15200169244542905 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_cos_f32) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 3/4 +6 [1,9.3] 0.005731362878640178 +4 [2.4,5.2] 0.09204062768384846 +1 [2,3.2] 0.15200169244542905 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before 
ORDER BY) + ReadFromMergeTree (default.tab_cos_f16) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 3/4 +6 [1,9.3] 0.005731362878640178 +4 [2.4,5.2] 0.09204062768384846 +1 [2,3.2] 0.15200169244542905 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_cos_bf16) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 3/4 +6 [1,9.3] 0.005731362878640178 +4 [2.4,5.2] 0.09204062768384846 +1 [2,3.2] 0.15200169244542905 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_cos_i8) Indexes: PrimaryKey Condition: true diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 71b8a1e520a..0941f9a43d6 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -81,88 +81,181 @@ SETTINGS max_limit_for_ann_queries = 2; -- LIMIT 3 > 2 --> don't use the ann ind DROP TABLE tab; -SELECT '-- Non-default quantization'; -CREATE TABLE tab_f64(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f64', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -CREATE TABLE tab_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -CREATE TABLE tab_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -CREATE TABLE tab_bf16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'bf16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -CREATE TABLE tab_i8(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'i8', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab_f64 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -INSERT INTO tab_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -INSERT INTO tab_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -INSERT INTO tab_bf16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -INSERT INTO tab_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 
2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +SELECT '-- Test all distance metrics x all quantization'; + +DROP TABLE IF EXISTS tab_l2_f64; +DROP TABLE IF EXISTS tab_l2_f32; +DROP TABLE IF EXISTS tab_l2_f16; +DROP TABLE IF EXISTS tab_l2_bf16; +DROP TABLE IF EXISTS tab_l2_i8; +DROP TABLE IF EXISTS tab_cos_f64; +DROP TABLE IF EXISTS tab_cos_f32; +DROP TABLE IF EXISTS tab_cos_f16; +DROP TABLE IF EXISTS tab_cos_bf16; +DROP TABLE IF EXISTS tab_cos_i8; + +CREATE TABLE tab_l2_f64(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f64', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_bf16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'bf16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_i8(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'i8', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_f64(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f64', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_bf16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'bf16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_i8(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'i8', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; + +INSERT INTO tab_l2_f64 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_bf16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 
3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_f64 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_bf16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_f64 +FROM tab_l2_f64 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_f64 +FROM tab_l2_f64 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_f32 +FROM tab_l2_f32 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_f32 +FROM tab_l2_f32 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_f16 +FROM tab_l2_f16 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_f16 +FROM tab_l2_f16 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_bf16 +FROM tab_l2_bf16 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_bf16 +FROM tab_l2_bf16 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_i8 +FROM tab_l2_i8 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_i8 +FROM tab_l2_i8 ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -DROP TABLE tab_f64; -DROP TABLE tab_f32; -DROP TABLE tab_f16; -DROP TABLE tab_bf16; -DROP TABLE tab_i8; +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f64 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f64 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, 
cosineDistance(vec, reference_vec) +FROM tab_cos_f32 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f32 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_bf16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_bf16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_i8 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_i8 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab_l2_f64; +DROP TABLE tab_l2_f32; +DROP TABLE tab_l2_f16; +DROP TABLE tab_l2_bf16; +DROP TABLE tab_l2_i8; +DROP TABLE tab_cos_f64; +DROP TABLE tab_cos_f32; +DROP TABLE tab_cos_f16; +DROP TABLE tab_cos_bf16; +DROP TABLE tab_cos_i8; SELECT '-- Index on Array(Float64) column'; CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; diff --git a/tests/queries/0_stateless/02421_new_type_json_async_insert.sh b/tests/queries/0_stateless/02421_new_type_json_async_insert.sh index b23470a4179..3c863d83f2d 100755 --- a/tests/queries/0_stateless/02421_new_type_json_async_insert.sh +++ b/tests/queries/0_stateless/02421_new_type_json_async_insert.sh @@ -17,5 +17,5 @@ $CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_ wait -$CLICKHOUSE_CLIENT -q "SELECT data.k1 FROM t_json_async_insert ORDER BY data.k1" +$CLICKHOUSE_CLIENT -q "SELECT data.k1 FROM t_json_async_insert ORDER BY data.k1" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_async_insert" diff --git a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql index 1dbb3ef8158..cb53c4db7de 100644 --- a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql +++ b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql @@ -7,7 +7,7 @@ SYSTEM DROP QUERY CACHE; -- Run query whose result gets cached in the query cache. -- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care -- *that* they are passed and not about their effect. 
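-- Editor's sketch (not part of the diff): the replacement setting used below supersedes the removed
-- boolean knob. 'save' caches results of queries with nondeterministic functions anyway; 'throw'
-- and 'ignore' are assumed to be the other accepted values.
SELECT now() SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save';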
-SELECT 1 SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16; +SELECT 1 SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save', max_threads = 16; -- Check that entry in QC exists SELECT COUNT(*) FROM system.query_cache; diff --git a/tests/queries/0_stateless/02494_query_cache_system_tables.sql b/tests/queries/0_stateless/02494_query_cache_system_tables.sql index 7c9f01c4e91..12eaec0f8bc 100644 --- a/tests/queries/0_stateless/02494_query_cache_system_tables.sql +++ b/tests/queries/0_stateless/02494_query_cache_system_tables.sql @@ -44,9 +44,16 @@ SELECT * SETTINGS use_query_cache = 1; SELECT * FROM information_schema.tables SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } SELECT * FROM INFORMATION_SCHEMA.TABLES SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +-- Issue #69010: A system table name appears as a literal. That's okay and must not throw. +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (uid Int16, name String) ENGINE = Memory; +SELECT * FROM tab WHERE name = 'system.one' SETTINGS use_query_cache = true; +DROP TABLE tab; + -- System tables can be "hidden" inside e.g. table functions SELECT * FROM clusterAllReplicas('test_shard_localhost', system.one) SETTINGS use_query_cache = 1; -- {serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } SELECT * FROM clusterAllReplicas('test_shard_localhost', 'system.one') SETTINGS use_query_cache = 1; -- {serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +-- Note how in the previous query ^^ 'system.one' is also a literal. ClusterAllReplicas gets special handling. -- Criminal edge case that a user creates a table named "system". The query cache must not reject queries against it. 
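-- Editor's sketch (hypothetical session, not part of the diff): the edge case above comes down to
-- name resolution. A user table called "system" resolves to <current database>.system and may use
-- the query cache, while genuine system tables keep being rejected.
CREATE TABLE system (c UInt64) ENGINE = Memory;
SELECT * FROM system SETTINGS use_query_cache = 1;     -- allowed: an ordinary Memory table
SELECT * FROM system.one SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE }
DROP TABLE system;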
DROP TABLE IF EXISTS system; @@ -60,5 +67,4 @@ CREATE TABLE system.system (c UInt64) ENGINE = Memory; SElECT * FROM system.system SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } DROP TABLE system.system; --- Cleanup SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02718_parquet_metadata_format.reference b/tests/queries/0_stateless/02718_parquet_metadata_format.reference index 1f55c29da56..815968aeba5 100644 --- a/tests/queries/0_stateless/02718_parquet_metadata_format.reference +++ b/tests/queries/0_stateless/02718_parquet_metadata_format.reference @@ -78,7 +78,8 @@ "distinct_count": null, "min": "0", "max": "999" - } + }, + "have_bloom_filter": false }, { "name": "str", @@ -92,7 +93,8 @@ "distinct_count": null, "min": "Hello0", "max": "Hello999" - } + }, + "have_bloom_filter": false }, { "name": "mod", @@ -106,7 +108,8 @@ "distinct_count": null, "min": "0", "max": "8" - } + }, + "have_bloom_filter": false } ] }, @@ -128,7 +131,8 @@ "distinct_count": null, "min": "0", "max": "999" - } + }, + "have_bloom_filter": false }, { "name": "str", @@ -142,7 +146,8 @@ "distinct_count": null, "min": "Hello0", "max": "Hello999" - } + }, + "have_bloom_filter": false }, { "name": "mod", @@ -156,7 +161,8 @@ "distinct_count": null, "min": "0", "max": "8" - } + }, + "have_bloom_filter": false } ] } @@ -223,3 +229,55 @@ } 1 1 +{ + "num_columns": "1", + "num_rows": "5", + "num_row_groups": "1", + "format_version": "1.0", + "metadata_size": "267", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "columns": [ + { + "name": "ipv6", + "path": "ipv6", + "max_definition_level": "0", + "max_repetition_level": "0", + "physical_type": "FIXED_LEN_BYTE_ARRAY", + "logical_type": "None", + "compression": "GZIP", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "space_saved": "-21.9%", + "encodings": [ + "PLAIN", + "BIT_PACKED" + ] + } + ], + "row_groups": [ + { + "num_columns": "1", + "num_rows": "5", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "columns": [ + { + "name": "ipv6", + "path": "ipv6", + "total_compressed_size": "128", + "total_uncompressed_size": "105", + "have_statistics": true, + "statistics": { + "num_values": "5", + "null_count": "0", + "distinct_count": null, + "min": "27 32 150 125 17 250 66 31 157 44 75 218 51 50 19 144 ", + "max": "154 31 90 141 15 7 68 47 190 29 121 145 188 162 234 154 " + }, + "have_bloom_filter": true + } + ] + } + ] +} diff --git a/tests/queries/0_stateless/02718_parquet_metadata_format.sh b/tests/queries/0_stateless/02718_parquet_metadata_format.sh index 94d7f453850..c6371cff7a3 100755 --- a/tests/queries/0_stateless/02718_parquet_metadata_format.sh +++ b/tests/queries/0_stateless/02718_parquet_metadata_format.sh @@ -17,3 +17,4 @@ $CLICKHOUSE_LOCAL -q "select some_column from file('$CURDIR/data_parquet/02718_d $CLICKHOUSE_LOCAL -q "select num_columns from file('$CURDIR/data_parquet/02718_data.parquet', ParquetMetadata, 'num_columns Array(UInt32)')" 2>&1 | grep -c "BAD_ARGUMENTS" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_parquet/ipv6_bloom_filter.gz.parquet', ParquetMetadata) format JSONEachRow" | python3 -m json.tool diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference index e1bf9c27a81..7475cc7a97e 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference +++ 
b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference @@ -28,3 +28,19 @@ SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.` SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) DefaultCoordinator: Coordination done + +simple (global) join with analyzer and parallel replicas with local plan +4200000 4200000 4200000 -1400000 +4200006 4200006 4200006 -1400002 +4200012 4200012 4200012 -1400004 +4200018 4200018 4200018 -1400006 +4200024 4200024 4200024 -1400008 +4200030 4200030 4200030 -1400010 +4200036 4200036 4200036 -1400012 +4200042 4200042 4200042 -1400014 +4200048 4200048 4200048 -1400016 +4200054 4200054 4200054 -1400018 +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.`num_2` AS `__table1` (stage: WithMergeableState) + DefaultCoordinator: Coordination done +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) + DefaultCoordinator: Coordination done diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index b4271c3d29b..a6e755ebc35 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -27,6 +27,8 @@ inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 SETTINGS allow_experimental_analyzer=1" +PARALLEL_REPLICAS_SETTINGS="enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 0" + ############## echo echo "simple (global) join with analyzer and parallel replicas" @@ -35,17 +37,31 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 
+SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=0" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=0" 2>&1 | +grep "executeQuery\|.*Coordinator: Coordination done" | +grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | +sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' + +echo +echo "simple (global) join with analyzer and parallel replicas with local plan" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=1" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference index 297ec311f3e..f17d9aea3d5 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference @@ -11,7 +11,6 @@ simple (local) join with analyzer and parallel replicas 4200048 4200048 4200048 -1400016 4200054 4200054 4200054 -1400018 SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) DefaultCoordinator: Coordination done simple (local) join with analyzer and parallel replicas and full sorting merge join @@ -26,7 +25,6 @@ simple (local) join with analyzer and parallel replicas and full sorting 
merge j 4200048 4200048 4200048 -1400016 4200054 4200054 4200054 -1400018 SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) WithOrderCoordinator: Coordination done nested join with analyzer @@ -53,5 +51,4 @@ nested join with analyzer and parallel replicas, both local 420336 420336 420336 -140112 420378 420378 420378 -140126 SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) 
AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) WithOrderCoordinator: Coordination done diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh index ed13bf3321b..4768e308f1e 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh @@ -17,6 +17,8 @@ insert into num_1 select number * 2, toString(number * 2) from numbers(1e7); insert into num_2 select number * 3, -number from numbers(1.5e6); " +PARALLEL_REPLICAS_SETTINGS="allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 1, parallel_replicas_local_plan=1" + ############## echo echo "simple (local) join with analyzer and parallel replicas" @@ -25,17 +27,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' @@ -49,17 +47,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" +SETTINGS enable_analyzer=1, join_algorithm='full_sorting_merge', $PARALLEL_REPLICAS_SETTINGS" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, 
max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +SETTINGS enable_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' @@ -74,7 +68,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1" +SETTINGS enable_analyzer=1" ############## @@ -86,18 +80,14 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +SETTINGS enable_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' diff --git a/tests/queries/0_stateless/02989_variant_comparison.sql b/tests/queries/0_stateless/02989_variant_comparison.sql index e0dcbc97c27..4d09933fb7b 100644 --- a/tests/queries/0_stateless/02989_variant_comparison.sql +++ b/tests/queries/0_stateless/02989_variant_comparison.sql @@ -1,4 +1,5 @@ set allow_experimental_variant_type=1; +set allow_suspicious_types_in_order_by=1; create table test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) engine=Memory; diff --git a/tests/queries/0_stateless/03035_dynamic_sorting.sql b/tests/queries/0_stateless/03035_dynamic_sorting.sql index 6c584f57b1e..43d6568a14a 100644 --- a/tests/queries/0_stateless/03035_dynamic_sorting.sql +++ b/tests/queries/0_stateless/03035_dynamic_sorting.sql @@ -1,4 +1,5 @@ set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by=1; drop table if exists test; create table test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) engine=Memory; diff --git 
a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 index dde4f3f53c3..d6732d91e74 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 @@ -1,6 +1,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 index 3253d7a6c68..daf85077160 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 @@ -1,6 +1,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; diff --git a/tests/queries/0_stateless/03096_variant_in_primary_key.reference b/tests/queries/0_stateless/03096_variant_in_primary_key.reference deleted file mode 100644 index c701d7d3c26..00000000000 --- a/tests/queries/0_stateless/03096_variant_in_primary_key.reference +++ /dev/null @@ -1,4 +0,0 @@ -1 str_1 -1 str_2 -1 1 -1 2 diff --git a/tests/queries/0_stateless/03096_variant_in_primary_key.sql b/tests/queries/0_stateless/03096_variant_in_primary_key.sql deleted file mode 100644 index 48fbc821bcc..00000000000 --- a/tests/queries/0_stateless/03096_variant_in_primary_key.sql +++ /dev/null @@ -1,7 +0,0 @@ -set allow_experimental_variant_type=1; -drop table if exists test; -create table test (id UInt64, v Variant(UInt64, String)) engine=MergeTree order by (id, v); -insert into test values (1, 1), (1, 'str_1'), (1, 2), (1, 'str_2'); -select * from test; -drop table test; - diff --git a/tests/queries/0_stateless/03141_wildcard_grants.sql b/tests/queries/0_stateless/03141_wildcard_grants.sql index 45962d9b929..e71fa531134 100644 --- a/tests/queries/0_stateless/03141_wildcard_grants.sql +++ b/tests/queries/0_stateless/03141_wildcard_grants.sql @@ -19,4 +19,6 @@ REVOKE SELECT ON team*.* FROM user_03141; SHOW GRANTS FOR user_03141; SELECT '---'; +GRANT SELECT(bar) ON foo.test* TO user_03141; -- { clientError SYNTAX_ERROR } + DROP USER user_03141; diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql index 71d5dd4abd1..0e5119a38e0 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; DROP TABLE IF EXISTS null_table; CREATE TABLE null_table diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql index e476d34a1db..f00c1492e40 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql @@ -1,5 +1,6 @@ -SET allow_experimental_dynamic_type=1; -set min_compress_block_size = 585572, max_compress_block_size = 373374, max_block_size = 60768, max_joined_block_size_rows = 18966, max_insert_threads = 5, max_threads = 50, max_read_buffer_size = 708232, connect_timeout_with_failover_ms = 
2000, connect_timeout_with_failover_secure_ms = 3000, idle_connection_timeout = 36000, use_uncompressed_cache = true, stream_like_engine_allow_direct_select = true, replication_wait_for_inactive_replica_timeout = 30, compile_aggregate_expressions = false, min_count_to_compile_aggregate_expression = 0, compile_sort_description = false, group_by_two_level_threshold = 1000000, group_by_two_level_threshold_bytes = 12610083, enable_memory_bound_merging_of_aggregation_results = false, min_chunk_bytes_for_parallel_parsing = 18769830, merge_tree_coarse_index_granularity = 12, min_bytes_to_use_direct_io = 10737418240, min_bytes_to_use_mmap_io = 10737418240, log_queries = true, insert_quorum_timeout = 60000, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.05000000074505806, http_response_buffer_size = 294986, fsync_metadata = true, http_send_timeout = 60., http_receive_timeout = 60., opentelemetry_start_trace_probability = 0.10000000149011612, max_bytes_before_external_group_by = 1, max_bytes_before_external_sort = 10737418240, max_bytes_before_remerge_sort = 1326536545, max_untracked_memory = 1048576, memory_profiler_step = 1048576, log_comment = '03151_dynamic_type_scale_max_types.sql', send_logs_level = 'fatal', prefer_localhost_replica = false, optimize_read_in_order = false, optimize_aggregation_in_order = true, aggregation_in_order_max_block_bytes = 27069500, read_in_order_two_level_merge_threshold = 75, allow_introspection_functions = true, database_atomic_wait_for_drop_and_detach_synchronously = true, remote_filesystem_read_method = 'read', local_filesystem_read_prefetch = true, remote_filesystem_read_prefetch = false, merge_tree_compact_parts_min_granules_to_multibuffer_read = 119, async_insert_busy_timeout_max_ms = 5000, read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true, filesystem_cache_segments_batch_size = 10, use_page_cache_for_disks_without_file_cache = true, page_cache_inject_eviction = true, allow_prefetched_read_pool_for_remote_filesystem = false, filesystem_prefetch_step_marks = 50, filesystem_prefetch_min_bytes_for_single_read_task = 16777216, filesystem_prefetch_max_memory_usage = 134217728, filesystem_prefetches_limit = 10, optimize_sorting_by_input_stream_properties = false, allow_experimental_dynamic_type = true, session_timezone = 'Africa/Khartoum', prefer_warmed_unmerged_parts_seconds = 2; +SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; +SET optimize_read_in_order = 1; drop table if exists to_table; diff --git a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql index a18f985f217..429ac21b5eb 100644 --- a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql +++ b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql @@ -1,5 +1,6 @@ SET allow_experimental_dynamic_type=1; SET allow_experimental_variant_type=1; +SET allow_suspicious_types_in_order_by=1; CREATE TABLE test_variable (v Variant(String, UInt32, IPv6, Bool, DateTime64)) ENGINE = Memory; CREATE TABLE test_dynamic (d Dynamic) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index 28b679e2214..cf8ba687d3f 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -3,7 +3,7 @@ SET allow_experimental_dynamic_type=1; SET 
allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; - +SET allow_suspicious_types_in_order_by=1; CREATE TABLE t (d Dynamic(max_types=254)) ENGINE = Memory; -- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) diff --git a/tests/queries/0_stateless/03162_dynamic_type_nested.sql b/tests/queries/0_stateless/03162_dynamic_type_nested.sql index 94007459a9e..59c22491957 100644 --- a/tests/queries/0_stateless/03162_dynamic_type_nested.sql +++ b/tests/queries/0_stateless/03162_dynamic_type_nested.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; CREATE TABLE t (d Dynamic) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03163_dynamic_as_supertype.sql b/tests/queries/0_stateless/03163_dynamic_as_supertype.sql index baba637eea4..e859fbd1815 100644 --- a/tests/queries/0_stateless/03163_dynamic_as_supertype.sql +++ b/tests/queries/0_stateless/03163_dynamic_as_supertype.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; SELECT if(number % 2, number::Dynamic(max_types=3), ('str_' || toString(number))::Dynamic(max_types=2)) AS d, toTypeName(d), dynamicType(d) FROM numbers(4); CREATE TABLE dynamic_test_1 (d Dynamic(max_types=3)) ENGINE = Memory; INSERT INTO dynamic_test_1 VALUES ('str_1'), (42::UInt64); diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql index 1f6a025825a..eee3d70b8da 100644 --- a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql @@ -1,6 +1,7 @@ -- Tags: no-fasttest set allow_experimental_json_type = 1; +set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; insert into test select '{"a" : 42}'; diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql deleted file mode 100644 index 398494d56de..00000000000 --- a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql +++ /dev/null @@ -1,15 +0,0 @@ -set allow_experimental_json_type = 1; - -drop table if exists test; -create table test (s String) engine=MergeTree order by tuple(); -alter table test modify column s JSON; -- { serverError BAD_ARGUMENTS } -drop table test; - -create table test (s Array(String)) engine=MergeTree order by tuple(); -alter table test modify column s Array(JSON); -- { serverError BAD_ARGUMENTS } -drop table test; - -create table test (s Tuple(String, String)) engine=MergeTree order by tuple(); -alter table test modify column s Tuple(JSON, String); -- { serverError BAD_ARGUMENTS } -drop table test; - diff --git a/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql b/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql index 8a565fe36b9..60e2439d45f 100644 --- a/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql +++ b/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql @@ -1,4 +1,5 @@ set allow_experimental_dynamic_type=1; +set allow_suspicious_types_in_group_by=1; set cast_keep_nullable=1; SELECT toFixedString('str', 3), 3, CAST(if(1 = 0, toInt8(3), NULL), 'Int32') AS x from numbers(10) GROUP 
BY GROUPING SETS ((CAST(toInt32(1), 'Int32')), ('str', 3), (CAST(toFixedString('str', 3), 'Dynamic')), (CAST(toFixedString(toFixedString('str', 3), 3), 'Dynamic'))); diff --git a/tests/queries/0_stateless/03231_dynamic_incomplete_type_insert_bug.sql b/tests/queries/0_stateless/03231_dynamic_incomplete_type_insert_bug.sql index a6fc2e66480..4e845a66574 100644 --- a/tests/queries/0_stateless/03231_dynamic_incomplete_type_insert_bug.sql +++ b/tests/queries/0_stateless/03231_dynamic_incomplete_type_insert_bug.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (c0 Array(Dynamic)) ENGINE = MergeTree() ORDER BY tuple(); INSERT INTO t1 (c0) VALUES ([]); diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql deleted file mode 100644 index f207581f482..00000000000 --- a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql +++ /dev/null @@ -1,10 +0,0 @@ -SET allow_experimental_dynamic_type = 1; -DROP TABLE IF EXISTS t0; -DROP TABLE IF EXISTS t1; -CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY (c0); -CREATE TABLE t1 (c0 Array(Dynamic), c1 Int) ENGINE = MergeTree() ORDER BY (c0); -INSERT INTO t1 (c0, c1) VALUES ([18446717433683171873], 13623876564923702671), ([-4], 6111684076076982207); -SELECT 1 FROM t0 FINAL JOIN t1 ON TRUE; -DROP TABLE t0; -DROP TABLE t1; - diff --git a/tests/queries/0_stateless/03231_dynamic_uniq_group_by.sql b/tests/queries/0_stateless/03231_dynamic_uniq_group_by.sql index fe052027f56..d8869e71405 100644 --- a/tests/queries/0_stateless/03231_dynamic_uniq_group_by.sql +++ b/tests/queries/0_stateless/03231_dynamic_uniq_group_by.sql @@ -1,4 +1,6 @@ set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_group_by = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; create table test (d Dynamic(max_types=2)) engine=Memory; insert into test values (42), ('Hello'), ([1,2,3]), ('2020-01-01'); diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference new file mode 100644 index 00000000000..5983dd15f5b --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -0,0 +1,184 @@ +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql new file mode 100644 index 00000000000..a53b02e8e41 --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -0,0 +1,166 @@ +set allow_experimental_variant_type=1; +set 
allow_experimental_dynamic_type=1; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Variant(UInt64)) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Dynamic) engine=Memory; +insert into test select * from numbers(5); + +set allow_experimental_analyzer=1; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by 
tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +set allow_experimental_analyzer=0; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +drop table test; + +create table test (d Variant(UInt64)) engine=Memory; +insert into test select * from numbers(5); + +set allow_experimental_analyzer=1; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +set allow_experimental_analyzer=0; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- 
{serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +drop table test; diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference new file mode 100644 index 00000000000..8253c4fef48 --- /dev/null +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference @@ -0,0 +1,134 @@ +All paths: +['key0','key1','key2','key3','key4','key5'] +Shared data paths: +key2 +key3 +key4 +key5 +{"key0":"value0"} +{"key1":"value1"} +{"key0":"value2"} +{"key1":"value3"} +{"key0":"value4"} +{"key1":"value5"} +{"key0":"value6"} +{"key1":"value7"} +{"key0":"value8"} +{"key1":"value9"} +{"key2":"value60000"} +{"key3":"value60001"} +{"key2":"value60002"} +{"key3":"value60003"} +{"key2":"value60004"} +{"key3":"value60005"} +{"key2":"value60006"} +{"key3":"value60007"} +{"key2":"value60008"} +{"key3":"value60009"} +{"key4":"value120000"} +{"key5":"value120001"} +{"key4":"value120002"} +{"key5":"value120003"} +{"key4":"value120004"} +{"key5":"value120005"} +{"key4":"value120006"} +{"key5":"value120007"} +{"key4":"value120008"} +{"key5":"value120009"} +value0 \N \N \N \N \N +\N value1 \N \N \N \N +value2 \N \N \N \N \N +\N value3 \N \N \N \N +value4 \N \N \N \N \N +\N value5 \N \N \N \N +value6 \N \N \N \N \N +\N value7 \N \N \N \N +value8 \N \N \N \N \N +\N value9 \N \N \N \N +\N \N value60000 \N \N \N +\N \N \N value60001 \N \N +\N \N value60002 \N \N \N +\N \N \N value60003 \N \N +\N \N value60004 \N \N \N +\N \N \N value60005 \N \N +\N \N value60006 \N \N \N +\N \N \N value60007 \N \N +\N \N value60008 \N \N \N +\N \N \N value60009 \N \N +\N \N \N \N value120000 \N +\N \N \N \N \N value120001 +\N \N \N \N value120002 \N +\N \N \N \N \N value120003 +\N \N \N \N value120004 \N +\N \N \N \N \N value120005 +\N \N \N \N value120006 \N +\N \N \N \N \N value120007 +\N \N \N \N value120008 \N +\N \N \N \N \N value120009 +All paths: +['key0','key1','key2','key3','key4','key5'] +Shared data paths: +key2 +key3 +key4 +key5 +{"key0":"value0"} +{"key1":"value1"} +{"key0":"value2"} +{"key1":"value3"} +{"key0":"value4"} +{"key1":"value5"} +{"key0":"value6"} +{"key1":"value7"} +{"key0":"value8"} +{"key1":"value9"} +{"key2":"value60000"} +{"key3":"value60001"} +{"key2":"value60002"} +{"key3":"value60003"} +{"key2":"value60004"} 
+{"key3":"value60005"} +{"key2":"value60006"} +{"key3":"value60007"} +{"key2":"value60008"} +{"key3":"value60009"} +{"key4":"value120000"} +{"key5":"value120001"} +{"key4":"value120002"} +{"key5":"value120003"} +{"key4":"value120004"} +{"key5":"value120005"} +{"key4":"value120006"} +{"key5":"value120007"} +{"key4":"value120008"} +{"key5":"value120009"} +value0 \N \N \N \N \N +\N value1 \N \N \N \N +value2 \N \N \N \N \N +\N value3 \N \N \N \N +value4 \N \N \N \N \N +\N value5 \N \N \N \N +value6 \N \N \N \N \N +\N value7 \N \N \N \N +value8 \N \N \N \N \N +\N value9 \N \N \N \N +\N \N value60000 \N \N \N +\N \N \N value60001 \N \N +\N \N value60002 \N \N \N +\N \N \N value60003 \N \N +\N \N value60004 \N \N \N +\N \N \N value60005 \N \N +\N \N value60006 \N \N \N +\N \N \N value60007 \N \N +\N \N value60008 \N \N \N +\N \N \N value60009 \N \N +\N \N \N \N value120000 \N +\N \N \N \N \N value120001 +\N \N \N \N value120002 \N +\N \N \N \N \N value120003 +\N \N \N \N value120004 \N +\N \N \N \N \N value120005 +\N \N \N \N value120006 \N +\N \N \N \N \N value120007 +\N \N \N \N value120008 \N +\N \N \N \N \N value120009 diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 new file mode 100644 index 00000000000..2ccf2153699 --- /dev/null +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -0,0 +1,36 @@ +-- Random settings limits: index_granularity=(None, 60000) +-- Tags: long + +set allow_experimental_json_type = 1; +set max_block_size = 20000; + +drop table if exists test; + +{% for create_command in ['create table test (x UInt64, json String) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;', + 'create table test (x UInt64, json String) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;'] -%} + +{{ create_command }} + +insert into test select number, toJSONString(map('key' || multiIf(number < 60000, number % 2, number < 120000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(200000); + +alter table test modify column json JSON settings mutations_sync=1; + +select 'All paths:'; +select distinctJSONPaths(json) from test; +select 'Shared data paths:'; +select distinct (arrayJoin(JSONSharedDataPaths(json))) as path from test order by path; +select json from test order by x limit 10; +select json from test order by x limit 10 offset 60000; +select json from test order by x limit 10 offset 120000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 60000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 120000; + +select json from test format Null; +select json from test order by x format Null; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test format Null; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x format Null; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference new file mode 100644 index 00000000000..ca2fb7e8ff9 --- /dev/null +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference @@ -0,0 +1,12 @@ +5000 
+leonardomso/33-js-concepts 3 +ytdl-org/youtube-dl 3 +Bogdanp/neko 2 +bminossi/AllVideoPocsFromHackerOne 2 +disclose/diodata 2 +Commit 182 +chipeo345 119 +phanwi346 114 +Nicholas Piggin 95 +direwolf-github 49 +2 diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh new file mode 100755 index 00000000000..931d106120c --- /dev/null +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-s3-storage, long +# ^ no-s3-storage: too memory hungry + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" + +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \ + --max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsString" + +${CLICKHOUSE_CLIENT} -q "ALTER TABLE ghdata MODIFY column data JSON SETTINGS mutations_sync=1" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" + +${CLICKHOUSE_CLIENT} -q \ +"SELECT data.repo.name, count() AS stars FROM ghdata \ + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + +${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ +"SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ + ARRAY JOIN data.payload.commits[].author.name \ + GROUP BY name ORDER BY c DESC, name LIMIT 5" + +${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" diff --git a/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference new file mode 100644 index 00000000000..f87bb786c46 --- /dev/null +++ b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference @@ -0,0 +1,10 @@ +1 1 +1 1 + +0 0 +----- +1 1 +1 1 + +0 0 +----- diff --git a/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh new file mode 100755 index 00000000000..365d7abed7a --- /dev/null +++ b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --query=" +CREATE TABLE t +( + item_id UInt64, + price_sold Float32, + date Date +) +ENGINE = MergeTree +ORDER BY item_id; + +INSERT INTO t VALUES (1, 100, '1970-01-01'), (1, 200, '1970-01-02'); +" + +for enable_parallel_replicas in {0..1}; do + ${CLICKHOUSE_CLIENT} --query=" + --- Old analyzer uses different code path and it produces wrong result in this case. 
+ set enable_analyzer=1; + set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; + + SELECT * + FROM + ( + SELECT item_id + FROM t + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t + GROUP BY item_id + WITH TOTALS + ORDER BY item_id ASC + ) AS r ON l.item_id = r.item_id; + + SELECT '-----'; + " +done + +${CLICKHOUSE_CLIENT} --query=" +DROP TABLE t; +" diff --git a/tests/queries/0_stateless/03254_system_prewarm_mark_cache.reference b/tests/queries/0_stateless/03254_system_prewarm_mark_cache.reference new file mode 100644 index 00000000000..86674e7765a --- /dev/null +++ b/tests/queries/0_stateless/03254_system_prewarm_mark_cache.reference @@ -0,0 +1,4 @@ +20000 +20000 +1 +0 diff --git a/tests/queries/0_stateless/03254_system_prewarm_mark_cache.sql b/tests/queries/0_stateless/03254_system_prewarm_mark_cache.sql new file mode 100644 index 00000000000..f9e77365836 --- /dev/null +++ b/tests/queries/0_stateless/03254_system_prewarm_mark_cache.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel, no-shared-merge-tree + +DROP TABLE IF EXISTS t_prewarm_cache; + +CREATE TABLE t_prewarm_cache (a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03254_prewarm_mark_cache_smt/t_prewarm_cache', '1') +ORDER BY a SETTINGS prewarm_mark_cache = 0; + +SYSTEM DROP MARK CACHE; + +INSERT INTO t_prewarm_cache SELECT number, rand(), rand() FROM numbers(20000); + +SELECT count() FROM t_prewarm_cache WHERE NOT ignore(*); + +SYSTEM DROP MARK CACHE; + +SYSTEM PREWARM MARK CACHE t_prewarm_cache; + +SELECT count() FROM t_prewarm_cache WHERE NOT ignore(*); + +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['LoadedMarksCount'] > 0 FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT count() FROM t_prewarm_cache%' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_prewarm_cache; diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference new file mode 100644 index 00000000000..d846b26b72b --- /dev/null +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference @@ -0,0 +1,116 @@ +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` ALL LEFT JOIN (SELECT `__table4`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table4`) AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY 
`__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` ALL LEFT JOIN (SELECT `__table4`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table4`) AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_x_y_` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_x_y_` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh new file mode 100755 index 00000000000..19866f26949 --- /dev/null +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +# Tags: long, no-random-settings, no-random-merge-tree-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --query=" +CREATE TABLE t +( + item_id UInt64, + price_sold Float32, + date Date +) +ENGINE = MergeTree +ORDER BY item_id; + +CREATE TABLE t1 +( + item_id UInt64, + price_sold Float32, + date Date +) +ENGINE = MergeTree +ORDER BY item_id; + +INSERT INTO t SELECT number, number % 10, toDate(number) FROM numbers(100000); +INSERT INTO t1 SELECT number, number % 10, toDate(number) FROM numbers(100000); +" + +query1=" + SELECT sum(item_id) + FROM + ( + SELECT item_id + FROM t + GROUP BY item_id + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t1 + ) AS r ON l.item_id = r.item_id +" + +query2=" + SELECT sum(item_id) + FROM + ( + SELECT item_id + FROM t + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t1 + GROUP BY item_id + ) AS r ON l.item_id = r.item_id +" + +query3=" + SELECT sum(item_id) + FROM + ( + SELECT item_id, price_sold + FROM t + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t1 + ) AS r ON l.item_id = r.item_id + GROUP BY price_sold + ORDER BY price_sold +" + +for parallel_replicas_prefer_local_join in 1 0; do + for prefer_local_plan in {0..1}; do + for query in "${query1}" "${query2}" "${query3}"; do + for enable_parallel_replicas in {0..1}; do + ${CLICKHOUSE_CLIENT} --query=" + set enable_analyzer=1; + set parallel_replicas_prefer_local_join=${parallel_replicas_prefer_local_join}; + set parallel_replicas_local_plan=${prefer_local_plan}; + set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; + + --SELECT '----- enable_parallel_replicas=$enable_parallel_replicas prefer_local_plan=$prefer_local_plan parallel_replicas_prefer_local_join=$parallel_replicas_prefer_local_join -----'; + ${query}; + + SELECT replaceRegexpAll(replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1'), '(.*)_data_[\d]+_[\d]+(.*)', '\1_data_x_y_\2') + FROM + ( + EXPLAIN actions=1 ${query} + ) + WHERE explain LIKE '%ParallelReplicas%'; + " + done + done + done +done diff --git a/tests/queries/0_stateless/03257_json_escape_file_names.reference b/tests/queries/0_stateless/03257_json_escape_file_names.reference new file mode 100644 index 00000000000..f44e7d62cc1 --- /dev/null +++ b/tests/queries/0_stateless/03257_json_escape_file_names.reference @@ -0,0 +1,3 @@ +{"a-b-c":"43","a-b\\/c-d\\/e":"44","a\\/b\\/c":"42"} +42 43 44 +42 43 44 diff --git a/tests/queries/0_stateless/03257_json_escape_file_names.sql b/tests/queries/0_stateless/03257_json_escape_file_names.sql new file mode 100644 index 00000000000..9cc150170fd --- /dev/null +++ b/tests/queries/0_stateless/03257_json_escape_file_names.sql @@ -0,0 +1,10 @@ +set allow_experimental_json_type = 1; +drop table if exists test; +create table test (json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0; +insert into test format JSONAsObject {"a/b/c" : 42, "a-b-c" : 43, "a-b/c-d/e" : 44}; + +select * from test; +select json.`a/b/c`, json.`a-b-c`, json.`a-b/c-d/e` from test; +select json.`a/b/c`.:Int64, json.`a-b-c`.:Int64, json.`a-b/c-d/e`.:Int64 from test; +drop table test; + diff --git a/tests/queries/0_stateless/03258_dynamic_in_functions_weak_ptr_exception.reference b/tests/queries/0_stateless/03258_dynamic_in_functions_weak_ptr_exception.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03258_dynamic_in_functions_weak_ptr_exception.sql 
b/tests/queries/0_stateless/03258_dynamic_in_functions_weak_ptr_exception.sql new file mode 100644 index 00000000000..f825353c135 --- /dev/null +++ b/tests/queries/0_stateless/03258_dynamic_in_functions_weak_ptr_exception.sql @@ -0,0 +1,6 @@ +SET allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Tuple(c1 Int,c2 Dynamic)) ENGINE = Memory(); +SELECT 1 FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0; +DROP TABLE t0; + diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.reference b/tests/queries/0_stateless/03258_multiple_array_joins.reference new file mode 100644 index 00000000000..4d357c8ac80 --- /dev/null +++ b/tests/queries/0_stateless/03258_multiple_array_joins.reference @@ -0,0 +1,8 @@ +1 Michel Foucault alive no +1 Michel Foucault profession philosopher +1 Thomas Aquinas alive no +1 Thomas Aquinas profession philosopher +2 Nicola Tesla alive no +2 Nicola Tesla profession inventor +2 Thomas Edison alive no +2 Thomas Edison profession inventor diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.sql b/tests/queries/0_stateless/03258_multiple_array_joins.sql new file mode 100644 index 00000000000..ddfac1da080 --- /dev/null +++ b/tests/queries/0_stateless/03258_multiple_array_joins.sql @@ -0,0 +1,25 @@ +SET enable_analyzer = 1; +DROP TABLE IF EXISTS test_multiple_array_join; + +CREATE TABLE test_multiple_array_join ( + id UInt64, + person Nested ( + name String, + surname String + ), + properties Nested ( + key String, + value String + ) +) Engine=MergeTree ORDER BY id; + +INSERT INTO test_multiple_array_join VALUES (1, ['Thomas', 'Michel'], ['Aquinas', 'Foucault'], ['profession', 'alive'], ['philosopher', 'no']); +INSERT INTO test_multiple_array_join VALUES (2, ['Thomas', 'Nicola'], ['Edison', 'Tesla'], ['profession', 'alive'], ['inventor', 'no']); + +SELECT * +FROM test_multiple_array_join +ARRAY JOIN person +ARRAY JOIN properties +ORDER BY ALL; + +DROP TABLE test_multiple_array_join; diff --git a/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.reference b/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql b/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql new file mode 100644 index 00000000000..913de3b849c --- /dev/null +++ b/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql @@ -0,0 +1,23 @@ +WITH + multiIf('-1' = '-1', 10080, '-1' = '7', 60, '-1' = '1', 5, 1440) AS interval_start, -- noqa + multiIf('-1' = '-1', CEIL((today() - toDate('2017-06-22')) / 7)::UInt16, '-1' = '7', 168, '-1' = '1', 288, 90) AS days_run, -- noqa:L045 + block_time as (SELECT arrayJoin( + arrayMap( + i -> toDateTime(toStartOfInterval(now(), INTERVAL interval_start MINUTE) - interval_start * 60 * i, 'UTC'), + range(days_run) + ) + )), + +sales AS ( + SELECT + toDateTime(toStartOfInterval(now(), INTERVAL interval_start MINUTE), 'UTC') AS block_time + FROM + numbers(1) + GROUP BY + block_time + ORDER BY + block_time) + +SELECT + block_time +FROM sales where block_time >= (SELECT MIN(block_time) FROM sales) format Null; diff --git a/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference new file mode 100644 index 00000000000..69afec5d545 --- /dev/null +++ b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference @@ -0,0 +1,2 @@ +AggregateFunction(quantilesExactWeighted(0.2, 
0.4, 0.6, 0.8), UInt64, UInt8) +AggregateFunction(quantilesExactWeightedInterpolated(0.2, 0.4, 0.6, 0.8), UInt64, UInt8) diff --git a/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql new file mode 100644 index 00000000000..3069389f4e2 --- /dev/null +++ b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql @@ -0,0 +1,2 @@ +SELECT toTypeName(quantilesExactWeightedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); +SELECT toTypeName(quantilesExactWeightedInterpolatedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); diff --git a/tests/queries/0_stateless/03259_native_http_async_insert_settings.reference b/tests/queries/0_stateless/03259_native_http_async_insert_settings.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/03259_native_http_async_insert_settings.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/03259_native_http_async_insert_settings.sh b/tests/queries/0_stateless/03259_native_http_async_insert_settings.sh new file mode 100755 index 00000000000..c0934b06cc7 --- /dev/null +++ b/tests/queries/0_stateless/03259_native_http_async_insert_settings.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT -q "drop table if exists test" +$CLICKHOUSE_CLIENT -q "create table test (x UInt32) engine=Memory"; + +url="${CLICKHOUSE_URL}&async_insert=1&wait_for_async_insert=1" + +$CLICKHOUSE_LOCAL -q "select NULL::Nullable(UInt32) as x format Native" | ${CLICKHOUSE_CURL} -sS "$url&query=INSERT%20INTO%20test%20FORMAT%20Native" --data-binary @- + +$CLICKHOUSE_CLIENT -q "select * from test"; +$CLICKHOUSE_CLIENT -q "drop table test" + diff --git a/tests/queries/0_stateless/03260_dynamic_low_cardinality_dict_bug.reference b/tests/queries/0_stateless/03260_dynamic_low_cardinality_dict_bug.reference new file mode 100644 index 00000000000..8ae0f8e9f14 --- /dev/null +++ b/tests/queries/0_stateless/03260_dynamic_low_cardinality_dict_bug.reference @@ -0,0 +1,20 @@ +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 +12345678 diff --git a/tests/queries/0_stateless/03260_dynamic_low_cardinality_dict_bug.sql b/tests/queries/0_stateless/03260_dynamic_low_cardinality_dict_bug.sql new file mode 100644 index 00000000000..c5b981d5965 --- /dev/null +++ b/tests/queries/0_stateless/03260_dynamic_low_cardinality_dict_bug.sql @@ -0,0 +1,12 @@ +set allow_experimental_dynamic_type = 1; +set min_bytes_to_use_direct_io = 0; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity=1, use_adaptive_write_buffer_for_dynamic_subcolumns=0, max_compress_block_size=8, min_compress_block_size=8, use_compact_variant_discriminators_serialization=0; + +insert into test select number, '12345678'::LowCardinality(String) from numbers(20); + +select d.`LowCardinality(String)` from test settings max_threads=1; + +drop table test; + diff --git a/tests/queries/0_stateless/03261_delayed_streams_memory.reference b/tests/queries/0_stateless/03261_delayed_streams_memory.reference new file mode 100644 index 00000000000..7326d960397 --- /dev/null +++ 
b/tests/queries/0_stateless/03261_delayed_streams_memory.reference @@ -0,0 +1 @@ +Ok diff --git a/tests/queries/0_stateless/03261_delayed_streams_memory.sql b/tests/queries/0_stateless/03261_delayed_streams_memory.sql new file mode 100644 index 00000000000..863644a0dff --- /dev/null +++ b/tests/queries/0_stateless/03261_delayed_streams_memory.sql @@ -0,0 +1,20 @@ +-- Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_100_columns; + +CREATE TABLE t_100_columns (id UInt64, c0 String, c1 String, c2 String, c3 String, c4 String, c5 String, c6 String, c7 String, c8 String, c9 String, c10 String, c11 String, c12 String, c13 String, c14 String, c15 String, c16 String, c17 String, c18 String, c19 String, c20 String, c21 String, c22 String, c23 String, c24 String, c25 String, c26 String, c27 String, c28 String, c29 String, c30 String, c31 String, c32 String, c33 String, c34 String, c35 String, c36 String, c37 String, c38 String, c39 String, c40 String, c41 String, c42 String, c43 String, c44 String, c45 String, c46 String, c47 String, c48 String, c49 String, c50 String) +ENGINE = MergeTree +ORDER BY id PARTITION BY id % 50 +SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1.0, max_compress_block_size = '1M', storage_policy = 's3_cache'; + +SET max_insert_delayed_streams_for_parallel_write = 55; + +INSERT INTO t_100_columns (id) SELECT number FROM numbers(100); + +SYSTEM FLUSH LOGS; + +SELECT if (memory_usage < 300000000, 'Ok', format('Fail: memory usage {}', formatReadableSize(memory_usage))) +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO t_100_columns%' AND type = 'QueryFinish'; + +DROP TABLE t_100_columns; diff --git a/tests/queries/0_stateless/03261_json_hints_types_check.reference b/tests/queries/0_stateless/03261_json_hints_types_check.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_json_hints_types_check.sql b/tests/queries/0_stateless/03261_json_hints_types_check.sql new file mode 100644 index 00000000000..a407aa9474b --- /dev/null +++ b/tests/queries/0_stateless/03261_json_hints_types_check.sql @@ -0,0 +1,9 @@ +set allow_experimental_json_type=1; +set allow_experimental_variant_type=0; +set allow_experimental_object_type=0; + +select '{}'::JSON(a LowCardinality(Int128)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select '{}'::JSON(a FixedString(100000)); -- {serverError ILLEGAL_COLUMN} +select '{}'::JSON(a Variant(Int32)); -- {serverError ILLEGAL_COLUMN} +select '{}'::JSON(a Object('json')); -- {serverError ILLEGAL_COLUMN} + diff --git a/tests/queries/0_stateless/03261_mongodb_argumetns_crash.reference b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql new file mode 100644 index 00000000000..ca558ac6bc6 --- /dev/null +++ b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', NULL, 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', NULL, 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM 
mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', NULL, 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', materialize(1) + 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', 'x Int32', NULL); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb(NULL, 'test', 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE IF NOT EXISTS store_version ( `_id` String ) ENGINE = MongoDB(`localhost:27017`, mongodb, storeinfo, adminUser, adminUser); -- { serverError NAMED_COLLECTION_DOESNT_EXIST } diff --git a/tests/queries/0_stateless/03261_sort_cursor_crash.reference b/tests/queries/0_stateless/03261_sort_cursor_crash.reference new file mode 100644 index 00000000000..7299f2f5a5f --- /dev/null +++ b/tests/queries/0_stateless/03261_sort_cursor_crash.reference @@ -0,0 +1,4 @@ +42 +43 +44 +45 diff --git a/tests/queries/0_stateless/03261_sort_cursor_crash.sql b/tests/queries/0_stateless/03261_sort_cursor_crash.sql new file mode 100644 index 00000000000..b659f3d4a92 --- /dev/null +++ b/tests/queries/0_stateless/03261_sort_cursor_crash.sql @@ -0,0 +1,24 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/70779 +-- Crash in SortCursorImpl with the old analyzer, which produces a block with 0 columns and 1 row +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 42 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 43 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int) ENGINE = ReplacingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 44 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t1 (a0 UInt8, c0 Int32, c1 UInt8) ENGINE = AggregatingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t1 (a0, c0, c1) VALUES (1, 1, 1); +SELECT 45 FROM t1 FINAL PREWHERE t1.c0 = t1.c1; +DROP TABLE t1; diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference new file mode 100644 index 00000000000..0ae94e68663 --- /dev/null +++ b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference @@ -0,0 +1,23 @@ +Map to JSON +{"a":"0","b":"1970-01-01","c":[],"d":[{"e":"0"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} 
+{"a":"1","b":"1970-01-02","c":["0"],"d":[{"e":"1"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"2","b":"1970-01-03","c":["0","1"],"d":[{"e":"2"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"3","b":"1970-01-04","c":["0","1","2"],"d":[{"e":"3"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"4","b":"1970-01-05","c":["0","1","2","3"],"d":[{"e":"4"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a0":"0","b0":"1970-01-01","c0":[],"d0":[{"e0":"0"}]} {'a0':'Int64','b0':'Date','c0':'Array(Nullable(String))','d0':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a1":"1","b1":"1970-01-02","c1":["0"],"d1":[{"e1":"1"}]} {'a1':'Int64','b1':'Date','c1':'Array(Nullable(String))','d1':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a2":"2","b2":"1970-01-03","c2":["0","1"],"d2":[{"e2":"2"}]} {'a2':'Int64','b2':'Date','c2':'Array(Nullable(String))','d2':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a0":"3","b0":"1970-01-04","c0":["0","1","2"],"d0":[{"e0":"3"}]} {'a0':'Int64','b0':'Date','c0':'Array(Nullable(String))','d0':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a1":"4","b1":"1970-01-05","c1":["0","1","2","3"],"d1":[{"e1":"4"}]} {'a1':'Int64','b1':'Date','c1':'Array(Nullable(String))','d1':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +Tuple to JSON +{"a":"0","b":"1970-01-01","c":[],"d":[{"e":"0"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"1","b":"1970-01-02","c":["0"],"d":[{"e":"1"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"2","b":"1970-01-03","c":["0","1"],"d":[{"e":"2"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"3","b":"1970-01-04","c":["0","1","2"],"d":[{"e":"3"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"4","b":"1970-01-05","c":["0","1","2","3"],"d":[{"e":"4"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +Object to JSON +{"a":"0","b":"1970-01-01","c":[],"d":{"e":["0"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"1","b":"1970-01-02","c":["0"],"d":{"e":["1"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"2","b":"1970-01-03","c":["0","1"],"d":{"e":["2"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"3","b":"1970-01-04","c":["0","1","2"],"d":{"e":["3"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"4","b":"1970-01-05","c":["0","1","2","3"],"d":{"e":["4"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql new file mode 100644 index 00000000000..2e5cecaf502 --- /dev/null +++ 
b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +set allow_experimental_object_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set enable_named_columns_in_function_tuple = 1; +set enable_analyzer = 1; + +select 'Map to JSON'; +select map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); +select map('a' || number % 3, number::UInt32, 'b' || number % 3, toDate(number), 'c' || number % 3, range(number), 'd' || number % 3, [map('e' || number % 3, number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); + +select 'Tuple to JSON'; +select tuple(number::UInt32 as a, toDate(number) as b, range(number) as c, [tuple(number::UInt32 as e)] as d)::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); + +select 'Object to JSON'; +select toJSONString(map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)]))::Object('json')::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); diff --git a/tests/queries/0_stateless/03261_variant_permutation_bug.reference b/tests/queries/0_stateless/03261_variant_permutation_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_variant_permutation_bug.sql b/tests/queries/0_stateless/03261_variant_permutation_bug.sql new file mode 100644 index 00000000000..373dd9e19fa --- /dev/null +++ b/tests/queries/0_stateless/03261_variant_permutation_bug.sql @@ -0,0 +1,6 @@ +set allow_experimental_variant_type=1; +create table test (x UInt64, d Variant(UInt64)) engine=Memory; +insert into test select number, null from numbers(200000); +select d from test order by d::String limit 32213 format Null; +drop table test; + diff --git a/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference new file mode 100644 index 00000000000..5ddf8439af5 --- /dev/null +++ b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference @@ -0,0 +1 @@ +1 2 \N test diff --git a/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql new file mode 100644 index 00000000000..4543d336d14 --- /dev/null +++ b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql @@ -0,0 +1,63 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS mv_test; +DROP TABLE IF EXISTS mv_test_target; +DROP VIEW IF EXISTS mv_test_mv; + +CREATE TABLE mv_test +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE TABLE mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE MATERIALIZED VIEW mv_test_mv TO mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +AS WITH + tester AS + ( + SELECT + id, + ref_id, + final_id, + display + FROM mv_test + ), + id_set AS + ( + SELECT + display, + max(id) AS max_id + FROM mv_test + GROUP BY display + ) +SELECT * +FROM tester +WHERE id IN ( + SELECT max_id + FROM id_set +); + +INSERT INTO mv_test ( id, ref_id, display) values ( 1, 2, 'test'); + +SELECT * FROM mv_test_target; + +DROP VIEW mv_test_mv; 
+DROP TABLE mv_test_target;
+DROP TABLE mv_test;
diff --git a/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference
new file mode 100644
index 00000000000..021c06382c8
--- /dev/null
+++ b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference
@@ -0,0 +1 @@
+[] ['equals'] []
diff --git a/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql
new file mode 100644
index 00000000000..7e6f384c0a8
--- /dev/null
+++ b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql
@@ -0,0 +1,9 @@
+SELECT * FROM system.functions WHERE name = 'bitShiftLeft' format Null;
+SYSTEM FLUSH LOGS;
+SELECT used_aggregate_functions, used_functions, used_table_functions
+FROM system.query_log
+WHERE
+    event_date >= yesterday()
+    AND type = 'QueryFinish'
+    AND current_database = currentDatabase()
+    AND query LIKE '%bitShiftLeft%';
diff --git a/tests/queries/0_stateless/03262_udf_in_constraint.reference b/tests/queries/0_stateless/03262_udf_in_constraint.reference
new file mode 100644
index 00000000000..29d403b85a8
--- /dev/null
+++ b/tests/queries/0_stateless/03262_udf_in_constraint.reference
@@ -0,0 +1,2 @@
+CREATE TABLE default.t0\n(\n    `c0` Int32,\n    CONSTRAINT c1 CHECK c0 > 5\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192
+10
diff --git a/tests/queries/0_stateless/03262_udf_in_constraint.sh b/tests/queries/0_stateless/03262_udf_in_constraint.sh
new file mode 100755
index 00000000000..3c36e7caeb4
--- /dev/null
+++ b/tests/queries/0_stateless/03262_udf_in_constraint.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -q "
+    CREATE FUNCTION ${CLICKHOUSE_DATABASE}_function AS (x) -> x > 5;
+    CREATE TABLE t0 (c0 Int, CONSTRAINT c1 CHECK ${CLICKHOUSE_DATABASE}_function(c0)) ENGINE = MergeTree() ORDER BY tuple();
+    SHOW CREATE TABLE t0;
+    INSERT INTO t0(c0) VALUES (10);
+    INSERT INTO t0(c0) VALUES (3); -- {serverError VIOLATED_CONSTRAINT}
+    SELECT * FROM t0;
+
+    DROP TABLE t0;
+    DROP FUNCTION ${CLICKHOUSE_DATABASE}_function;
+"
diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference
new file mode 100644
index 00000000000..0cfbf08886f
--- /dev/null
+++ b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference
@@ -0,0 +1 @@
+2
diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql
new file mode 100644
index 00000000000..4ea853a7c22
--- /dev/null
+++ b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql
@@ -0,0 +1,19 @@
+SET allow_experimental_analyzer = 1;
+
+DROP TABLE IF EXISTS test_table;
+DROP VIEW IF EXISTS test_mv;
+
+CREATE TABLE test_table ENGINE = MergeTree ORDER BY tuple() AS SELECT 1 as col1;
+
+CREATE MATERIALIZED VIEW test_mv ENGINE = MergeTree ORDER BY tuple() AS
+WITH
+    subquery_on_source AS (SELECT col1 AS aliased FROM test_table),
+    output AS (SELECT * FROM test_table WHERE col1 IN (SELECT aliased FROM subquery_on_source))
+SELECT * FROM output;
+
+INSERT INTO test_table VALUES (2);
+
+SELECT * FROM test_mv;
+
+DROP VIEW test_mv;
+DROP TABLE test_table;
diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference
new file mode 100644
index 00000000000..25d7b7c3f24
--- /dev/null
+++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference
@@ -0,0 +1,151 @@
+add samples
+regular with fill
+2016-06-15 23:00:00 0 original
+2016-06-15 23:00:01 0
+2016-06-15 23:00:02 0
+2016-06-15 23:00:03 0
+2016-06-15 23:00:04 0
+2016-06-15 23:00:05 5 original
+2016-06-15 23:00:06 5
+2016-06-15 23:00:07 5
+2016-06-15 23:00:08 5
+2016-06-15 23:00:09 5
+2016-06-15 23:00:10 10 original
+2016-06-15 23:00:11 10
+2016-06-15 23:00:12 10
+2016-06-15 23:00:13 10
+2016-06-15 23:00:14 10
+2016-06-15 23:00:15 15 original
+2016-06-15 23:00:16 15
+2016-06-15 23:00:17 15
+2016-06-15 23:00:18 15
+2016-06-15 23:00:19 15
+2016-06-15 23:00:20 20 original
+2016-06-15 23:00:21 20
+2016-06-15 23:00:22 20
+2016-06-15 23:00:23 20
+2016-06-15 23:00:24 20
+2016-06-15 23:00:25 25 original
+staleness 1 seconds
+2016-06-15 23:00:00 0 original
+2016-06-15 23:00:05 5 original
+2016-06-15 23:00:10 10 original
+2016-06-15 23:00:15 15 original
+2016-06-15 23:00:20 20 original
+2016-06-15 23:00:25 25 original
+staleness 3 seconds
+2016-06-15 23:00:00 0 original
+2016-06-15 23:00:01 0
+2016-06-15 23:00:02 0
+2016-06-15 23:00:05 5 original
+2016-06-15 23:00:06 5
+2016-06-15 23:00:07 5
+2016-06-15 23:00:10 10 original
+2016-06-15 23:00:11 10
+2016-06-15 23:00:12 10
+2016-06-15 23:00:15 15 original
+2016-06-15 23:00:16 15
+2016-06-15 23:00:17 15
+2016-06-15 23:00:20 20 original
+2016-06-15 23:00:21 20
+2016-06-15 23:00:22 20
+2016-06-15 23:00:25 25 original
+2016-06-15 23:00:26 25
+2016-06-15 23:00:27 25
+descending order
+2016-06-15 23:00:25 25 original
+2016-06-15 23:00:24 25
+2016-06-15 23:00:20 20 original
+2016-06-15 23:00:19 20
+2016-06-15 23:00:15 15 original
+2016-06-15 23:00:14 15
+2016-06-15 23:00:10 10 original
+2016-06-15 23:00:09 10
+2016-06-15 23:00:05 5 original
+2016-06-15 23:00:04 5
+2016-06-15 23:00:00 0 original
+2016-06-15 22:59:59 0
+staleness with to and step
+2016-06-15 23:00:00 0 original
+2016-06-15 23:00:03 0
+2016-06-15 23:00:05 5 original
+2016-06-15 23:00:06 5
+2016-06-15 23:00:09 5
+2016-06-15 23:00:10 10 original
+2016-06-15 23:00:12 10
+2016-06-15 23:00:15 15 original
+2016-06-15 23:00:18 15
+2016-06-15 23:00:20 20 original
+2016-06-15 23:00:21 20
+2016-06-15 23:00:24 20
+2016-06-15 23:00:25 25 original
+2016-06-15 23:00:27 25
+2016-06-15 23:00:30 25
+staleness with another regular with fill
+2016-06-15 23:00:00 1970-01-01 01:00:00 0
+2016-06-15 23:00:00 1970-01-01 01:00:01 0
+2016-06-15 23:00:00 1970-01-01 01:00:02 0
+2016-06-15 23:00:00 2016-06-15 23:00:00 0 original
+2016-06-15 23:00:01 1970-01-01 01:00:00 0
+2016-06-15 23:00:01 1970-01-01 01:00:01 0
+2016-06-15 23:00:01 1970-01-01 01:00:02 0
+2016-06-15 23:00:05 1970-01-01 01:00:00 0
+2016-06-15 23:00:05 1970-01-01 01:00:01 0
+2016-06-15 23:00:05 1970-01-01 01:00:02 0
+2016-06-15 23:00:05 2016-06-15 23:00:05 5 original
+2016-06-15 23:00:06 1970-01-01 01:00:00 5
+2016-06-15 23:00:06 1970-01-01 01:00:01 5
+2016-06-15 23:00:06 1970-01-01 01:00:02 5
+2016-06-15 23:00:10 1970-01-01 01:00:00 5
+2016-06-15 23:00:10 1970-01-01 01:00:01 5
+2016-06-15 23:00:10 1970-01-01 01:00:02 5
+2016-06-15 23:00:10 2016-06-15 23:00:10 10 original
+2016-06-15 23:00:11 1970-01-01 01:00:00 10
+2016-06-15 23:00:11 1970-01-01 01:00:01 10
+2016-06-15 23:00:11 1970-01-01 01:00:02 10
+2016-06-15 23:00:15 1970-01-01 01:00:00 10
+2016-06-15 23:00:15 1970-01-01 01:00:01 10
+2016-06-15 23:00:15 1970-01-01 01:00:02 10
+2016-06-15 23:00:15 2016-06-15 23:00:15 15 original
+2016-06-15 23:00:16 1970-01-01 01:00:00 15
+2016-06-15 23:00:16 1970-01-01 01:00:01 15
+2016-06-15 23:00:16 1970-01-01 01:00:02 15
+2016-06-15 23:00:20 1970-01-01 01:00:00 15
+2016-06-15 23:00:20 1970-01-01 01:00:01 15
+2016-06-15 23:00:20 1970-01-01 01:00:02 15
+2016-06-15 23:00:20 2016-06-15 23:00:20 20 original
+2016-06-15 23:00:21 1970-01-01 01:00:00 20
+2016-06-15 23:00:21 1970-01-01 01:00:01 20
+2016-06-15 23:00:21 1970-01-01 01:00:02 20
+2016-06-15 23:00:25 1970-01-01 01:00:00 20
+2016-06-15 23:00:25 1970-01-01 01:00:01 20
+2016-06-15 23:00:25 1970-01-01 01:00:02 20
+2016-06-15 23:00:25 2016-06-15 23:00:25 25 original
+2016-06-15 23:00:26 1970-01-01 01:00:00 25
+2016-06-15 23:00:26 1970-01-01 01:00:01 25
+2016-06-15 23:00:26 1970-01-01 01:00:02 25
+double staleness
+2016-06-15 23:00:00 2016-06-15 23:00:00 0 original
+2016-06-15 23:00:00 2016-06-15 23:00:02 0
+2016-06-15 23:00:00 2016-06-15 23:00:04 0
+2016-06-15 23:00:01 1970-01-01 01:00:00 0
+2016-06-15 23:00:05 2016-06-15 23:00:05 5 original
+2016-06-15 23:00:05 2016-06-15 23:00:07 5
+2016-06-15 23:00:05 2016-06-15 23:00:09 5
+2016-06-15 23:00:06 1970-01-01 01:00:00 5
+2016-06-15 23:00:10 2016-06-15 23:00:10 10 original
+2016-06-15 23:00:10 2016-06-15 23:00:12 10
+2016-06-15 23:00:10 2016-06-15 23:00:14 10
+2016-06-15 23:00:11 1970-01-01 01:00:00 10
+2016-06-15 23:00:15 2016-06-15 23:00:15 15 original
+2016-06-15 23:00:15 2016-06-15 23:00:17 15
+2016-06-15 23:00:15 2016-06-15 23:00:19 15
+2016-06-15 23:00:16 1970-01-01 01:00:00 15
+2016-06-15 23:00:20 2016-06-15 23:00:20 20 original
+2016-06-15 23:00:20 2016-06-15 23:00:22 20
+2016-06-15 23:00:20 2016-06-15 23:00:24 20
+2016-06-15 23:00:21 1970-01-01 01:00:00 20
+2016-06-15 23:00:25 2016-06-15 23:00:25 25 original
+2016-06-15 23:00:25 2016-06-15 23:00:27 25
+2016-06-15 23:00:25 2016-06-15 23:00:29 25
+2016-06-15 23:00:26 1970-01-01 01:00:00 25
diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql
new file mode 100644
index 00000000000..de47d8287ad
--- /dev/null
+++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql
@@ -0,0 +1,34 @@
+SET session_timezone='Europe/Amsterdam';
+SET enable_analyzer=1;
+
+DROP TABLE IF EXISTS with_fill_staleness;
+CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a;
+
+SELECT 'add samples';
+
+INSERT INTO with_fill_staleness
+SELECT
+    toDateTime('2016-06-15 23:00:00') + number AS a, a as b, number as c
+FROM numbers(30)
+WHERE (number % 5) == 0;
+
+SELECT 'regular with fill';
+SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL INTERPOLATE (c);
+
+SELECT 'staleness 1 seconds';
+SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 1 SECOND INTERPOLATE (c);
+
+SELECT 'staleness 3 seconds';
+SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 3 SECOND INTERPOLATE (c);
+
+SELECT 'descending order';
+SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a DESC WITH FILL STALENESS INTERVAL -2 SECOND INTERPOLATE (c);
+
+SELECT 'staleness with to and step';
+SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL TO toDateTime('2016-06-15 23:00:40') STEP 3 STALENESS INTERVAL 7 SECOND INTERPOLATE (c);
+
+SELECT 'staleness with another regular with fill';
+SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL FROM 0 TO 3 INTERPOLATE (c);
+
+SELECT 'double staleness';
+SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL TO toDateTime('2016-06-15 23:01:00') STEP 2 STALENESS 5 INTERPOLATE (c);
diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference b/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference
new file mode 100644
index 00000000000..bf8e5bbe331
--- /dev/null
+++ b/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference
@@ -0,0 +1,67 @@
+test-1
+0 5 10 original
+0 5 13
+0 5 16
+0 5 19
+0 5 22
+0 7 0
+7 8 15 original
+7 8 18
+7 8 21
+7 8 24
+7 10 0
+14 10 20 original
+14 10 23
+14 12 0
+test-2-1
+1 0 original
+1 1
+1 2
+1 3
+1 4 original
+1 5
+1 6
+1 7
+1 8 original
+1 9
+1 10
+1 11
+1 12 original
+test-2-2
+1 0 original
+1 1
+1 2
+1 3
+1 4 original
+1 5
+1 6
+1 7
+1 8 original
+1 9
+1 10
+1 11
+1 12 original
+1 13
+1 14
+2 0
+3 0
+4 0
+test-3-1
+25 -10
+25 -8
+25 -6
+25 -4
+25 -2
+25 0
+25 2
+25 4
+25 6
+25 8
+25 10
+25 12
+25 14
+25 16
+25 17 original
+28 -10
+30 18 original
+31 -10
diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql b/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql
new file mode 100644
index 00000000000..9e28041c9a1
--- /dev/null
+++ b/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql
@@ -0,0 +1,25 @@
+SET enable_analyzer=1;
+
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (a Int64, b Int64, c Int64) Engine=MergeTree ORDER BY a;
+INSERT INTO test(a, b, c) VALUES (0, 5, 10), (7, 8, 15), (14, 10, 20);
+
+SELECT 'test-1';
+SELECT *, 'original' AS orig FROM test ORDER BY a, b WITH FILL TO 20 STEP 2 STALENESS 3, c WITH FILL TO 25 step 3;
+
+DROP TABLE IF EXISTS test2;
+CREATE TABLE test2 (a Int64, b Int64) Engine=MergeTree ORDER BY a;
+INSERT INTO test2(a, b) values (1, 0), (1, 4), (1, 8), (1, 12);
+
+SELECT 'test-2-1';
+SELECT *, 'original' AS orig FROM test2 ORDER BY a, b WITH FILL;
+
+SELECT 'test-2-2';
+SELECT *, 'original' AS orig FROM test2 ORDER BY a WITH FILL to 20 STALENESS 4, b WITH FILL TO 15 STALENESS 7;
+
+DROP TABLE IF EXISTS test2;
+CREATE TABLE test3 (a Int64, b Int64) Engine=MergeTree ORDER BY a;
+INSERT INTO test3(a, b) VALUES (25, 17), (30, 18);
+
+SELECT 'test-3-1';
+SELECT a, b, 'original' AS orig FROM test3 ORDER BY a WITH FILL TO 33 STEP 3, b WITH FILL FROM -10 STEP 2;
diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.reference b/tests/queries/0_stateless/03266_with_fill_staleness_errors.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql
new file mode 100644
index 00000000000..fbfaf3743ca
--- /dev/null
+++ b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql
@@ -0,0 +1,5 @@
+SET enable_analyzer=1;
+
+SELECT 1 AS a, 2 AS b ORDER BY a, b WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION }
+SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION }
+SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION }
diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt
index ec44a1e1de9..2373a98239a 100644
--- a/utils/CMakeLists.txt
+++ b/utils/CMakeLists.txt
@@ -23,3 +23,7 @@ if (ENABLE_UTILS)
     add_subdirectory (keeper-data-dumper)
     add_subdirectory (memcpy-bench)
 endif ()
+
+if (ENABLE_FUZZING AND ENABLE_FUZZER_TEST)
+    add_subdirectory (libfuzzer-test)
+endif ()
diff --git a/utils/check-style/check-doc-aspell b/utils/check-style/check-doc-aspell
index b5a3958e6cf..0406b337575 100755
--- a/utils/check-style/check-doc-aspell
+++ b/utils/check-style/check-doc-aspell
@@ -53,7 +53,7 @@ done
 if (( STATUS != 0 )); then
     echo "====== Errors found ======"
     echo "To exclude some words add them to the dictionary file \"${ASPELL_IGNORE_PATH}/aspell-dict.txt\""
-    echo "You can also run ${0} -i to see the errors interactively and fix them or add to the dictionary file"
+    echo "You can also run '$(realpath --relative-base=${ROOT_PATH} ${0}) -i' to see the errors interactively and fix them or add to the dictionary file"
 fi
 
 exit ${STATUS}
diff --git a/utils/libfuzzer-test/CMakeLists.txt b/utils/libfuzzer-test/CMakeLists.txt
new file mode 100644
index 00000000000..8765787ff8a
--- /dev/null
+++ b/utils/libfuzzer-test/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory (test_basic_fuzzer)
diff --git a/utils/libfuzzer-test/README.md b/utils/libfuzzer-test/README.md
new file mode 100644
index 00000000000..5598cbdb961
--- /dev/null
+++ b/utils/libfuzzer-test/README.md
@@ -0,0 +1 @@
+This folder contains various stuff intended to test libfuzzer functionality.
diff --git a/utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt b/utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt
new file mode 100644
index 00000000000..dc927f35a4b
--- /dev/null
+++ b/utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt
@@ -0,0 +1 @@
+add_executable (test_basic_fuzzer main.cpp)
diff --git a/utils/libfuzzer-test/test_basic_fuzzer/main.cpp b/utils/libfuzzer-test/test_basic_fuzzer/main.cpp
new file mode 100644
index 00000000000..7ccad63273d
--- /dev/null
+++ b/utils/libfuzzer-test/test_basic_fuzzer/main.cpp
@@ -0,0 +1,11 @@
+#include <cstdint>
+#include <cstddef>
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
+{
+    if (size > 0 && data[0] == 'H')
+        if (size > 1 && data[1] == 'I')
+            if (size > 2 && data[2] == '!')
+                __builtin_trap();
+    return 0;
+}
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index 10c55aa4bf5..fab562a8cbb 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,5 +1,7 @@
+v24.10.1.2812-stable 2024-11-01
 v24.9.2.42-stable 2024-10-03
 v24.9.1.3278-stable 2024-09-26
+v24.8.6.70-lts 2024-11-04
 v24.8.5.115-lts 2024-10-08
 v24.8.4.13-lts 2024-09-06
 v24.8.3.59-lts 2024-09-03
@@ -29,6 +31,7 @@ v24.4.4.113-stable 2024-08-02
 v24.4.3.25-stable 2024-06-14
 v24.4.2.141-stable 2024-06-07
 v24.4.1.2088-stable 2024-05-01
+v24.3.13.40-lts 2024-11-07
 v24.3.12.75-lts 2024-10-08
 v24.3.11.7-lts 2024-09-06
 v24.3.10.33-lts 2024-09-03
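Note on the 03266_with_fill_staleness tests above: they are the only place in this patch where the new STALENESS modifier of ORDER BY ... WITH FILL appears, and they show it without commentary. Below is a minimal standalone sketch of the behaviour implied by the expected outputs; the table name, column names and sample values are illustrative and are not taken from the patch. Judging by the reference files, filled rows are generated only while they stay within the STALENESS interval of the most recent original row, after which the output jumps to the next original value.

-- Minimal sketch, assuming a ClickHouse server with the analyzer enabled
-- (the tests above SET enable_analyzer=1 before using STALENESS).
SET enable_analyzer = 1;

CREATE TABLE fill_demo (t DateTime, v UInt64) ENGINE = MergeTree ORDER BY t;
INSERT INTO fill_demo VALUES ('2016-06-15 23:00:00', 0), ('2016-06-15 23:00:05', 5);

-- Plain WITH FILL: every missing second between 23:00:00 and 23:00:05 is generated,
-- and INTERPOLATE (v) carries the previous value of v into the filled rows.
SELECT t, v FROM fill_demo ORDER BY t ASC WITH FILL INTERPOLATE (v);

-- WITH FILL ... STALENESS INTERVAL 3 SECOND: filling stops as soon as a generated row
-- would lie 3 seconds or more past the last original row, so only 23:00:01 and
-- 23:00:02 appear before the original row at 23:00:05 (and, per the reference output
-- pattern, two more filled rows follow the last original row within the same window).
SELECT t, v FROM fill_demo ORDER BY t ASC WITH FILL STALENESS INTERVAL 3 SECOND INTERPOLATE (v);

DROP TABLE fill_demo;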